%%% -*-BibTeX-*-
%%% ====================================================================
%%%  BibTeX-file{
%%%     author          = "Nelson H. F. Beebe",
%%%     version         = "1.14",
%%%     date            = "10 September 2014",
%%%     time            = "07:34:55 MDT",
%%%     filename        = "jacm.bib",
%%%     address         = "University of Utah
%%%                        Department of Mathematics, 110 LCB
%%%                        155 S 1400 E RM 233
%%%                        Salt Lake City, UT 84112-0090
%%%                        USA",
%%%     telephone       = "+1 801 581 5254",
%%%     FAX             = "+1 801 581 4148",
%%%     checksum        = "53650 9369 51515 485387",
%%%     email           = "beebe at math.utah.edu, beebe at acm.org,
%%%                        beebe at computer.org (Internet)",
%%%     codetable       = "ISO/ASCII",
%%%     keywords        = "ACM Journal of Experimental Algorithmics;
%%%                        bibliography; BibTeX; JEA",
%%%     license         = "public domain",
%%%     supported       = "yes",
%%%     docstring       = "This is a COMPLETE bibliography of the
%%%                        ACM Journal of Experimental Algorithmics
%%%                        (CODEN none, ISSN 1084-6654).  The journal
%%%                        appears once a year, and has no separate
%%%                        issue numbers.
%%%
%%%                        At version 1.14, the COMPLETE year coverage
%%%                        looked like this:
%%%
%%%                             1996 (   4)    2003 (   6)    2010 (  12)
%%%                             1997 (   5)    2004 (   6)    2011 (  18)
%%%                             1998 (   9)    2005 (  15)    2012 (   2)
%%%                             1999 (   8)    2006 (  17)    2013 (  12)
%%%                             2000 (  17)    2007 (   0)    2014 (   5)
%%%                             2001 (  10)    2008 (  29)
%%%                             2002 (  12)    2009 (  28)
%%%
%%%                             Article:        215
%%%
%%%                             Total entries:  215
%%%
%%%                        The author will be grateful for reports of
%%%                        any errors or omissions in this file; they
%%%                        will be corrected in future editions.
%%%
%%%                        Articles and letters or corrections that
%%%                        comment on them are cross-referenced in both
%%%                        directions, so that citation of one of them
%%%                        will automatically include the others.
%%%
%%%                        The ACM maintains Web pages for this journal at
%%%
%%%                            http://portal.acm.org/browse_dl.cfm?idx=J430
%%%
%%%                        That data has been automatically converted
%%%                        to BibTeX form, corrected for spelling and
%%%                        page number errors, and merged into this
%%%                        file.
%%%
%%%                        ACM copyrights explicitly permit abstracting
%%%                        with credit, so article abstracts, keywords,
%%%                        and subject classifications have been
%%%                        included in this bibliography wherever
%%%                        available.  Article reviews have been
%%%                        omitted, until their copyright status has
%%%                        been clarified.
%%%
%%%                        Spelling has been verified with the UNIX
%%%                        spell and GNU ispell programs using the
%%%                        exception dictionary stored in the companion
%%%                        file with extension .sok.
%%%
%%%                        BibTeX citation tags are uniformly chosen
%%%                        as name:year:abbrev, where name is the
%%%                        family name of the first author or editor,
%%%                        year is a 4-digit number, and abbrev is a
%%%                        3-letter condensation of important title
%%%                        words. Citation tags were automatically
%%%                        generated by the biblabel software
%%%                        developed for the BibNet Project.
%%%
%%%                        In this bibliography, entries are sorted in
%%%                        publication order, with the help of
%%%                        ``bibsort -byvolume''.  The bibsort utility,
%%%                        and several related programs for
%%%                        bibliography maintenance, are available on
%%%                        ftp.math.utah.edu in /pub/tex/bib, and at
%%%                        other Internet sites which mirror it,
%%%                        including the Comprehensive TeX Archive
%%%                        Network (CTAN); the command `finger
%%%                        ctan<at>tug.org' will produce a list of
%%%                        CTAN hosts.
%%%
%%%                        The checksum field above contains a CRC-16
%%%                        checksum as the first value, followed by the
%%%                        equivalent of the standard UNIX wc (word
%%%                        count) utility output of lines, words, and
%%%                        characters.  This is produced by Robert
%%%                        Solovay's checksum utility.",
%%%  }
%%% ====================================================================

@Preamble{
   "\input path.sty"
 # "\hyphenation{ }"
 # "\ifx \undefined \mathbb \def \mathbb #1{{\bf #1}}\fi"
}

%%% ====================================================================
%%% Acknowledgement abbreviations:

@String{ack-nhfb = "Nelson H. F. Beebe,
                    University of Utah,
                    Department of Mathematics, 110 LCB,
                    155 S 1400 E RM 233,
                    Salt Lake City, UT 84112-0090, USA,
                    Tel: +1 801 581 5254,
                    FAX: +1 801 581 4148,
                    e-mail: \path|beebe@math.utah.edu|,
                            \path|beebe@acm.org|,
                            \path|beebe@computer.org| (Internet),
                    URL: \path|http://www.math.utah.edu/~beebe/|"}

%%% ====================================================================
%%% Journal abbreviations:

@String{j-ACM-J-EXP-ALGORITHMICS = "ACM Journal of Experimental Algorithmics"}

%%% ====================================================================
%%% Bibliography entries, sorted in publication order.

@Article{Knuth:1996:II,
  author =       "Donald E. Knuth",
  title =        "Irredundant intervals",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "1",
  pages =        "1:1--1:??",
  month =        "????",
  year =         "1996",
  CODEN =        "????",
  DOI =          "http://doi.acm.org/10.1145/235141.235146",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Oct 6 16:01:58 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "This expository note presents simplifications of a
                 theorem due to Gy{\H{o}}ri and an algorithm due to
                 Franzblau and Kleitman: Given a family $F$ of $m$
                 intervals on a linearly ordered set of $n$ elements, we
                 can construct in $O((m + n)^2)$ steps an irredundant
                 subfamily having maximum cardinality, as well as a
                 generating family having minimum cardinality. The
                 algorithm is of special interest because it solves a
                 problem analogous to finding a maximum independent set,
                 but on a class of objects that is more general than a
                 matroid. This note is also a complete, runnable
                 computer program, which can be used for experiments in
                 conjunction with the public-domain software of The
                 Stanford GraphBase.",
  acknowledgement = ack-nhfb,
  articleno =    "1",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
}

@Article{Gittleman:1996:PSS,
  author =       "Arthur Gittleman",
  title =        "Predicting string search speed",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "1",
  pages =        "2:1--2:??",
  month =        "????",
  year =         "1996",
  CODEN =        "????",
  DOI =          "http://doi.acm.org/10.1145/235141.235147",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Oct 6 16:01:58 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "String search is fundamental in many text processing
                 applications. Sunday recently gave several algorithms
                 to find the first occurrence of a pattern string as a
                 substring of a text, providing experimental data from
                 searches in a text of about 200K characters to support
                 his claim that his algorithms are faster than the
                 standard Boyer--Moore algorithm. We present a
                 methodology for the average-case analysis of the
                 performance of string search algorithms---for such
                 algorithms, a worst-case analysis does not yield much
                 useful information, since the performance of the
                 algorithm is directly affected by such characteristics
                 as the size of the character set, the character
                 frequencies, and the structure of the text. Knuth
                 described a finite automaton which can be used to save
                 information about character comparisons. Baeza-Yates,
                 Gonnet, and Regnier gave a probabilistic analysis of
                 the worst- and average-case behavior of a string search
                 algorithm based upon such an automaton. We construct
                 Knuth automata to model Sunday's algorithms and use the
                 methods of Baeza-Yates et al. to obtain an average-case
                 analysis which confirms Sunday's experimental data.",
  acknowledgement = ack-nhfb,
  articleno =    "2",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
}
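
%%% Sunday's quick-search variant named in the preceding abstract is easy
%%% to state; the sketch below is a minimal Python rendering of that
%%% well-known algorithm (it does not reproduce the paper's
%%% automaton-based average-case analysis):
%%%
%%%     def quick_search(pattern, text):
%%%         # On a mismatch, shift by the bad-character distance of the
%%%         # text character just past the current window.
%%%         m, n = len(pattern), len(text)
%%%         if m == 0 or m > n:
%%%             return -1
%%%         # For each pattern character, distance from its last
%%%         # occurrence to one slot past the end of the pattern.
%%%         shift = {c: m - i for i, c in enumerate(pattern)}
%%%         i = 0
%%%         while i <= n - m:
%%%             if text[i:i + m] == pattern:
%%%                 return i
%%%             if i + m >= n:
%%%                 break
%%%             i += shift.get(text[i + m], m + 1)
%%%         return -1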

@Article{Bader:1996:PPA,
  author =       "David A. Bader and David R. Helman and Joseph
                 J{\'a}J{\'a}",
  title =        "Practical parallel algorithms for personalized
                 communication and integer sorting",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "1",
  pages =        "3:1--3:??",
  month =        "????",
  year =         "1996",
  CODEN =        "????",
  DOI =          "http://doi.acm.org/10.1145/235141.235148",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Oct 6 16:01:58 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "A fundamental challenge for parallel computing is to
                 obtain high-level, architecture independent, algorithms
                 which efficiently execute on general-purpose parallel
                 machines. With the emergence of message passing
                 standards such as MPI, it has become easier to design
                 efficient and portable parallel algorithms by making
                 use of these communication primitives. While existing
                 primitives allow an assortment of collective
                 communication routines, they do not handle an important
                 communication event when most or all processors have
                 non-uniformly sized personalized messages to exchange
                 with each other. We focus in this paper on the
                 h-relation personalized communication whose efficient
                 implementation will allow high performance
                 implementations of a large class of algorithms. While
                 most previous h-relation algorithms use randomization,
                 this paper presents a new deterministic approach for
                 h-relation personalized communication with
                 asymptotically optimal complexity for $h > p^2$. As an
                 application, we present an efficient algorithm for
                 stable integer sorting. The algorithms presented in
                 this paper have been coded in Split-C and run on a
                 variety of platforms, including the Thinking Machines
                 CM-5, IBM SP-1 and SP-2, Cray Research T3D, Meiko
                 Scientific CS-2, and the Intel Paragon. Our
                 experimental results are consistent with the
                 theoretical analysis and illustrate the scalability and
                 efficiency of our algorithms across different
                 platforms. In fact, they seem to outperform all similar
                 algorithms known to the authors on these platforms.",
  acknowledgement = ack-nhfb,
  articleno =    "3",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
}

@Article{LaMarca:1996:ICP,
  author =       "Anthony LaMarca and Richard Ladner",
  title =        "The influence of caches on the performance of heaps",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "1",
  pages =        "4:1--4:??",
  month =        "????",
  year =         "1996",
  CODEN =        "????",
  DOI =          "http://doi.acm.org/10.1145/235141.235145",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Oct 6 16:01:58 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "As memory access times grow larger relative to
                 processor cycle times, the cache performance of
                 algorithms has an increasingly large impact on overall
                 performance. Unfortunately, most commonly used
                 algorithms were not designed with cache performance in
                 mind. This paper investigates the cache performance of
                 implicit heaps. We present optimizations which
                 significantly reduce the cache misses that heaps incur
                 and improve their overall performance. We present an
                 analytical model called collective analysis that allows
                 cache performance to be predicted as a function of both
                 cache configuration and algorithm configuration. As
                 part of our investigation, we perform an approximate
                 analysis of the cache performance of both traditional
                 heaps and our improved heaps in our model. In addition
                 empirical data is given for five architectures to show
                 the impact our optimizations have on overall
                 performance. We also revisit a priority queue study
                 originally performed by Jones [25]. Due to the
                 increases in cache miss penalties, the relative
                 performance results we obtain on today's machines
                 differ greatly from the machines of only ten years ago.
                 We compare the performance of implicit heaps, skew
                 heaps and splay trees and discuss the difference
                 between our results and Jones's.",
  acknowledgement = ack-nhfb,
  articleno =    "4",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
}
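
%%% The abstract above does not spell out the optimizations; one standard
%%% cache-conscious device in this literature is the implicit $d$-ary
%%% heap, which places all $d$ children of a node in adjacent slots so
%%% that one cache block serves them all. A minimal sift-down sketch
%%% (illustrative only; the paper's collective analysis is not
%%% reproduced):
%%%
%%%     def sift_down(a, i, d=4):
%%%         # Children of node i occupy slots d*i + 1 .. d*i + d.
%%%         n = len(a)
%%%         while True:
%%%             first = d * i + 1
%%%             if first >= n:
%%%                 return
%%%             child = min(range(first, min(first + d, n)),
%%%                         key=a.__getitem__)
%%%             if a[child] >= a[i]:
%%%                 return
%%%             a[i], a[child] = a[child], a[i]
%%%             i = child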

@Article{Buchsbaum:1997:AAS,
  author =       "Adam L. Buchsbaum and Raffaele Giancarlo",
  title =        "Algorithmic aspects in speech recognition: an
                 introduction",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "2",
  pages =        "1:1--1:??",
  month =        "????",
  year =         "1997",
  CODEN =        "????",
  DOI =          "http://doi.acm.org/10.1145/264216.264219",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Oct 6 16:02:14 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "Speech recognition is an area with a considerable
                 literature, but there is little discussion of the topic
                 within the computer science algorithms literature. Many
                 computer scientists, however, are interested in the
                 computational problems of speech recognition. This
                 paper presents the field of speech recognition and
                 describes some of its major open problems from an
                 algorithmic viewpoint. Our goal is to stimulate the
                 interest of algorithm designers and experimenters to
                 investigate the algorithmic problems of effective
                 automatic speech recognition.",
  acknowledgement = ack-nhfb,
  articleno =    "1",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
  keywords =     "automata theory; graph searching",
}

@Article{Battiti:1997:RSH,
  author =       "Roberto Battiti and Marco Protasi",
  title =        "Reactive search, a history-sensitive heuristic for
                 {MAX}-{SAT}",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "2",
  pages =        "2:1--2:??",
  month =        "????",
  year =         "1997",
  CODEN =        "????",
  DOI =          "http://doi.acm.org/10.1145/264216.264220",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Oct 6 16:02:14 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "The Reactive Search (RS) method proposes the
                 integration of a simple history-sensitive (machine
                 learning) scheme into local search for the on-line
                 determination of free parameters. In this paper a new
                 RS algorithm is proposed for the approximated solution
                 of the Maximum Satisfiability problem: a component
                 based on local search with temporary prohibitions (Tabu
                 Search) is complemented with a reactive scheme that
                 determines the appropriate value of the prohibition
                 parameter by monitoring the Hamming distance along the
                 search trajectory. The proposed algorithm (H-RTS) can
                 therefore be characterized as a dynamic version of Tabu
                 Search. In addition, the non-oblivious functions
                 recently introduced in the framework of approximation
                 algorithms are used to discover a better local optimum
                 in the initial part of the search. The algorithm is
                 developed in two phases. First the bias-diversification
                 properties of individual candidate components are
                 analyzed by extensive empirical evaluation, then a
                 reactive scheme is added to the winning component,
                 based on Tabu Search. The final tests on a benchmark of
                 random MAX-3-SAT and MAX-4-SAT problems demonstrate the
                 superiority of H-RTS with respect to alternative
                 heuristics.",
  acknowledgement = ack-nhfb,
  articleno =    "2",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
}
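
%%% A fixed-tenure tabu search for MAX-SAT conveys the core of the scheme
%%% the abstract describes; H-RTS differs in adapting the prohibition
%%% (tenure) parameter on-line from the observed Hamming distance and in
%%% its non-oblivious starting phase, both omitted in this minimal sketch:
%%%
%%%     import random
%%%
%%%     def tabu_maxsat(clauses, n_vars, steps=1000, tenure=10):
%%%         # clauses: tuples of nonzero ints; literal v is satisfied
%%%         # when variable |v| has truth value (v > 0).
%%%         assign = [random.random() < 0.5 for _ in range(n_vars + 1)]
%%%
%%%         def n_sat():
%%%             return sum(any(assign[abs(l)] == (l > 0) for l in c)
%%%                        for c in clauses)
%%%
%%%         best, tabu_until = n_sat(), [0] * (n_vars + 1)
%%%         for t in range(steps):
%%%             free = [v for v in range(1, n_vars + 1)
%%%                     if tabu_until[v] <= t]
%%%             if not free:
%%%                 continue
%%%
%%%             def gain(v):        # satisfied clauses after flipping v
%%%                 assign[v] = not assign[v]
%%%                 s = n_sat()
%%%                 assign[v] = not assign[v]
%%%                 return s
%%%
%%%             v = max(free, key=gain)      # best non-tabu flip
%%%             assign[v] = not assign[v]
%%%             tabu_until[v] = t + tenure   # prohibit reversing the move
%%%             best = max(best, n_sat())
%%%         return best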

@Article{Smith:1997:EHF,
  author =       "Bradley J. Smith and Gregory L. Heileman and Chaouki
                 Abdallah",
  title =        "The exponential hash function",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "2",
  pages =        "3:1--3:??",
  month =        "????",
  year =         "1997",
  CODEN =        "????",
  DOI =          "http://doi.acm.org/10.1145/264216.264221",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Oct 6 16:02:14 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "In this paper an efficient open address hash function
                 called exponential hashing is developed. The motivation
                 for this hash function resulted from our ongoing
                 efforts to apply dynamical systems theory to the study
                 of hashing; however, the analysis conducted in this
                 paper is primarily based on traditional number theory.
                 Proofs of optimal table parameter choices are provided
                 for a number of hash functions. We also demonstrate
                 experimentally that exponential hashing essentially
                 matches the performance of a widely-used optimal double
                 hash function for uniform data distributions, and
                 performs significantly better for nonuniform data
                 distributions. We show that exponential hashing
                 exhibits a higher integer Lyapunov exponent and entropy
                 than double hashing for initial data probes, which
                 offers one explanation for its improved performance on
                 nonuniform data distributions.",
  acknowledgement = ack-nhfb,
  articleno =    "3",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
  keywords =     "ADT; chaos; dynamic dictionary; dynamical systems
                 theory; exponential hashing; Lyapunov exponent; number
                 theory",
}
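
%%% For reference, the widely used double hashing baseline that the entry
%%% compares against generates the probe sequence below; exponential
%%% hashing replaces the constant additive step with a different
%%% recurrence (not reproduced here, to avoid guessing at the paper's
%%% construction):
%%%
%%%     def double_hash_probes(key, table_size):
%%%         # Probe sequence h1, h1 + h2, h1 + 2*h2, ... (mod table_size).
%%%         # With table_size prime and 1 <= h2 < table_size, the
%%%         # sequence visits every slot.
%%%         h1 = key % table_size
%%%         h2 = 1 + key % (table_size - 1)
%%%         probe = h1
%%%         while True:
%%%             yield probe
%%%             probe = (probe + h2) % table_size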

@Article{Purchase:1997:ESB,
  author =       "H. C. Purchase and R. F. Cohen and M. I. James",
  title =        "An experimental study of the basis for graph drawing
                 algorithms",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "2",
  pages =        "4:1--4:??",
  month =        "????",
  year =         "1997",
  CODEN =        "????",
  DOI =          "http://doi.acm.org/10.1145/264216.264222",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Oct 6 16:02:14 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "Designers of graph drawing algorithms and systems
                 claim to illuminate application data by producing
                 layouts that optimise measurable aesthetic qualities.
                 Examples of these aesthetics include symmetry (where
                 possible, a symmetrical view of the graph should be
                 displayed), minimise arc crossings (the number of arc
                 crossings in the display should be minimised), and
                 minimise bends (the total number of bends in polyline
                 arcs should be minimised). The aim of this paper is to
                 describe our work to validate these claims by
                 performing empirical studies of human understanding of
                 graphs drawn using various layout aesthetics. This work
                 is important since it helps indicate to algorithm and
                 system designers what are the aesthetic qualities most
                 important to aid understanding, and consequently to
                 build more effective systems.",
  acknowledgement = ack-nhfb,
  articleno =    "4",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
  keywords =     "human-computer interaction",
}

@Article{Alberts:1997:ESD,
  author =       "David Alberts and Giuseppe Cattaneo and Giuseppe F.
                 Italiano",
  title =        "An empirical study of dynamic graph algorithms",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "2",
  pages =        "5:1--5:??",
  month =        "????",
  year =         "1997",
  CODEN =        "????",
  DOI =          "http://doi.acm.org/10.1145/264216.264223",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Oct 6 16:02:14 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "The contributions of this paper are both of
                 theoretical and of experimental nature. From the
                 experimental point of view, we conduct an empirical
                 study on some dynamic connectivity algorithms which
                 were developed recently. In particular, the following
                 implementations were tested and compared with simple
                 algorithms: simple sparsification by Eppstein et al.
                 and the recent randomized algorithm by Henzinger and
                 King. In our experiments, we considered both random and
                 non-random inputs. Moreover, we present a simplified
                 variant of the algorithm by Henzinger and King, which
                 for random inputs was always faster than the original
                 implementation. For non-random inputs, simple
                 sparsification was the fastest algorithm for small
                 sequences of updates; for medium and large sequences of
                 updates, the original algorithm by Henzinger and King
                 was faster. From the theoretical point of view, we
                 analyze the average case running time of simple
                 sparsification and prove that for dynamic random graphs
                 its logarithmic overhead vanishes.",
  acknowledgement = ack-nhfb,
  articleno =    "5",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
}

@Article{Hemaspaandra:1998:PBA,
  author =       "Lane A. Hemaspaandra and Kulathur S. Rajasethupathy
                 and Prasanna Sethupathy and Marius Zimand",
  title =        "Power balance and apportionment algorithms for the
                 {United States Congress}",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "3",
  pages =        "1:1--1:??",
  month =        "????",
  year =         "1998",
  CODEN =        "????",
  DOI =          "http://doi.acm.org/10.1145/297096.297106",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Oct 6 16:02:31 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "We measure the performance, in the task of
                 apportioning the Congress of the United States, of an
                 algorithm combining a heuristic-driven (simulated
                 annealing) search with an exact-computation dynamic
                 programming evaluation of the apportionments visited in
                 the search. We compare this with the actual algorithm
                 currently used in the United States to apportion
                 Congress, and with a number of other algorithms that
                 have been proposed. We conclude that on every set of
                 census data in this country's history, the
                 heuristic-driven apportionment provably yields far
                 fairer apportionments than those of any of the other
                 algorithms considered, including the algorithm currently
                 used by the United States for Congressional
                 apportionment.",
  acknowledgement = ack-nhfb,
  articleno =    "1",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
  keywords =     "apportionment algorithms; power indices; simulated
                 annealing",
}
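
%%% The ``algorithm currently used in the United States'' is the
%%% Huntington--Hill method of equal proportions; a minimal sketch (the
%%% populations in the usage comment are invented):
%%%
%%%     import heapq, math
%%%
%%%     def huntington_hill(populations, seats):
%%%         # Give each state one seat, then repeatedly award the next
%%%         # seat to the state with the largest priority value
%%%         # pop / sqrt(n * (n + 1)), where n is its current seat count.
%%%         counts = {s: 1 for s in populations}
%%%         heap = [(-p / math.sqrt(2), s) for s, p in populations.items()]
%%%         heapq.heapify(heap)
%%%         for _ in range(seats - len(populations)):
%%%             _, s = heapq.heappop(heap)
%%%             counts[s] += 1
%%%             n = counts[s]
%%%             heapq.heappush(
%%%                 heap, (-populations[s] / math.sqrt(n * (n + 1)), s))
%%%         return counts
%%%
%%%     # e.g. huntington_hill({"A": 21878, "B": 9713, "C": 4167}, 10)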

@Article{Cho:1998:WBL,
  author =       "Seonghun Cho and Sartaj Sahni",
  title =        "Weight-biased leftist trees and modified skip lists",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "3",
  pages =        "2:1--2:??",
  month =        "????",
  year =         "1998",
  CODEN =        "????",
  DOI =          "http://doi.acm.org/10.1145/297096.297111",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Oct 6 16:02:31 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "We propose the weight biased leftist tree as an
                 alternative to traditional leftist trees [CRAN72] for
                 the representation of mergeable priority queues. A
                 modified version of skip lists [PUGH90] that uses fixed
                 size nodes is also proposed. Experimental results show
                 our modified skip list structure is faster than the
                 original skip list structure for the representation of
                 dictionaries. Experimental results comparing weight
                 biased leftist trees and competing priority queue
                 structures are presented.",
  acknowledgement = ack-nhfb,
  articleno =    "2",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
  keywords =     "dictionary; leftist trees; priority queues; skip
                 lists",
}
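
%%% The weight-biased merge that underlies the entry above can be
%%% sketched recursively (the paper's version runs top-down in a single
%%% pass; here a node's weight is the size of its subtree):
%%%
%%%     class Node:
%%%         def __init__(self, key):
%%%             self.key, self.left, self.right = key, None, None
%%%             self.weight = 1
%%%
%%%     def weight(t):
%%%         return t.weight if t else 0
%%%
%%%     def merge(a, b):
%%%         # Merge two min-trees along their right spines, swapping
%%%         # children so the heavier subtree stays on the left.
%%%         if not a or not b:
%%%             return a or b
%%%         if b.key < a.key:
%%%             a, b = b, a
%%%         a.right = merge(a.right, b)
%%%         a.weight = 1 + weight(a.left) + weight(a.right)
%%%         if weight(a.left) < weight(a.right):
%%%             a.left, a.right = a.right, a.left
%%%         return a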

@Article{Yan:1998:LBE,
  author =       "Yong Yan and Xiaodong Zhang",
  title =        "Lock bypassing: an efficient algorithm for
                 concurrently accessing priority heaps",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "3",
  pages =        "3:1--3:??",
  month =        "????",
  year =         "1998",
  CODEN =        "????",
  DOI =          "http://doi.acm.org/10.1145/297096.297116",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Oct 6 16:02:31 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "The heap representation of priority queues is one of
                 the most widely used data structures in the design of
                 parallel algorithms. Efficiently exploiting the
                 parallelism of a priority heap has significant
                 influence on the efficiency of a wide range of
                 applications and parallel algorithms. In this paper, we
                 propose an aggressive priority heap operating
                 algorithm, called the lock bypassing algorithm (LB) on
                 shared memory systems. This algorithm minimizes
                 interference of concurrent enqueue and dequeue
                 operations on priority heaps while keeping the strict
                 priority property: a dequeue always returns the minimum
                 of a heap. The unique idea that distinguishes the LB
                 algorithm from previous concurrent algorithms on
                 priority heaps is the use of locking-on-demand and
                 lock-bypassing techniques to minimize locking
                 granularity and to maximize parallelism. The LB
                 algorithm allows an enqueue operation to bypass the
                 locks along its insertion path until it reaches a
                 possible place where it can perform the insertion.
                 Meanwhile a dequeue operation also makes its locking
                 range and locking period as small as possible by
                 carefully tuning its execution procedure. The LB
                 algorithm is shown to be correct in terms of deadlock
                 freedom and heap consistency. The performance of the LB
                 algorithm was evaluated analytically and experimentally
                 in comparison with previous algorithms. Analytical
                 results show that the LB algorithm reduces by half the
                 number of locks waited for in the worst case by
                 previous algorithms. The experimental results show that
                 the LB algorithm outperforms previously designed
                 algorithms by up to a factor of 2 in hold time.",
  acknowledgement = ack-nhfb,
  articleno =    "3",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
  keywords =     "aggressive locking; parallel algorithm; performance
                 evaluation; priority heap; shared-memory system",
}

@Article{Helman:1998:NDP,
  author =       "David R. Helman and Joseph J{\'a}J{\'a} and David A.
                 Bader",
  title =        "A new deterministic parallel sorting algorithm with an
                 experimental evaluation",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "3",
  pages =        "4:1--4:??",
  month =        "????",
  year =         "1998",
  CODEN =        "????",
  DOI =          "http://doi.acm.org/10.1145/297096.297128",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Oct 6 16:02:31 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "We introduce a new deterministic parallel sorting
                 algorithm for distributed memory machines based on the
                 regular sampling approach. The algorithm uses only two
                 rounds of regular all-to-all personalized communication
                 in a scheme that yields very good load balancing with
                 virtually no overhead. Moreover, unlike previous
                 variations, our algorithm efficiently handles the
                 presence of duplicate values without the overhead of
                 tagging each element with a unique identifier. This
                 algorithm was implemented in SPLIT-C and run on a
                 variety of platforms, including the Thinking Machines
                 CM-5, the IBM SP-2-WN, and the Cray Research T3D. We
                 ran our code using widely different benchmarks to
                 examine the dependence of our algorithm on the input
                 distribution. Our experimental results illustrate the
                 efficiency and scalability of our algorithm across
                 different platforms. In fact, the performance compares
                 closely to that of our random sample sort algorithm,
                 which seems to outperform all similar algorithms known
                 to the authors on these platforms. Together, their
                 performance is nearly invariant over the set of input
                 distributions, unlike previous efficient algorithms.
                 However, unlike our randomized sorting algorithm, the
                 performance and memory requirements of our regular
                 sorting algorithm can be deterministically
                 guaranteed.",
  acknowledgement = ack-nhfb,
  articleno =    "4",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
  keywords =     "generalized sorting; integer sorting; parallel
                 algorithms; parallel performance; sorting by regular
                 sampling",
}
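
%%% A sequential simulation shows the regular-sampling structure of such
%%% an algorithm: local sorts, p regular samples per block, p - 1
%%% splitters, an ``all-to-all'' redistribution, and per-bucket sorts.
%%% This is the textbook PSRS skeleton, not the paper's refined splitter
%%% rule:
%%%
%%%     import bisect
%%%
%%%     def psrs(data, p):
%%%         # Assumes len(data) >= p * p.
%%%         blocks = [sorted(data[i::p]) for i in range(p)]  # local sorts
%%%         samples = sorted(b[len(b) * j // p]
%%%                          for b in blocks for j in range(p))
%%%         splitters = samples[p::p][:p - 1]
%%%         buckets = [[] for _ in range(p)]
%%%         for b in blocks:                                 # all-to-all
%%%             cuts = ([0] + [bisect.bisect_left(b, s) for s in splitters]
%%%                     + [len(b)])
%%%             for i in range(p):
%%%                 buckets[i].extend(b[cuts[i]:cuts[i + 1]])
%%%         return [x for bkt in buckets for x in sorted(bkt)]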

@Article{Frigioni:1998:EAD,
  author =       "Daniele Frigioni and Mario Ioffreda and Umberto Nanni
                 and Giulio Pasqualone",
  title =        "Experimental analysis of dynamic algorithms for the
                 single",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "3",
  pages =        "5:1--5:??",
  month =        "????",
  year =         "1998",
  CODEN =        "????",
  DOI =          "http://doi.acm.org/10.1145/297096.297147",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Oct 6 16:02:31 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "In this paper we propose the first experimental study
                 of the fully dynamic single-source shortest-paths
                 problem on directed graphs with positive real edge
                 weights. In particular, we perform an experimental
                 analysis of three different algorithms: Dijkstra's
                 algorithm, and the two output bounded algorithms
                 proposed by Ramalingam and Reps in [30] and by
                 Frigioni, Marchetti-Spaccamela and Nanni in [18],
                 respectively. The main goal of this paper is to provide
                 a first experimental evidence for: (a) the
                 effectiveness of dynamic algorithms for shortest paths
                 with respect to a traditional static approach to this
                 problem; (b) the validity of the theoretical model of
                 output boundedness to analyze dynamic graph algorithms.
                 Besides randomly generated graphs, useful to capture the
                 'asymptotic' behavior of the algorithms, we also
                 performed experiments on a widely used
                 graph from the real world, i.e., the Internet graph.",
  acknowledgement = ack-nhfb,
  articleno =    "5",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
  keywords =     "dynamic algorithms; experimental analysis of
                 algorithms; shortest paths",
}
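
%%% The static baseline in the comparison is Dijkstra's algorithm; a
%%% minimal heap-based sketch (the dynamic algorithms instead repair only
%%% the part of the shortest-path tree affected by an edge update):
%%%
%%%     import heapq
%%%
%%%     def dijkstra(adj, source):
%%%         # adj: node -> list of (neighbor, positive weight).
%%%         dist = {source: 0.0}
%%%         heap = [(0.0, source)]
%%%         while heap:
%%%             d, u = heapq.heappop(heap)
%%%             if d > dist.get(u, float("inf")):
%%%                 continue                     # stale heap entry
%%%             for v, w in adj.get(u, ()):
%%%                 if d + w < dist.get(v, float("inf")):
%%%                     dist[v] = d + w
%%%                     heapq.heappush(heap, (d + w, v))
%%%         return dist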

@Article{Magun:1998:GMA,
  author =       "Jakob Magun",
  title =        "Greeding matching algorithms, an experimental study",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "3",
  pages =        "6:1--6:??",
  month =        "????",
  year =         "1998",
  CODEN =        "????",
  DOI =          "http://doi.acm.org/10.1145/297096.297131",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Oct 6 16:02:31 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "We conduct an experimental study of several greedy
                 algorithms for finding large matchings in graphs.
                 Further we propose a new graph reduction, called
                 $k$-Block Reduction, and present two novel algorithms
                 using extra heuristics in the matching step and
                 $k$-Block Reduction for $k = 3$. Greedy matching
                 algorithms can be used for finding a good approximation
                 of the maximum matching in a graph $G$ if no exact
                 solution is required, or as a fast preprocessing step
                 to some other matching algorithm. The studied greedy
                 algorithms run in $O(m)$. They are easy to implement
                 and their correctness and their running time are simple
                 to prove. Our experiments show that a good greedy
                 algorithm loses on average at most one edge on random
                 graphs from $G(n,p)$ with up to 10,000 vertices.
                 Furthermore the experiments show for which edge
                 densities in random graphs the maximum matching problem
                 is difficult to solve.",
  acknowledgement = ack-nhfb,
  articleno =    "6",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
}
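
%%% The simplest of the greedy rules studied above keeps an edge whenever
%%% both endpoints are still free; a minimal sketch (the paper's stronger
%%% heuristics and $k$-Block Reduction are not reproduced):
%%%
%%%     def greedy_matching(edges):
%%%         # One O(m) pass over the edge list.
%%%         matched, matching = set(), []
%%%         for u, v in edges:
%%%             if u not in matched and v not in matched:
%%%                 matching.append((u, v))
%%%                 matched.update((u, v))
%%%         return matching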

@Article{Andersson:1998:IR,
  author =       "Arne Andersson and Stefan Nilsson",
  title =        "Implementing radixsort",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "3",
  pages =        "7:1--7:??",
  month =        "????",
  year =         "1998",
  CODEN =        "????",
  DOI =          "http://doi.acm.org/10.1145/297096.297136",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Oct 6 16:02:31 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "We present and evaluate several optimization and
                 implementation techniques for string sorting. In
                 particular, we study a recently published radix sorting
                 algorithm, Forward radixsort, that has a provably good
                 worst-case behavior. Our experimental results indicate
                 that radix sorting is considerably faster (often more
                 than twice as fast) than comparison-based sorting
                 methods. This is true even for small input sequences.
                 We also show that it is possible to implement a
                 radixsort with good worst-case running time without
                 sacrificing average-case performance. Our
                 implementations are competitive with the best
                 previously published string sorting programs.",
  acknowledgement = ack-nhfb,
  articleno =    "7",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
  keywords =     "adaptive radixsort; algorithms; forward radixsort;
                 radix sorting; sorting; string sorting",
}
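
%%% The textbook MSD string radix sort that Forward radixsort refines can
%%% be sketched as follows (the paper's worst-case safeguards, which
%%% limit how often each character is inspected, are omitted):
%%%
%%%     def msd_radixsort(strings, depth=0):
%%%         # Bucket by the character at `depth`, then recurse per
%%%         # bucket; strings exhausted at this depth come first.
%%%         if len(strings) <= 1:
%%%             return strings
%%%         done, buckets = [], {}
%%%         for s in strings:
%%%             if len(s) == depth:
%%%                 done.append(s)
%%%             else:
%%%                 buckets.setdefault(s[depth], []).append(s)
%%%         for c in sorted(buckets):
%%%             done += msd_radixsort(buckets[c], depth + 1)
%%%         return done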

@Article{Cherkassky:1998:APC,
  author =       "Boris V. Cherkassky and Andrew V. Goldberg and Paul
                 Martin and Jo{\~a}o C. Setubal and Jorge Stolfi",
  title =        "Augment or push: a computational study of bipartite
                 matching and unit-capacity flow algorithms",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "3",
  pages =        "8:1--8:??",
  month =        "????",
  year =         "1998",
  CODEN =        "????",
  DOI =          "http://doi.acm.org/10.1145/297096.297140",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Oct 6 16:02:31 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "We conduct a computational study of unit capacity flow
                 and bipartite matching algorithms. Our goal is to
                 determine which variant of the push-relabel method is
                 most efficient in practice and to compare push-relabel
                 algorithms with augmenting path algorithms. We have
                 implemented and compared three push-relabel algorithms,
                 three augmenting-path algorithms (one of which is new),
                 and one augment-relabel algorithm. The depth-first
                 search augmenting path algorithm was thought to be a
                 good choice for the bipartite matching problem, but our
                 study shows that it is not robust (meaning that it is
                 not consistently fast on all or most inputs). For the
                 problems we study, our implementations of the FIFO and
                 lowest-level selection push-relabel algorithms have the
                 most robust asymptotic rate of growth and work best
                 overall. Augmenting path algorithms, although not as
                 robust, on some problem classes are faster by a
                 moderate constant factor. Our study includes several
                 new problem families and input graphs with as many as
                 $5 \times 10^5$ vertices.",
  acknowledgement = ack-nhfb,
  articleno =    "8",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
}
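
%%% The depth-first-search augmenting-path algorithm mentioned above is
%%% short enough to sketch (Kuhn's algorithm); the push-relabel codes the
%%% study finds more robust are substantially longer:
%%%
%%%     def bipartite_matching(adj_left, right_nodes):
%%%         # adj_left: left vertex -> iterable of right neighbors.
%%%         match = {r: None for r in right_nodes}  # right -> left mate
%%%
%%%         def augment(u, seen):
%%%             for v in adj_left.get(u, ()):
%%%                 if v not in seen:
%%%                     seen.add(v)
%%%                     if match[v] is None or augment(match[v], seen):
%%%                         match[v] = u
%%%                         return True
%%%             return False
%%%
%%%         return sum(augment(u, set()) for u in adj_left)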

@Article{Radzik:1998:IDT,
  author =       "Tomasz Radzik",
  title =        "Implementation of dynamic trees with in-subtree
                 operations",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "3",
  pages =        "9:1--9:??",
  month =        "????",
  year =         "1998",
  CODEN =        "????",
  DOI =          "http://doi.acm.org/10.1145/297096.297144",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Oct 6 16:02:31 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "We describe an implementation of dynamic trees with
                 'in-subtree' operations. Our implementation follows
                 Sleator and Tarjan's framework of dynamic-tree
                 implementations based on splay trees. We consider the
                 following two examples of 'in-subtree' operations. (a)
                 For a given node v, find a node with the minimum key in
                 the subtree rooted at v. (b) For a given node v, find a
                 random node with key X in the subtree rooted at v
                 (value X is fixed throughout the whole computation).
                 The first operation may provide support for edge
                 deletions in the dynamic minimum spanning tree problem.
                 The second one may be useful in local search methods
                 for degree-constrained minimum spanning tree problems.
                 We conducted experiments with our dynamic-tree
                 implementation within these two contexts, and the
                 results suggest that this implementation may lead to
                 considerably faster codes than straightforward
                 approaches do.",
  acknowledgement = ack-nhfb,
  articleno =    "9",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
  keywords =     "algorithms; design; dynamic minimum spanning tree;
                 dynamic trees; experimentation; performance; splay
                 trees",
}

@Article{Burke:1999:MAS,
  author =       "E. K. Burke and A. J. Smith",
  title =        "A memetic algorithm to schedule planned maintenance
                 for the national grid",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "4",
  pages =        "1:1--1:??",
  month =        "????",
  year =         "1999",
  CODEN =        "????",
  DOI =          "http://doi.acm.org/10.1145/347792.347801",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Oct 6 16:02:52 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "The combination of local search operators, problem
                 specific information and a genetic algorithm has
                 provided very good results in certain scheduling
                 problems, particularly in timetabling and maintenance
                 scheduling problems. The resulting algorithm from this
                 hybrid approach has been termed a Memetic Algorithm.
                 This paper investigates the use of such an algorithm
                 for the scheduling of transmission line maintenance for
                 a known problem that has been addressed in the
                 literature using a combination of a genetic algorithm
                 and greedy optimisers. This problem is concerned with
                 the scheduling of maintenance for an electricity
                 transmission network where every transmission line must
                 be maintained once within a specified time period. The
                 objective is to avoid situations where sections of the
                 network are disconnected, and to minimise the
                 overloading of lines which are in service. In this
                 paper we look at scheduling maintenance for the South
                 Wales region of the national transmission network. We
                 present and discuss, in some detail, a memetic
                 algorithm that incorporates local search operators
                 including tabu search and simulated annealing. A
                 comparison is made both with the results from previous
                 work, and against a selection of optimising techniques.
                 The approach presented in this paper shows a
                 significant improvement over previously published
                 results on previously tackled problems. We also present
                 results on another problem which has not been tackled
                 in the literature but which is closer to the real world
                 maintenance scheduling problems faced by such companies
                 as The National Grid Company plc using the South Wales
                 region.",
  acknowledgement = ack-nhfb,
  articleno =    "1",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
  keywords =     "heuristics; hill climbing; maintenance scheduling;
                 memetic algorithms; simulated annealing; tabu search",
}

@Article{Kim:1999:NSP,
  author =       "Sun Kim",
  title =        "A new string-pattern matching algorithm using
                 partitioning and hashing efficiently",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "4",
  pages =        "2:1--2:??",
  month =        "????",
  year =         "1999",
  CODEN =        "????",
  DOI =          "http://doi.acm.org/10.1145/347792.347803",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Oct 6 16:02:52 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "In this paper, we present a new string-pattern
                 matching algorithm that partitions the text into
                 segments of the input pattern length and searches for
                 pattern occurrences using a simple hashing scheme.
                 Unlike the well known Boyer--Moore style algorithm, our
                 algorithm does not compute variable shift length, thus
                 providing a conceptually simpler way to search for
                 patterns. Empirical evaluation shows that our algorithm
                 runs significantly faster than Sunday's and Horspool's
                 extensions of the Boyer--Moore algorithm. The notion of
                 the non-occurrence heuristic used in our algorithm,
                 together with a text partitioning scheme, leads to a
                 simplified scheme for searching for pattern
                 occurrences, thus yielding better run time
                 performance.",
  acknowledgement = ack-nhfb,
  articleno =    "2",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
}
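
%%% For orientation, a rolling-hash search in the same spirit (hash a
%%% window, verify on a hash hit) is sketched below. This is Rabin--Karp,
%%% not the paper's scheme, which avoids per-position rolling by cutting
%%% the text into pattern-length segments:
%%%
%%%     def rabin_karp(pattern, text, base=257, mod=(1 << 61) - 1):
%%%         m, n = len(pattern), len(text)
%%%         if m == 0 or m > n:
%%%             return -1
%%%         def h(s):
%%%             v = 0
%%%             for c in s:
%%%                 v = (v * base + ord(c)) % mod
%%%             return v
%%%         ph, th = h(pattern), h(text[:m])
%%%         top = pow(base, m - 1, mod)
%%%         for i in range(n - m + 1):
%%%             if th == ph and text[i:i + m] == pattern:
%%%                 return i
%%%             if i + m < n:          # roll the window one character
%%%                 th = ((th - ord(text[i]) * top) * base
%%%                       + ord(text[i + m])) % mod
%%%         return -1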

@Article{Eiron:1999:MMC,
  author =       "N. Eiron and M. Rodeh and I. Steinwarts",
  title =        "Matrix multiplication: a case study of enhanced data
                 cache utilization",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "4",
  pages =        "3:1--3:??",
  month =        "????",
  year =         "1999",
  CODEN =        "????",
  DOI =          "http://doi.acm.org/10.1145/347792.347806",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Oct 6 16:02:52 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "Modern machines present two challenges to algorithm
                 engineers and compiler writers: They have superscalar,
                 super-pipelined structure, and they have elaborate
                 memory subsystems specifically designed to reduce
                 latency and increase bandwidth. Matrix multiplication
                 is a classical benchmark for experimenting with
                 techniques used to exploit machine architecture and to
                 overcome the limitations of contemporary memory
                 subsystems. This research aims at advancing the state
                 of the art of algorithm engineering by balancing
                 instruction level parallelism, two levels of data
                 tiling, copying to provably avoid any cache conflicts,
                 and prefetching in parallel to computational
                 operations, in order to fully exploit the memory
                 bandwidth. Measurements on IBM's RS/6000 43P
                 workstation show that the resultant matrix
                 multiplication algorithm outperforms IBM's ESSL by
                 6.8--31.8\%, is less sensitive to the size of the input
                 data, and scales better. In this paper we introduce a
                 cache-aware algorithm for matrix multiplication. We
                 also suggest generic guidelines that may be applied to
                 compute-intensive algorithms to efficiently utilize the
                 data cache. We believe that some of our concepts may be
                 embodied in compilers.",
  acknowledgement = ack-nhfb,
  articleno =    "3",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
  keywords =     "algorithms; BLAS; blocking; cache; matrix
                 multiplication; performance; prefetching",
}
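
%%% One level of the data tiling described above looks as follows in a
%%% minimal sketch (the paper adds a second tiling level, conflict-free
%%% copying, and prefetching on top of this skeleton):
%%%
%%%     def blocked_matmul(A, B, n, bs=32):
%%%         # Multiply n x n row-major matrices in bs x bs blocks so the
%%%         # working set of each block pair fits in cache.
%%%         C = [[0.0] * n for _ in range(n)]
%%%         for ii in range(0, n, bs):
%%%             for kk in range(0, n, bs):
%%%                 for jj in range(0, n, bs):
%%%                     for i in range(ii, min(ii + bs, n)):
%%%                         Ai, Ci = A[i], C[i]
%%%                         for k in range(kk, min(kk + bs, n)):
%%%                             aik, Bk = Ai[k], B[k]
%%%                             for j in range(jj, min(jj + bs, n)):
%%%                                 Ci[j] += aik * Bk[j]
%%%         return C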

@Article{Erlebach:1999:EIO,
  author =       "T. Erlebach and K. Jansen",
  title =        "Efficient implementation of an optimal greedy
                 algorithm for wavelength assignment in directed tree
                 networks",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "4",
  pages =        "4:1--4:??",
  month =        "????",
  year =         "1999",
  CODEN =        "????",
  DOI =          "http://doi.acm.org/10.1145/347792.347808",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Oct 6 16:02:52 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "In all-optical networks with wavelength-division
                 multiplexing several connections can share a physical
                 link if the signals are transmitted on different
                 wavelengths. As the number of available wavelengths is
                 limited in practice, it is important to find wavelength
                 assignments minimizing the number of different
                 wavelengths used. This path coloring problem is
                 NP-hard, and the best known polynomial-time
                 approximation algorithm for directed tree networks
                 achieves approximation ratio $5 / 3$, which is optimal
                 in the class of greedy algorithms for this problem. It
                 is shown how the algorithm can be modified in order to
                 improve its running-time to $O({\rm Tec}(N,L))$ for
                 sets of paths with maximum load $L$ in trees with $N$
                 nodes, where ${\rm Tec}(n, k)$ is the time for
                 edge-coloring a $k$-regular bipartite graph with $n$
                 nodes. An implementation of this efficient version of
                 the algorithm in C++ using the LEDA class library is
                 described, and experimental results regarding the
                 running-times and the number of wavelengths used are
                 reported. An additional heuristic that reduces the
                 number of wavelengths used in the average case while
                 maintaining the worst-case bound of $5 L / 3$ is
                 described.",
  acknowledgement = ack-nhfb,
  articleno =    "4",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
  keywords =     "algorithms; bipartite edge coloring; directed tree
                 networks; experimentation; path coloring",
}

@Article{Huson:1999:HTR,
  author =       "D. Huson and S. Nettles and K. Rice and T. Warnow and
                 S. Yooseph",
  title =        "Hybrid tree reconstruction methods",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "4",
  pages =        "5:1--5:??",
  month =        "????",
  year =         "1999",
  CODEN =        "????",
  DOI =          "http://doi.acm.org/10.1145/347792.347812",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Oct 6 16:02:52 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "A major computational problem in biology is the
                 reconstruction of evolutionary trees for species sets,
                 and accuracy is measured by comparing the topologies of
                 the reconstructed tree and the model tree. One of the
                 major debates in the field is whether large
                 evolutionary trees can be reconstructed with even
                 approximate accuracy from biomolecular sequences of
                 realistically bounded lengths (up to about 2000
                 nucleotides) using standard techniques (polynomial-time
                 distance methods, and heuristics for NP-hard
                 optimization problems). Using both analytical and
                 experimental techniques, we show that on large trees,
                 the two most popular methods in systematic biology,
                 Neighbor-Joining and Maximum Parsimony heuristics, as
                 well as two promising methods introduced by theoretical
                 computer scientists, are all likely to have significant
                 errors in the topology reconstruction of the model
                 tree. We also present a new general technique for
                 combining outputs of different methods (thus producing
                 hybrid methods), and show experimentally how one such
                 hybrid method has better performance than its
                 constituent parts.",
  acknowledgement = ack-nhfb,
  articleno =    "5",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
}

@Article{Jacob:1999:CSR,
  author =       "R. Jacob and M. Marathe and K. Nagel",
  title =        "A computational study of routing algorithms for
                 realistic transportation networks",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "4",
  pages =        "6:1--6:??",
  month =        "????",
  year =         "1999",
  CODEN =        "????",
  DOI =          "http://doi.acm.org/10.1145/347792.347814",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Oct 6 16:02:52 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "We carry out an experimental analysis of a number of
                 shortest-path (routing) algorithms investigated in the
                 context of the TRANSIMS (TRansportation ANalysis and
                 SIMulation System) project. The main focus of the paper
                 is to study how various heuristic as well as exact
                 solutions and associated data structures affect the
                 computational performance of the software developed for
                 realistic transportation networks. For this purpose we
                 have used a road network representing, with a high
                 degree of resolution, the Dallas--Fort Worth urban
                 area. We
                 discuss and experimentally analyze various one-to-one
                 shortest-path algorithms. These include classical exact
                 algorithms studied in the literature as well as
                 heuristic solutions that are designed to take into
                 account the geometric structure of the input instances.
                 Computational results are provided to compare
                 empirically the efficiency of various algorithms. Our
                 studies indicate that a modified Dijkstra's algorithm
                 is computationally fast and an excellent candidate for
                 use in various transportation planning applications as
                 well as ITS-related technologies.",
  acknowledgement = ack-nhfb,
  articleno =    "6",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
  keywords =     "design and analysis of algorithms; experimental
                 analysis; network design; shortest-paths algorithms;
                 transportation planning",
}
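
%%% As a hedged illustration of the "modified Dijkstra's algorithm"
%%% the abstract alludes to (not the authors' TRANSIMS code; all
%%% identifiers are illustrative), a minimal one-to-one Dijkstra in
%%% C++ with early termination: the search stops as soon as the
%%% destination is settled, since its label can no longer improve.
%%%
%%%   #include <limits>
%%%   #include <queue>
%%%   #include <utility>
%%%   #include <vector>
%%%
%%%   // Adjacency list: g[u] holds (neighbour, edge weight) pairs.
%%%   using Graph = std::vector<std::vector<std::pair<int, double>>>;
%%%
%%%   double one_to_one_dijkstra(const Graph &g, int s, int t) {
%%%       const double INF = std::numeric_limits<double>::infinity();
%%%       std::vector<double> dist(g.size(), INF);
%%%       using QI = std::pair<double, int>;  // (tentative distance, node)
%%%       std::priority_queue<QI, std::vector<QI>, std::greater<QI>> pq;
%%%       dist[s] = 0.0;
%%%       pq.push({0.0, s});
%%%       while (!pq.empty()) {
%%%           auto [d, u] = pq.top(); pq.pop();
%%%           if (d > dist[u]) continue;  // stale queue entry
%%%           if (u == t) return d;       // early exit: t is settled
%%%           for (auto [v, w] : g[u])
%%%               if (dist[u] + w < dist[v]) {
%%%                   dist[v] = dist[u] + w;
%%%                   pq.push({dist[v], v});
%%%               }
%%%       }
%%%       return INF;  // t unreachable from s
%%%   }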

@Article{Muller-Hannemann:1999:IWM,
  author =       "M. M{\"u}ller-Hannemann and A. Schwartz",
  title =        "Implementing weighted $b$-matching algorithms: towards
                 a flexible software design",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "4",
  pages =        "7:1--7:??",
  month =        "????",
  year =         "1999",
  CODEN =        "????",
  DOI =          "http://doi.acm.org/10.1145/347792.347815",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Oct 6 16:02:52 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "We present a case study on the design of an
                 implementation of a fundamental combinatorial
                 optimization problem: weighted b-matching. Although
                 this problem is well-understood in theory and efficient
                 algorithms are known, only little experience with
                 implementations is available. This study was motivated
                 by the practical need for an efficient b-matching
                 solver as a subroutine in our approach to a mesh
                 refinement problem in computer-aided design (CAD). The
                 intent of this paper is to demonstrate the importance
                 of flexibility and adaptability in the design of
                 complex algorithms, but also to discuss how such goals
                 can be achieved for matching algorithms by the use of
                 design patterns. Starting from the basis of the famous
                 blossom algorithm we explain how to exploit in
                 different ways the flexibility of our software design
                 which allows an incremental improvement of efficiency
                 by exchanging subalgorithms and data structures. In a
                 comparison with a code by Miller and Pekny we also
                 demonstrate that, even without fine-tuning, our
                 implementation is very competitive. Our code is
                 significantly
                 faster, with improvement factors ranging between 15 and
                 466 on TSPLIB instances.",
  acknowledgement = ack-nhfb,
  articleno =    "7",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
  keywords =     "algorithms; b-matching; blossom algorithm; design
                 patterns; experimentation; object-oriented design;
                 software design",
}

@Article{Schwerdt:1999:CWT,
  author =       "J. Schwerdt and M. Smid and J. Majhi and R. Janardan",
  title =        "Computing the width of a three-dimensional point set:
                 an experimental study",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "4",
  pages =        "8:1--8:??",
  month =        "????",
  year =         "1999",
  CODEN =        "????",
  DOI =          "http://doi.acm.org/10.1145/347792.347816",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Oct 6 16:02:52 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "We describe a robust, exact, and efficient
                 implementation of an algorithm that computes the width
                 of a three-dimensional point set. The algorithm is
                 based on efficient solutions to problems that are at
                 the heart of computational geometry: three-dimensional
                 convex hulls, point location in planar graphs, and
                 computing intersections between line segments. The
                 latter two problems have to be solved for planar graphs
                 and segments on the unit sphere, rather than in the
                 two-dimensional plane. The implementation is based on
                 LEDA, and the geometric objects are represented using
                 exact rational arithmetic.",
  acknowledgement = ack-nhfb,
  articleno =    "8",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
  keywords =     "computational geometry; implementation; layered
                 manufacturing; spherical geometry",
}

@Article{Eppstein:2000:FHC,
  author =       "David Eppstein",
  title =        "Fast hierarchical clustering and other applications of
                 dynamic closest pairs",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "5",
  pages =        "1:1--1:??",
  month =        "????",
  year =         "2000",
  CODEN =        "????",
  DOI =          "http://doi.acm.org/10.1145/351827.351829",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Oct 6 16:03:09 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "We develop data structures for dynamic closest pair
                 problems with arbitrary distance functions, that do not
                 necessarily come from any geometric structure on the
                 objects. Based on a technique previously used by the
                 author for Euclidean closest pairs, we show how to
                 insert and delete objects from an $n$-object set,
                 maintaining the closest pair, in $O(n \log^2 n)$ time
                 per update and $O(n)$ space. With quadratic space, we
                 can instead use a quadtree-like structure to achieve an
                 optimal time bound, $O(n)$ per update. We apply these
                 data structures to hierarchical clustering, greedy
                 matching, and TSP heuristics, and discuss other
                 potential applications in machine learning, Gr{\"o}bner
                 bases, and local improvement algorithms for partition
                 and placement problems. Experiments show our new
                 methods to be faster in practice than previously used
                 heuristics.",
  acknowledgement = ack-nhfb,
  articleno =    "1",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
  keywords =     "conga line data structure; matching; nearest-neighbor
                 heuristic; quadtree; TSP",
}
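
%%% For contrast with the data structures above, a naive C++ baseline
%%% (explicitly not Eppstein's conga-line structure, and specialized
%%% here to Euclidean distance; all identifiers are illustrative):
%%% rescan on every update, O(n) per insertion, O(n^2) per deletion.
%%%
%%%   #include <cmath>
%%%   #include <limits>
%%%   #include <utility>
%%%   #include <vector>
%%%
%%%   struct NaiveClosestPair {
%%%       using Pt = std::pair<double, double>;
%%%       std::vector<Pt> pts;
%%%       double best = std::numeric_limits<double>::infinity();
%%%       static double dist(const Pt &a, const Pt &b) {
%%%           return std::hypot(a.first - b.first, a.second - b.second);
%%%       }
%%%       void insert(const Pt &p) {          // O(n): compare p to all
%%%           for (const Pt &q : pts) best = std::min(best, dist(p, q));
%%%           pts.push_back(p);
%%%       }
%%%       void erase(std::size_t i) {         // O(n^2): full rescan
%%%           pts.erase(pts.begin() + i);
%%%           best = std::numeric_limits<double>::infinity();
%%%           for (std::size_t a = 0; a < pts.size(); ++a)
%%%               for (std::size_t b = a + 1; b < pts.size(); ++b)
%%%                   best = std::min(best, dist(pts[a], pts[b]));
%%%       }
%%%   };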

@Article{Chong:2000:CBD,
  author =       "Kyun-Rak Chong and Sartaj Sahni",
  title =        "Correspondence-based data structures for double-ended
                 priority queues",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "5",
  pages =        "2:1--2:??",
  month =        "????",
  year =         "2000",
  CODEN =        "????",
  DOI =          "http://doi.acm.org/10.1145/351827.351828",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Oct 6 16:03:09 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "We describe three general methods--total, dual, and
                 leaf correspondence--that may be used to derive
                 efficient double-ended priority queues from
                 single-ended priority queues. These methods are
                 illustrated by developing double-ended priority queues
                 based on the classical heap. Experimental results
                 indicate that the leaf-correspondence method generally
                 leads to a faster double-ended priority queue than
                 either total or dual correspondence. On randomly
                 generated test sets, however, the splay tree
                 outperforms the tested correspondence-based
                 double-ended priority queues.",
  acknowledgement = ack-nhfb,
  articleno =    "2",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
  keywords =     "correspondence-based data structures; double-ended
                 priority queues; heaps; leftist trees; runtime
                 efficiency; splay trees",
}
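
%%% A hedged C++ sketch of a simpler double-ended priority queue than
%%% the correspondence structures studied above (a common two-heap
%%% baseline, not the paper's methods; all identifiers are
%%% illustrative): two single-ended heaps over the same elements with
%%% lazy deletion, where unique ids let the heaps agree on which
%%% concrete element was already removed via the other side.
%%%
%%%   #include <functional>
%%%   #include <queue>
%%%   #include <unordered_set>
%%%   #include <utility>
%%%   #include <vector>
%%%
%%%   class LazyDEPQ {
%%%       using Item = std::pair<int, long>;  // (key, unique id)
%%%       std::priority_queue<Item, std::vector<Item>,
%%%                           std::greater<Item>> mn;   // min-heap
%%%       std::priority_queue<Item> mx;                 // max-heap
%%%       std::unordered_set<long> dead;  // ids removed via other heap
%%%       long next_id = 0;
%%%       template <class H> void purge(H &h) {
%%%           while (!h.empty() && dead.count(h.top().second)) {
%%%               dead.erase(h.top().second);
%%%               h.pop();
%%%           }
%%%       }
%%%   public:
%%%       void insert(int key) {
%%%           mn.push({key, next_id});
%%%           mx.push({key, next_id});
%%%           ++next_id;
%%%       }
%%%       int erase_min() {
%%%           purge(mn);
%%%           Item x = mn.top(); mn.pop(); dead.insert(x.second);
%%%           return x.first;
%%%       }
%%%       int erase_max() {
%%%           purge(mx);
%%%           Item x = mx.top(); mx.pop(); dead.insert(x.second);
%%%           return x.first;
%%%       }
%%%   };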

@Article{Xiao:2000:IMP,
  author =       "Li Xiao and Xiaodong Zhang and Stefan A. Kubricht",
  title =        "Improving memory performance of sorting algorithms",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "5",
  pages =        "3:1--3:??",
  month =        "????",
  year =         "2000",
  CODEN =        "????",
  DOI =          "http://doi.acm.org/10.1145/351827.384245",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Oct 6 16:03:09 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "Memory hierarchy considerations during sorting
                 algorithm design and implementation play an important
                 role in significantly improving execution performance.
                 Existing algorithms mainly attempt to reduce capacity
                 misses on direct-mapped caches. To reduce other types
                 of cache misses that occur in the more common
                 set-associative caches and the TLB, we restructure the
                 mergesort and quicksort algorithms further by
                 integrating tiling, padding, and buffering techniques
                 and by repartitioning the data set. Our study shows
                 that substantial performance improvements can be
                 obtained using our new methods.",
  acknowledgement = ack-nhfb,
  articleno =    "3",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
  keywords =     "caches; memory performance; mergesort; quicksort;
                 TLB",
}
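
%%% A minimal C++ illustration of the tiling idea only (the paper's
%%% padding and buffering refinements are omitted; tile size and
%%% identifiers are illustrative): sort runs sized to the cache, then
%%% merge them in passes over the array.
%%%
%%%   #include <algorithm>
%%%   #include <vector>
%%%
%%%   void tiled_mergesort(std::vector<int> &a,
%%%                        std::size_t tile = 1 << 15) {
%%%       std::size_t n = a.size();
%%%       // Phase 1: each tile fits in cache and is sorted in place.
%%%       for (std::size_t i = 0; i < n; i += tile)
%%%           std::sort(a.begin() + i, a.begin() + std::min(i + tile, n));
%%%       // Phase 2: merge runs of doubling width.
%%%       for (std::size_t w = tile; w < n; w *= 2)
%%%           for (std::size_t i = 0; i + w < n; i += 2 * w)
%%%               std::inplace_merge(a.begin() + i, a.begin() + i + w,
%%%                                  a.begin() + std::min(i + 2 * w, n));
%%%   }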

@Article{Navarro:2000:FFS,
  author =       "Gonzalo Navarro and Mathieu Raffinot",
  title =        "Fast and flexible string matching by combining
                 bit-parallelism and suffix automata",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "5",
  pages =        "4:1--4:??",
  month =        "????",
  year =         "2000",
  CODEN =        "????",
  DOI =          "http://doi.acm.org/10.1145/351827.384246",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Oct 6 16:03:09 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "The most important features of a string matching
                 algorithm are its efficiency and its flexibility.
                 Efficiency has traditionally received more attention,
                 while flexibility in the search pattern is becoming a
                 more and more important issue. Most classical string
                 matching algorithms are aimed at quickly finding an
                 exact pattern in a text, the most famous being
                 Knuth--Morris--Pratt (KMP) and the Boyer--Moore (BM)
                 family. A recent development uses deterministic 'suffix
                 automata' to design new optimal string matching
                 algorithms, e.g. BDM and TurboBDM. Flexibility has been
                 addressed quite separately by the use of
                 'bit-parallelism', which simulates automata in their
                 nondeterministic form by using bits and exploiting the
                 intrinsic parallelism inside the computer word, e.g.
                 the Shift-Or algorithm. Those algorithms are extended
                 to handle classes of characters and errors in the
                 pattern and/or in the text, their drawback being their
                 inability to skip text characters. In this paper we
                 merge bit-parallelism and suffix automata, so that a
                 nondeterministic suffix automaton is simulated using
                 bit-parallelism. The resulting algorithm, called BNDM,
                 obtains the best from both worlds. It is much simpler
                 to implement than BDM and nearly as simple as Shift-Or.
                 It inherits from Shift-Or the ability to handle
                 flexible patterns and from BDM the ability to skip
                 characters. BNDM is 30\%--40\% faster than BDM and up to
                 7 times faster than Shift-Or. When compared to the
                 fastest existing algorithms on exact patterns (which
                 belong to the BM family), BNDM is from 20\% slower to 3
                 times faster, depending on the alphabet size. With
                 respect to flexible pattern searching, BNDM is by far
                 the fastest technique to deal with classes of
                 characters and is competitive to search allowing
                 errors. In particular, BNDM seems well suited to
                 computational biology applications, since it is the
                 fastest algorithm to search on DNA sequences and
                 flexible searching is an important problem in that
                 area. As a theoretical development related to flexible
                 pattern matching, we introduce a new automaton to
                 recognize suffixes of patterns with classes of
                 characters. To the best of our knowledge, this
                 automaton has not been studied before.",
  acknowledgement = ack-nhfb,
  articleno =    "4",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
}
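
%%% A compact C++ rendering of BNDM for exact matching of patterns up
%%% to the word size, following the scheme the abstract describes (a
%%% nondeterministic suffix automaton simulated with bit-parallelism);
%%% a sketch, not the authors' reference implementation.
%%%
%%%   #include <cstdint>
%%%   #include <string>
%%%   #include <vector>
%%%
%%%   std::vector<std::size_t> bndm(const std::string &p,
%%%                                 const std::string &t) {
%%%       std::size_t m = p.size(), n = t.size();
%%%       std::vector<std::size_t> occ;
%%%       if (m == 0 || m > 64 || n < m) return occ;
%%%       uint64_t B[256] = {0};  // B[c]: positions of c in p, reversed
%%%       for (std::size_t i = 0; i < m; ++i)
%%%           B[(unsigned char)p[i]] |= 1ULL << (m - 1 - i);
%%%       uint64_t mask = (m == 64) ? ~0ULL : (1ULL << m) - 1;
%%%       std::size_t pos = 0;
%%%       while (pos + m <= n) {
%%%           uint64_t D = mask;          // all pattern factors alive
%%%           std::size_t last = m;       // safe shift for this window
%%%           long j = (long)m - 1;       // scan window right to left
%%%           while (D) {
%%%               D &= B[(unsigned char)t[pos + j]];
%%%               --j;
%%%               if (D & (1ULL << (m - 1))) {   // a pattern prefix ends here
%%%                   if (j >= 0) last = (std::size_t)j + 1;
%%%                   else { occ.push_back(pos); break; }  // full match
%%%               }
%%%               D = (D << 1) & mask;
%%%           }
%%%           pos += last;                // skip characters, BDM-style
%%%       }
%%%       return occ;
%%%   }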

@Article{Caldwell:2000:DIM,
  author =       "Andrew E. Caldwell and Andrew B. Kahng and Igor L.
                 Markov",
  title =        "Design and implementation of move-based heuristics for
                 {VLSI} hypergraph partitioning",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "5",
  pages =        "5:1--5:??",
  month =        "????",
  year =         "2000",
  CODEN =        "????",
  DOI =          "http://doi.acm.org/10.1145/351827.384247",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Oct 6 16:03:09 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "We summarize the techniques of implementing move-based
                 hypergraph partitioning heuristics and evaluating their
                 performance in the context of VLSI design applications.
                 Our first contribution is a detailed software
                 architecture, consisting of seven reusable components,
                 that allows flexible, efficient and accurate assessment
                 of the practical implications of new move-based
                 algorithms and partitioning formulations. Our second
                 contribution is an assessment of the modern context for
                 hypergraph partitioning research for VLSI design
                 applications. In particular, we discuss the current
                 level of sophistication in implementation know-how and
                 experimental evaluation, and we note how requirements
                 for real-world partitioners --- if used as motivation
                 for research --- should affect the evaluation of
                 prospective contributions. Two 'implicit decisions' in
                 the implementation of the Fiduccia-Mattheyses heuristic
                 are used to illustrate the difficulty of achieving
                 meaningful experimental evaluation of new algorithmic
                 ideas.",
  acknowledgement = ack-nhfb,
  articleno =    "5",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
  keywords =     "algorithms; balanced min-cut hypergraph partitioning;
                 experimentation; measurement; performance; VLSI CAD",
}

@Article{Levine:2000:FRC,
  author =       "Matthew S. Levine",
  title =        "Finding the right cutting planes for the {TSP}",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "5",
  pages =        "6:1--6:??",
  month =        "????",
  year =         "2000",
  CODEN =        "????",
  DOI =          "http://doi.acm.org/10.1145/351827.384248",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Oct 6 16:03:09 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "Given an instance of the Traveling Salesman Problem
                 (TSP), a reasonable way to get a lower bound on the
                 optimal answer is to solve a linear programming
                 relaxation of an integer programming formulation of the
                 problem. These linear programs typically have an
                 exponential number of constraints, but in theory they
                 can be solved efficiently with the ellipsoid method as
                 long as we have an algorithm that can take a solution
                 and either declare it feasible or find a violated
                 constraint. In practice, it is often the case that many
                 constraints are violated, which raises the question of
                 how to choose among them so as to improve performance.
                 For the simplest TSP formulation it is possible to
                 efficiently find all the violated constraints, which
                 gives us a good chance to try to answer this question
                 empirically. Looking at random two-dimensional
                 Euclidean instances and the large instances from
                 TSPLIB, we ran experiments to evaluate several
                 strategies for picking among the violated constraints.
                 We found some information about which constraints to
                 prefer, which resulted in modest gains, but were unable
                 to get large improvements in performance.",
  acknowledgement = ack-nhfb,
  articleno =    "6",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
  keywords =     "algorithms; combinatorial optimization; cutting plane;
                 experimentation; minimum cut; performance; traveling
                 salesman problem",
}

@Article{Sanders:2000:FPQ,
  author =       "Peter Sanders",
  title =        "Fast priority queues for cached memory",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "5",
  pages =        "7:1--7:??",
  month =        "????",
  year =         "2000",
  CODEN =        "????",
  DOI =          "http://doi.acm.org/10.1145/351827.384249",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Oct 6 16:03:09 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "The cache hierarchy prevalent in today's
                 high-performance processors has to be taken into
                 account when designing algorithms that perform well in
                 practice. This paper advocates adapting external-memory
                 algorithms to this purpose. This idea and the
                 practical issues involved are exemplified by
                 engineering a fast priority queue suited to external
                 memory and cached memory that is based on k-way
                 merging. It improves previous external memory
                 algorithms by constant factors crucial for transferring
                 it to cached memory. Running in the cache hierarchy of
                 a workstation the algorithm is at least two times
                 faster than an optimized implementation of binary heaps
                 and 4-ary heaps for large inputs.",
  acknowledgement = ack-nhfb,
  articleno =    "7",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
  keywords =     "cache; cache efficiency; data structure; external
                 memory; heap; implementation; multi way merging;
                 priority queue; secondary storage",
}
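
%%% The k-way merging at the core of such a queue, as a hedged C++
%%% sketch (the paper's sequence heap adds insertion and deletion
%%% buffers and multiple merge levels; identifiers are illustrative):
%%% a small heap of run heads repeatedly emits the globally smallest
%%% element, while each run is read strictly sequentially -- the
%%% cache- and disk-friendly access pattern.
%%%
%%%   #include <functional>
%%%   #include <queue>
%%%   #include <utility>
%%%   #include <vector>
%%%
%%%   std::vector<int> kway_merge(const std::vector<std::vector<int>> &runs) {
%%%       using Head = std::pair<int, std::size_t>;  // (value, run index)
%%%       std::priority_queue<Head, std::vector<Head>,
%%%                           std::greater<Head>> heads;
%%%       std::vector<std::size_t> next(runs.size(), 0);
%%%       for (std::size_t r = 0; r < runs.size(); ++r)
%%%           if (!runs[r].empty()) { heads.push({runs[r][0], r}); next[r] = 1; }
%%%       std::vector<int> out;
%%%       while (!heads.empty()) {
%%%           auto [v, r] = heads.top(); heads.pop();
%%%           out.push_back(v);
%%%           if (next[r] < runs[r].size())          // advance that run
%%%               heads.push({runs[r][next[r]++], r});
%%%       }
%%%       return out;
%%%   }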

@Article{Muller-Hannemann:2000:IWM,
  author =       "Matthias M{\"u}ller-Hannemann and Alexander Schwartz",
  title =        "Implementing weighted $b$-matching algorithms:
                 insights from a computational study",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "5",
  pages =        "8:1--8:??",
  month =        "????",
  year =         "2000",
  CODEN =        "????",
  DOI =          "http://doi.acm.org/10.1145/351827.384250",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Oct 6 16:03:09 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "We present an experimental study of an implementation
                 of weighted perfect b-matching based on the primal-dual
                 blossom algorithm. Although this problem is
                 well-understood in theory and efficient algorithms are
                 known, only little experience with implementations is
                 available. In this paper several algorithmic variants
                 are compared on synthetic and application problem data
                 of very sparse graphs. This study was motivated by the
                 practical need for an efficient b-matching solver for
                 the latter application, namely as a subroutine in our
                 approach to a mesh refinement problem in computer-aided
                 design (CAD). Linear regression and operation counting
                 are used to analyze code variants. The experiments
                 confirm that a fractional jump-start speeds up the
                 algorithm, they indicate that a variant based on
                 pairing heaps is slightly superior to a k-heap variant,
                 and that scaling of large b-values is not necessary,
                 whereas a delayed blossom shrinking heuristic
                 significantly improves running times only for graphs
                 with average degree two. The fastest variant of our
                 implementation appears to be highly superior to a code
                 by Miller and Pekny (1995).",
  acknowledgement = ack-nhfb,
  articleno =    "8",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
  keywords =     "b-matching; blossom algorithm; operation counting",
}

@Article{Shibuya:2000:CSP,
  author =       "Tetsuo Shibuya",
  title =        "Computing the $n \times m$ shortest path efficiently",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "5",
  pages =        "9:1--9:??",
  month =        "????",
  year =         "2000",
  CODEN =        "????",
  DOI =          "http://doi.acm.org/10.1145/351827.384251",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Oct 6 16:03:09 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "Computation of all the shortest paths between multiple
                 sources and multiple destinations on various networks
                 is required in many problems, such as the traveling
                 salesperson problem (TSP) and the vehicle routing
                 problem (VRP). This paper proposes new algorithms that
                 compute the set of shortest paths efficiently by using
                 the A* algorithm. The efficiency and properties of
                 these algorithms are examined by using the results of
                 experiments on an actual road network.",
  acknowledgement = ack-nhfb,
  articleno =    "9",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
  keywords =     "A* algorithm; algorithms; experimentation; GIS;
                 $n \times m$ shortest paths",
}

@Article{Vishkin:2000:ELR,
  author =       "Shlomit Dascal and Uzi Vishkin",
  title =        "Experiments with list ranking for explicit
                 multi-threaded {(XMT)} instruction parallelism",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "5",
  pages =        "10:1--10:??",
  month =        "????",
  year =         "2000",
  CODEN =        "????",
  DOI =          "http://doi.acm.org/10.1145/351827.384252",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Oct 6 16:03:09 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "Algorithms for the problem of list ranking are
                 empirically studied with respect to the Explicit
                 Multi-Threaded (XMT) platform for instruction-level
                 parallelism (ILP). The main goal of this study is to
                 understand the differences between XMT and more
                 traditional parallel computing implementation
                 platforms/models as they pertain to the well studied
                 list ranking problem. The two main findings are: (i)
                 good speedups for much smaller inputs are possible and
                 (ii) in part, the first finding is based on a new
                 variant of a 1984 algorithm, called the No-Cut
                 algorithm. The paper incorporates analytic
                 (non-asymptotic) performance analysis into experimental
                 performance analysis for relatively small inputs. This
                 provides an interesting example where experimental
                 research and theoretical analysis complement one
                 another. Explicit Multi-Threading (XMT) is a
                 fine-grained computation framework introduced in our
                 SPAA'98 paper. Building on some key ideas of parallel
                 computing, XMT covers the spectrum from algorithms
                 through architecture to implementation; the main
                 implementation related innovation in XMT was through
                 the incorporation of low-overhead hardware and software
                 mechanisms (for more effective fine-grained
                 parallelism). The reader is referred to that paper for
                 detail on these mechanisms. The XMT platform aims at
                 faster single-task completion time by way of ILP.",
  acknowledgement = ack-nhfb,
  articleno =    "10",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
}
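
%%% The classical pointer-jumping approach to list ranking (one of
%%% the standard baselines in this literature, not the paper's No-Cut
%%% variant), shown as a sequential C++ simulation of the synchronous
%%% rounds: each round doubles every node's pointer while
%%% accumulating ranks, so O(log n) rounds suffice.
%%%
%%%   #include <vector>
%%%
%%%   // succ[i] = successor of i, with succ[tail] == tail.
%%%   // Returns rank[i] = number of links from i to the tail.
%%%   std::vector<int> list_rank(std::vector<int> succ) {
%%%       int n = (int)succ.size();
%%%       std::vector<int> rank(n);
%%%       for (int i = 0; i < n; ++i) rank[i] = (succ[i] == i) ? 0 : 1;
%%%       for (int round = 0; (1 << round) < n; ++round) {
%%%           std::vector<int> r2 = rank, s2 = succ;  // synchronous step
%%%           for (int i = 0; i < n; ++i)             // parallel on XMT
%%%               if (succ[i] != i) {
%%%                   r2[i] = rank[i] + rank[succ[i]];
%%%                   s2[i] = succ[succ[i]];          // pointer jumping
%%%               }
%%%           rank.swap(r2); succ.swap(s2);
%%%       }
%%%       return rank;
%%%   }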

@Article{Werneck:2000:FMC,
  author =       "Renato Werneck and Jo{\~a}o Setubal and Arlindo da
                 Conceic{\~a}o",
  title =        "Finding minimum congestion spanning trees",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "5",
  pages =        "11:1--11:??",
  month =        "????",
  year =         "2000",
  CODEN =        "????",
  DOI =          "http://doi.acm.org/10.1145/351827.384253",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Oct 6 16:03:09 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "Given a weighted graph $G = (V, E)$, a positive
                 integer $k$, and a penalty function $w_p$, we want to
                 find $k$ spanning trees on $G$, not necessarily
                 disjoint, of minimum total weight, such that the weight
                 of each edge is subject to a penalty given by $w_p$ if
                 it belongs to more than one tree. The objective
                 function to be minimized is $\sum_{e \in E} W_e(i_e)$,
                 where $i_e$ is the number of times edge $e$ appears in
                 the solution and $W_e(i_e) = i_e w_p(e, i_e)$ is the
                 aggregate cost of using edge $e$ $i_e$ times. For the
                 case when $W_e$ is weakly convex, which should have
                 wide application in congestion problems, we present a
                 polynomial time algorithm; the algorithm's complexity
                 is quadratic in $k$. We also present two heuristics
                 with complexity linear in $k$. In an experimental study
                 we show that these heuristics are, in practice, much
                 faster than the exact algorithm. These experiments
                 present a diverse combination of input families (four),
                 varying $k$ (up to 1000), and penalty functions (two).
                 In most inputs tested the solutions given by the
                 heuristics were within 1\% of optimal or much better,
                 especially for large $k$. The worst quality observed
                 was 3.2\% of optimal.",
  acknowledgement = ack-nhfb,
  articleno =    "11",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
}

@Article{Schulz:2000:DAL,
  author =       "Frank Schulz and Dorothea Wagner and Karsten Weihe",
  title =        "{Dijkstra}'s algorithm on-line: an empirical case
                 study from public railroad transport",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "5",
  pages =        "12:1--12:??",
  month =        "????",
  year =         "2000",
  CODEN =        "????",
  DOI =          "http://doi.acm.org/10.1145/351827.384254",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Oct 6 16:03:09 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "Traffic information systems are among the most
                 prominent real-world applications of Dijkstra's
                 algorithm for shortest paths. We consider the scenario
                 of a central information server in the realm of public
                 railroad transport on wide-area networks. Such a system
                 has to process a large number of on-line queries for
                 optimal travel connections in real time. In practice,
                 this problem is usually solved by heuristic variations
                 of Dijkstra's algorithm, which do not guarantee an
                 optimal result. We report results from a pilot study,
                 in which we focused on the travel time as the only
                 optimization criterion. In this study, various speed-up
                 techniques for Dijkstra's algorithm were analysed
                 empirically. This analysis was based on the timetable
                 data of all German trains and on a 'snapshot' of half a
                 million customer queries.",
  acknowledgement = ack-nhfb,
  articleno =    "12",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
}

@Article{Flato:2000:DIP,
  author =       "Eyal Flato and Dan Halperin and Iddo Hanniel and Oren
                 Nechushtan and Eti Ezra",
  title =        "The design and implementation of planar maps in
                 {CGAL}",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "5",
  pages =        "13:1--13:??",
  month =        "????",
  year =         "2000",
  CODEN =        "????",
  DOI =          "http://doi.acm.org/10.1145/351827.384255",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Oct 6 16:03:09 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "Planar maps are fundamental structures in
                 computational geometry. They are used to represent the
                 subdivision of the plane into regions and have numerous
                 applications. We describe the planar map package of
                 CGAL--a Computational Geometry Algorithms Library. We
                 discuss its modular design and implementation. In
                 particular we introduce the two main classes of the
                 design--planar maps and topological maps that enable
                 the convenient separation between geometry and
                 topology. The modular design is implemented using a
                 generic programming approach. By switching a template
                 parameter--the geometric traits class, one can use the
                 same code for planar maps of different objects such as
                 line segments or circular arcs. More flexibility is
                 achieved by choosing a point location algorithm out of
                 three implemented algorithms or plugging in an
                 algorithm implemented by the user. The user of the
                 planar maps package can benefit both from its
                 flexibility and robustness. We present several examples
                 of geometric traits classes and point location
                 algorithms which demonstrate the possibility to adapt
                 the general package to specific needs.",
  acknowledgement = ack-nhfb,
  articleno =    "13",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
}
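
%%% A toy C++ illustration of the traits-class idea described above
%%% (illustrative only; this is not CGAL's actual interface): the
%%% geometry is a template parameter, so the same map code serves any
%%% object type that supplies a curve type and an intersection test.
%%%
%%%   struct SegmentTraits {
%%%       struct Point { double x, y; };
%%%       struct Curve { Point a, b; };
%%%       static bool ccw(Point p, Point q, Point r) {  // orientation test
%%%           return (q.x - p.x) * (r.y - p.y)
%%%                - (q.y - p.y) * (r.x - p.x) > 0;
%%%       }
%%%       static bool do_intersect(const Curve &s, const Curve &t) {
%%%           return ccw(s.a, s.b, t.a) != ccw(s.a, s.b, t.b)
%%%               && ccw(t.a, t.b, s.a) != ccw(t.a, t.b, s.b);
%%%       }
%%%   };
%%%
%%%   template <class Traits>
%%%   class PlanarMapSketch {      // uses only what the traits provide
%%%   public:
%%%       using Curve = typename Traits::Curve;
%%%       bool curves_cross(const Curve &c1, const Curve &c2) const {
%%%           return Traits::do_intersect(c1, c2);
%%%       }
%%%       // Swapping SegmentTraits for, say, a circular-arc traits
%%%       // class reuses all of this code unchanged.
%%%   };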

@Article{Rahman:2000:ACE,
  author =       "Naila Rahman and Rajeev Raman",
  title =        "Analysing cache effects in distribution sorting",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "5",
  pages =        "14:1--14:??",
  month =        "????",
  year =         "2000",
  CODEN =        "????",
  DOI =          "http://doi.acm.org/10.1145/351827.384256",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Oct 6 16:03:09 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "We study cache effects in distribution sorting
                 algorithms for sorting keys drawn independently at
                 random from a uniform distribution (`uniform keys'). We
                 note that the performance of a recently-published
                 distribution sorting algorithm, Flashsort1, which sorts
                 $n$ uniform floating-point keys in $O(n)$ expected
                 time, does not scale well with the input size due to
                 poor cache utilisation. We present an approximate
                 analysis for distribution sorting uniform keys which,
                 as validated by simulation results, predicts the
                 expected cache misses of Flashsort1 quite well. Using
                 this analysis, we design a multiple-pass variant of
                 Flashsort1 which outperforms Flashsort1 and
                 comparison-based algorithms on uniform floating-point
                 keys for moderate to large values of $n$. Using
                 experimental results we also show that the integer
                 distribution sorting algorithm MSB radix sort performs
                 well on both uniform integer and uniform floating-point
                 keys.",
  acknowledgement = ack-nhfb,
  articleno =    "14",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
  keywords =     "cache; efficient sorting algorithms; external-memory
                 algorithms; memory hierarchy",
}

@Article{Bojesen:2000:PEC,
  author =       "Jesper Bojesen and Jyrki Katajainen and Maz Spork",
  title =        "Performance engineering case study: heap
                 construction",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "5",
  pages =        "15:1--15:??",
  month =        "????",
  year =         "2000",
  CODEN =        "????",
  DOI =          "http://doi.acm.org/10.1145/351827.384257",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Oct 6 16:03:09 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "The behaviour of three methods for constructing a
                 binary heap on a computer with a hierarchical memory is
                 studied. The methods considered are the original one
                 proposed by Williams [1964], in which elements are
                 repeatedly inserted into a single heap; the improvement
                 by Floyd [1964], in which small heaps are repeatedly
                 merged to bigger heaps; and a recent method proposed,
                 e.g., by Fadel et al. [1999] in which a heap is built
                 layerwise. Both the worst-case number of instructions
                 and that of cache misses are analysed. It is well-known
                 that Floyd's method has the best instruction count. Let
                 $N$ denote the size of the heap to be constructed, $B$
                 the number of elements that fit into a cache line, and
                 let $c$ and $d$ be some positive constants. Our
                 analysis shows that, under reasonable assumptions,
                 repeated insertion and layerwise construction both
                 incur at most $cN/B$ cache misses, whereas repeated
                 merging, as programmed by Floyd, can incur more than
                 $(dN \log_2 B)/B$ cache misses. However, for our
                 memory-tuned versions of repeated insertion and
                 repeated merging, the number of cache misses incurred
                 is close to the optimal bound $N/B$. In addition to
                 these theoretical findings, we
                 communicate many practical experiences which we hope to
                 be valuable for others doing experimental algorithmic
                 work.",
  acknowledgement = ack-nhfb,
  articleno =    "15",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
  keywords =     "algorithms; binary heaps; code tuning;
                 experimentation; memory tuning; performance; theory",
}
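
%%% The two classical construction methods compared above, as a short
%%% C++ sketch for min-heaps (the paper's memory-tuned variants are
%%% more involved; identifiers are illustrative).
%%%
%%%   #include <utility>
%%%   #include <vector>
%%%
%%%   static void sift_down(std::vector<int> &a, std::size_t i,
%%%                         std::size_t n) {
%%%       while (2 * i + 1 < n) {
%%%           std::size_t c = 2 * i + 1;             // pick smaller child
%%%           if (c + 1 < n && a[c + 1] < a[c]) ++c;
%%%           if (a[i] <= a[c]) break;
%%%           std::swap(a[i], a[c]);
%%%           i = c;
%%%       }
%%%   }
%%%
%%%   // Floyd [1964]: sift down internal nodes, deepest first;
%%%   // O(N) instructions in total.
%%%   void floyd_make_heap(std::vector<int> &a) {
%%%       for (std::size_t i = a.size() / 2; i-- > 0; )
%%%           sift_down(a, i, a.size());
%%%   }
%%%
%%%   // Williams [1964]: repeated insertion, sifting each new element
%%%   // up; O(N log N) worst case, but a single left-to-right scan.
%%%   void williams_make_heap(std::vector<int> &a) {
%%%       for (std::size_t i = 1; i < a.size(); ++i)
%%%           for (std::size_t j = i;
%%%                j > 0 && a[j] < a[(j - 1) / 2]; j = (j - 1) / 2)
%%%               std::swap(a[j], a[(j - 1) / 2]);
%%%   }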

@Article{Boghossian:2000:RSP,
  author =       "N. P. Boghossian and O. Kohlbacher and H. P. Lenhof",
  title =        "Rapid software prototyping in molecular modeling using
                 the biochemical algorithms library {(BALL)}",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "5",
  pages =        "16:1--16:??",
  month =        "????",
  year =         "2000",
  CODEN =        "????",
  DOI =          "http://doi.acm.org/10.1145/351827.384258",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Oct 6 16:03:09 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "In the next century, virtual laboratories will play a
                 key role in biotechnology. Computer experiments will
                 not only replace some of the time-consuming and
                 expensive real-world experiments, but they will also
                 provide insights that cannot be obtained using 'wet'
                 experiments. The field that deals with the modeling of
                 atoms, molecules, and their reactions is called
                 Molecular Modeling. The advent of Life Sciences gave
                 rise to numerous new developments in this area.
                 However, the implementation of new simulation tools is
                 extremely time-consuming. This is mainly due to the
                 large amount of supporting code that is required in
                 addition to the code necessary to implement the new
                 idea. The only way to reduce the development time is to
                 reuse reliable code, preferably using object-oriented
                 approaches. We have designed and implemented BALL, the
                 first object-oriented application framework for rapid
                 prototyping in Molecular Modeling. By the use of the
                 composite design pattern and polymorphism we were able
                 to model the multitude of complex biochemical concepts
                 in a well-structured and comprehensible class
                 hierarchy, the BALL kernel classes. The isomorphism
                 between the biochemical structures and the kernel
                 classes leads to an intuitive interface. Since BALL was
                 designed for rapid software prototyping, ease of use,
                 extensibility, and robustness were our principal design
                 goals. Besides the kernel classes, BALL provides
                 fundamental components for import/export of data in
                 various file formats, Molecular Mechanics simulations,
                 three-dimensional visualization, and more complex ones
                 like a numerical solver for the Poisson--Boltzmann
                 equation.",
  acknowledgement = ack-nhfb,
  articleno =    "16",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
  keywords =     "libraries; molecular modeling; rapid software
                 prototyping",
}

@Article{Brengel:2000:ESP,
  author =       "Klaus Brengel and Andreas Crauser and Paolo Ferragina
                 and Ulrich Meyer",
  title =        "An experimental study of priority queues in external
                 memory",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "5",
  pages =        "17:1--17:??",
  month =        "????",
  year =         "2000",
  CODEN =        "????",
  DOI =          "http://doi.acm.org/10.1145/351827.384259",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Oct 6 16:03:09 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "In this paper we compare the performance of eight
                 different priority queue implementations: four of them
                 are explicitly designed to work in an external-memory
                 setting, the others are standard internal-memory queues
                 available in the LEDA library [Mehlhorn and N{\"a}her
                 1999]. Two of the external-memory priority queues are
                 obtained by engineering known internal-memory priority
                 queues with the aim of achieving effective performance
                 on external storage devices (i.e., Radix heaps [Ahuja
                 et al. 1990] and array heaps [Thorup 1996]). Our
                 experimental framework includes some simple tests, like
                 random sequences of insert or delete-minimum
                 operations, as well as more advanced tests consisting
                 of intermixed sequences of update operations and
                 'application driven' update sequences originated by
                 simulations of Dijkstra's algorithm on large graph
                 instances. Our varied spectrum of experimental
                 results gives a good picture of the features of these
                 priority queues, thus being helpful to anyone
                 interested in the use of such data structures on very
                 large data sets.",
  acknowledgement = ack-nhfb,
  articleno =    "17",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
}

@Article{Arge:2001:EAP,
  author =       "Lars Arge and Laura Toma and Jeffrey Scott Vitter",
  title =        "{I/O}-Efficient Algorithms for Problems on Grid-Based
                 Terrains",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "6",
  pages =        "1:1--1:??",
  month =        "????",
  year =         "2001",
  CODEN =        "????",
  DOI =          "http://doi.acm.org/10.1145/945394.945395",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Oct 6 16:03:55 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "The potential and use of Geographic Information
                 Systems is rapidly increasing due to the increasing
                 availability of massive amounts of geospatial data from
                 projects like NASA's Mission to Planet Earth. However,
                 the use of these massive datasets also exposes
                 scalability problems with existing GIS algorithms.
                 These scalability problems are mainly due to the fact
                 that most GIS algorithms have been designed to minimize
                 internal computation time, while I/O communication
                 often is the bottleneck when processing massive amounts
                 of data. In this paper, we consider I/O-efficient
                 algorithms for problems on grid-based terrains.
                 Detailed grid-based terrain data is rapidly becoming
                 available for much of the Earth's surface. We describe
                 $O({\rm sort}(N))$ I/O algorithms for several problems
                 on $\sqrt{N} \times \sqrt{N}$ grids for which only
                 $O(N)$ algorithms were
                 previously known. Here $M$ denotes the size of the main
                 memory and $B$ the size of a disk block. We demonstrate
                 the practical merits of our work by comparing the
                 empirical performance of our new algorithm for the {\em
                 flow accumulation\/} problem with that of the
                 previously best known algorithm. Flow accumulation,
                 which models flow of water through a terrain, is one of
                 the most basic hydrologic attributes of a terrain. We
                 present the results of an extensive set of experiments
                 on real-life terrain datasets of different sizes and
                 characteristics. Our experiments show that while our
                 new algorithm scales nicely with dataset size, the
                 previously known algorithm 'breaks down' once the size
                 of the dataset becomes bigger than the available main
                 memory. For example, while our algorithm computes the
                 flow accumulation for the Appalachian Mountains in
                 about three hours, the previously known algorithm takes
                 several weeks.",
  acknowledgement = ack-nhfb,
  articleno =    "1",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
}

@Article{Demetrescu:2001:BCM,
  author =       "Camil Demetrescu and Irene Finocchi",
  title =        "Breaking cycles for minimizing crossings",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "6",
  pages =        "2:1--2:??",
  month =        "????",
  year =         "2001",
  CODEN =        "????",
  DOI =          "http://doi.acm.org/10.1145/945394.945396",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Oct 6 16:03:55 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "We consider the one-sided crossing minimization
                 problem (CP): given a bipartite graph $G$ and a
                 permutation $x_0$ of the vertices on a layer, find a
                 permutation $x_1$ of the vertices on the other layer
                 which minimizes the number of edge crossings in any
                 straight-line drawing of $G$ where vertices are placed
                 on two parallel lines and sorted according to $x_0$ and
                 $x_1$. Solving CP represents a fundamental step in the
                 construction of aesthetically pleasing layouts of
                 hierarchies and directed graphs, but unfortunately this
                 problem has been proved to be NP-complete.\par

                 In this paper we address the strong relation between CP
                 and the problem of computing minimum feedback arc sets
                 in directed graphs and we devise a new approximation
                 algorithm for CP, called PM, that exploits this
                 dependency. We experimentally and visually compare the
                 performance of PM with the performance of well-known
                 algorithms and of recent attractive strategies.
                 Experiments are carried out on different families of
                 randomly generated graphs, on pathological instances,
                 and on real test sets. Performance indicators include
                 both number of edge crossings and running time, as well
                 as structural measures of the problem instances. We
                 found CP to be a very interesting and rich problem from
                 a combinatorial point of view. Our results clearly
                 separate the behavior of the algorithms, proving the
                 effectiveness of PM on most test sets and showing
                 tradeoffs between quality of the solutions and running
                 time. However, if the visual complexity of the drawings
                 is considered, we found no clear winner. This confirms
                 the importance of optimizing also other aesthetic
                 criteria such as symmetry, edge length, and angular
                 resolution.",
  acknowledgement = ack-nhfb,
  articleno =    "2",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
  keywords =     "bipartite graphs; crossing minimization; experimental
                 algorithms",
}
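
%%% One of the well-known baselines PM is compared against is the
%%% median heuristic; a hedged C++ sketch (not the paper's PM code;
%%% identifiers are illustrative): each free vertex is keyed by the
%%% median position of its neighbours on the fixed layer, and the
%%% free layer is sorted by that key.
%%%
%%%   #include <algorithm>
%%%   #include <vector>
%%%
%%%   // nbr[v] = positions (in x_0) of v's neighbours on the fixed layer.
%%%   // Returns a permutation x_1 of the free-layer vertices.
%%%   std::vector<int> median_order(std::vector<std::vector<int>> nbr) {
%%%       int n = (int)nbr.size();
%%%       std::vector<int> med(n, 0), order(n);
%%%       for (int v = 0; v < n; ++v) {
%%%           order[v] = v;
%%%           if (!nbr[v].empty()) {
%%%               auto mid = nbr[v].begin() + nbr[v].size() / 2;
%%%               std::nth_element(nbr[v].begin(), mid, nbr[v].end());
%%%               med[v] = *mid;
%%%           }
%%%       }
%%%       std::stable_sort(order.begin(), order.end(),
%%%                        [&](int a, int b) { return med[a] < med[b]; });
%%%       return order;
%%%   }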

@Article{Gabow:2001:NFB,
  author =       "Harold Gabow and Tadayoshi Kohno",
  title =        "A Network-Flow-Based Scheduler: Design, Performance
                 History, and Experimental Analysis",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "6",
  pages =        "3:1--3:??",
  month =        "????",
  year =         "2001",
  CODEN =        "????",
  DOI =          "http://doi.acm.org/10.1145/945394.945397",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Oct 6 16:03:55 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "We describe a program that schedules physician
                 attending teams at Denver Health Medical Center. The
                 program uses network flow techniques to prune an
                 exponentially sized search space. We describe the
                 program design, its performance history at the
                 hospital, and experiments on a simplified version of
                 the program.",
  acknowledgement = ack-nhfb,
  articleno =    "3",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
  keywords =     "graphs; matroids; scheduling",
}

@Article{Iyer:2001:ESP,
  author =       "Raj Iyer and David Karger and Hariharan Rahul and
                 Mikkel Thorup",
  title =        "An Experimental Study of Polylogarithmic, Fully
                 Dynamic, Connectivity Algorithms",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "6",
  pages =        "4:1--4:??",
  month =        "????",
  year =         "2001",
  CODEN =        "????",
  DOI =          "http://doi.acm.org/10.1145/945394.945398",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Oct 6 16:03:55 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "We present an experimental study of different variants
                 of the amortized $O(\log^2 n)$-time fully-dynamic
                 connectivity algorithm of Holm, de Lichtenberg, and
                 Thorup (STOC'98). The experiments build upon
                 experiments provided by Alberts, Cattaneo, and Italiano
                 (SODA'96) on the randomized amortized $O(\log^3 n)$
                 fully-dynamic connectivity algorithm of Henzinger and
                 King (STOC'95). Our experiments shed light upon
                 similarities and differences between the two
                 algorithms. We also present a slightly modified version
                 of the Henzinger--King algorithm that runs in $O(\log^2
                 n)$ time, which resulted from our experiments.",
  acknowledgement = ack-nhfb,
  articleno =    "4",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
}

@Article{Liberatore:2001:CSB,
  author =       "Vincenzo Liberatore",
  title =        "Caching and Scheduling for Broadcast Disk Systems",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "6",
  pages =        "5:1--5:??",
  month =        "????",
  year =         "2001",
  CODEN =        "????",
  DOI =          "http://doi.acm.org/10.1145/945394.945399",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Oct 6 16:03:55 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "Unicast connections lead to performance and
                 scalability problems when a large client population
                 attempts to access the same data. Broadcast push and
                 broadcast disk technology address the problem by
                 broadcasting data items from a server to a large number
                 of clients. Broadcast disk performance depends mainly
                 on caching strategies at the client site and on how the
                 broadcast is scheduled at the server site. An on-line
                 broadcast disk paging strategy makes caching decisions
                 without knowing future page requests or access
                 probabilities. This paper gives new implementations of
                 existing on-line algorithms and reports on extensive
                 empirical investigations. The gray algorithm [Khanna
                 and Liberatore 2000] always outperformed other on-line
                 strategies on both synthetic and Web traces. Moreover,
                 caching limited the skewness of broadcast schedules,
                  and favored efficient caching algorithms over
                 refined scheduling strategies when the cache was
                 large.",
  acknowledgement = ack-nhfb,
  articleno =    "5",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
  keywords =     "broadcast disk; caching; scheduling",
}

@Article{Narasimhan:2001:GMS,
  author =       "Giri Narasimhan and Martin Zachariasen",
  title =        "Geometric Minimum Spanning Trees via Well-Separated
                 Pair Decompositions",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "6",
  pages =        "6:1--6:??",
  month =        "????",
  year =         "2001",
  CODEN =        "????",
  DOI =          "http://doi.acm.org/10.1145/945394.945400",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Oct 6 16:03:55 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "Let $S$ be a set of $n$ points in $\Re^d$. We present
                 an algorithm that uses the well-separated pair
                 decomposition and computes the minimum spanning tree of
                 $S$ under any $L_p$ or polyhedral metric. A theoretical
                 analysis shows that it has an expected running time of
                 $O(n \log n)$ for uniform point distributions; this is
                 verified experimentally. Extensive experimental results
                 show that this approach is practical. Under a variety
                 of input distributions, the resulting implementation is
                 robust and performs well for points in higher
                 dimensional space.",
  acknowledgement = ack-nhfb,
  articleno =    "6",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
}

@Article{Rahman:2001:ARS,
  author =       "Naila Rahman and Rajeev Raman",
  title =        "Adapting Radix Sort to the Memory Hierarchy",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "6",
  pages =        "7:1--7:??",
  month =        "????",
  year =         "2001",
  CODEN =        "????",
  DOI =          "http://doi.acm.org/10.1145/945394.945401",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Oct 6 16:03:55 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "We demonstrate the importance of reducing misses in
                 the translation-lookaside buffer (TLB) for obtaining
                 good performance on modern computer architectures. We
                 focus on least-significant bit first (LSB) radix sort,
                 standard implementations of which make many TLB misses.
                 We give three techniques which simultaneously reduce
                 cache and TLB misses for LSB radix sort: reducing
                 working set size, explicit block transfer and
                  pre-sorting. We note that: (1) All the techniques
                 above yield algorithms whose implementations outperform
                 optimised cache-tuned implementations of LSB radix sort
                 and comparison-based sorting algorithms. The fastest
                 running times are obtained by the pre-sorting approach
                 and these are over twice as fast as optimised
                 cache-tuned implementations of LSB radix sort and
                 quicksort. Even the simplest optimisation, using the
                 TLB size to guide the choice of radix in standard
                 implementations of LSB radix sort, gives good
                  improvements over cache-tuned algorithms. (2) One of
                 the pre-sorting algorithms and explicit block transfer
                 make few cache and TLB misses in the worst case. This
                 is not true of standard implementations of LSB radix
                 sort. We also apply these techniques to the problem of
                 permuting an array of integers, and obtain gains of
                 over 30\% relative to the naive algorithm by using
                 explicit block transfer.",
  acknowledgement = ack-nhfb,
  articleno =    "7",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
  keywords =     "cache; efficient sorting algorithms; external-memory
                 algorithms; locality of reference; memory hierarchy;
                 radix sort; translation-lookaside buffer (TLB)",
}
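
%%% A minimal Python sketch of the simplest optimisation the abstract
%%% above mentions: an LSB radix sort whose digit width is capped so
%%% that the counting-bucket array spans no more pages than a
%%% (hypothetical) TLB covers.  The sizes below are illustrative
%%% assumptions, not measurements from the paper.
%%%
%%%     def lsb_radix_sort(keys, key_bits=32, tlb_entries=64,
%%%                        ints_per_page=1024):
%%%         # cap the radix so the bucket array fits in ~tlb_entries pages
%%%         max_buckets = tlb_entries * ints_per_page
%%%         digit_bits = max(1, max_buckets.bit_length() - 1)
%%%         radix = 1 << digit_bits
%%%         for shift in range(0, key_bits, digit_bits):
%%%             count = [0] * radix
%%%             for k in keys:
%%%                 count[(k >> shift) & (radix - 1)] += 1
%%%             pos, total = [0] * radix, 0
%%%             for d in range(radix):          # bucket start offsets
%%%                 pos[d], total = total, total + count[d]
%%%             out = [0] * len(keys)
%%%             for k in keys:                  # stable scatter pass
%%%                 d = (k >> shift) & (radix - 1)
%%%                 out[pos[d]] = k
%%%                 pos[d] += 1
%%%             keys = out
%%%         return keys
%%%
%%%     print(lsb_radix_sort([170, 45, 75, 90, 2, 802, 24, 66]))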

@Article{Stallmann:2001:HES,
  author =       "Matthias Stallmann and Franc Brglez and Debabrata
                 Ghosh",
  title =        "Heuristics, Experimental Subjects, and Treatment
                 Evaluation in Bigraph Crossing Minimization",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "6",
  pages =        "8:1--8:??",
  month =        "????",
  year =         "2001",
  CODEN =        "????",
  DOI =          "http://doi.acm.org/10.1145/945394.945402",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Oct 6 16:03:55 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "The bigraph crossing problem, embedding the two node
                 sets of a bipartite graph along two parallel lines so
                 that edge crossings are minimized, has applications to
                 circuit layout and graph drawing. Experimental results
                 for several previously known and two new heuristics
                 suggest continued exploration of the problem,
                  particularly for sparse instances. We emphasize careful
                 design of experimental subject classes and present
                 novel views of the results. All source code, data, and
                  scripts are available on-line.",
  acknowledgement = ack-nhfb,
  articleno =    "8",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
  keywords =     "crossing number; design of experiments; graph drawing;
                 graph embedding; graph equivalence classes; layout",
}
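
%%% For orientation, the quantity all of these heuristics minimize, as
%%% a short Python sketch (a hypothetical helper, not the authors'
%%% tooling): with both node sets embedded along parallel lines, two
%%% edges cross exactly when their endpoint orders invert, so a
%%% brute-force O(m^2) pair count evaluates a layout.
%%%
%%%     from itertools import combinations
%%%
%%%     def crossings(edges, top_pos, bottom_pos):
%%%         """edges: (u, v) pairs; *_pos: node -> position."""
%%%         return sum(
%%%             (top_pos[u1] - top_pos[u2])
%%%             * (bottom_pos[v1] - bottom_pos[v2]) < 0
%%%             for (u1, v1), (u2, v2) in combinations(edges, 2))
%%%
%%%     E = [("a", "x"), ("a", "y"), ("b", "x")]
%%%     print(crossings(E, {"a": 0, "b": 1}, {"x": 0, "y": 1}))  # 1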

@Article{Frigioni:2001:ESD,
  author =       "Daniele Frigioni and Tobias Miller and Christos
                 Zaroliagis",
  title =        "An Experimental Study of Dynamic Algorithms for
                 Transitive Closure",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "6",
  pages =        "9:1--9:??",
  month =        "????",
  year =         "2001",
  CODEN =        "????",
  DOI =          "http://doi.acm.org/10.1145/945394.945403",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Oct 6 16:03:55 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "We perform an extensive experimental study of several
                 dynamic algorithms for transitive closure. In
                 particular, we implemented algorithms given by
                 Italiano, Yellin, Cicerone et al., and two recent
                 randomized algorithms by Henzinger and King. We propose
                 a fine-tuned version of Italiano's algorithms as well
                 as a new variant of them, both of which were always
                 faster than any of the other implementations of the
                 dynamic algorithms. We also considered simple-minded
                 algorithms that were easy to implement and likely to be
                  fast in practice. We tested and compared the above
                 implementations on random inputs, on non-random inputs
                 that are worst-case inputs for the dynamic algorithms,
                 and on an input motivated by a real-world graph.",
  acknowledgement = ack-nhfb,
  articleno =    "9",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
  keywords =     "dynamic algorithm; experimentation; transitive
                 closure",
}

@Article{Matias:2001:EFP,
  author =       "Yossi Matias and Nasir Rajpoot and Cenk Sahinalp",
  title =        "The Effect of Flexible Parsing for Dynamic
                 Dictionary-Based Data Compression",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "6",
  pages =        "10:1--10:??",
  month =        "????",
  year =         "2001",
  CODEN =        "????",
  DOI =          "http://doi.acm.org/10.1145/945394.945404",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Oct 6 16:03:55 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "We report on the performance evaluation of greedy
                  parsing with a single-step lookahead (which we call
                  Flexible Parsing or {\em FP\/}) as an alternative to
                 the commonly used greedy parsing (with no-lookaheads)
                 scheme. Greedy parsing is the basis of most popular
                 compression programs including UNIX {\tt compress} and
                  {\tt gzip}; however, it usually results in far from
                 optimal parsing\slash compression with regard to the
                 dictionary construction scheme in use. Flexible
                  parsing, however, is optimal [MS99]; i.e., it
                  partitions any given input into the smallest number
                  of phrases possible, for dictionary construction
                  schemes which
                 satisfy the prefix property throughout their
                 execution.\par

                 We focus on the application of {\em FP\/} in the
                 context of the LZW variant of the Lempel--Ziv'78
                 dictionary construction method [Wel84, ZL78], which is
                 of considerable practical interest. We implement two
                 compression algorithms which use (1) {\em FP\/} with
                 LZW dictionary (LZW-{\em FP\/}), and (2) {\em FP\/}
                 with an alternative flexible dictionary (FPA as
                 introduced in [Hor95]). Our implementations are based
                 on novel on-line data structures enabling us to use
                 linear time and space. We test our implementations on a
                 collection of input sequences which includes textual
                 files, DNA sequences, medical images, and pseudorandom
                 binary files, and compare our results with two of the
                  most popular compression programs, UNIX {\tt compress}
                 and {\tt gzip}. Our results demonstrate that flexible
                 parsing is especially useful for non-textual data, on
                 which it improves over the compression rates of {\tt
                 compress} and {\tt gzip} by up to 20\% and 35\%,
                 respectively.",
  acknowledgement = ack-nhfb,
  articleno =    "10",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
}
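
%%% A minimal Python sketch of the parsing rule being evaluated above,
%%% over a toy static, prefix-closed dictionary (the paper's LZW-FP
%%% instead grows the dictionary online and uses linear-time data
%%% structures).  Greedy always takes the longest phrase; flexible
%%% parsing takes the phrase whose end maximizes the reach of the next
%%% greedy phrase.
%%%
%%%     def longest_match(s, i, D):
%%%         # longest dictionary phrase starting at s[i]
%%%         # (assumes every single symbol is in D)
%%%         best = 1
%%%         for l in range(2, len(s) - i + 1):
%%%             if s[i:i + l] in D:
%%%                 best = l
%%%         return best
%%%
%%%     def greedy_parse(s, D):
%%%         i, phrases = 0, 0
%%%         while i < len(s):
%%%             i += longest_match(s, i, D)
%%%             phrases += 1
%%%         return phrases
%%%
%%%     def flexible_parse(s, D):
%%%         i, phrases = 0, 0
%%%         while i < len(s):
%%%             limit = longest_match(s, i, D)
%%%             best_l, best_reach = 1, -1
%%%             for l in range(1, limit + 1):   # one-step lookahead
%%%                 j = i + l
%%%                 reach = (len(s) if j >= len(s)
%%%                          else j + longest_match(s, j, D))
%%%                 if reach >= best_reach:     # ties -> longer phrase
%%%                     best_l, best_reach = l, reach
%%%             i += best_l
%%%             phrases += 1
%%%         return phrases
%%%
%%%     D = {"a", "b", "aa", "ab", "ba", "aba", "abab"}
%%%     print(greedy_parse("aabab", D), flexible_parse("aabab", D))
%%%     # -> 3 2: lookahead parses "a|abab"; greedy parses "aa|ba|b"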

@Article{Backes:2002:HLB,
  author =       "Werner Backes and Susanne Wetzel",
  title =        "Heuristics on lattice basis reduction in practice",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "7",
  pages =        "1--1",
  month =        "????",
  year =         "2002",
  CODEN =        "????",
  DOI =          "http://doi.acm.org/10.1145/944618.944619",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Oct 6 16:04:20 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "In this paper we provide a survey on LLL lattice basis
                 reduction in practice. We introduce several new
                  heuristics to speed up known lattice basis reduction
                 methods and improve the quality of the computed reduced
                 lattice basis in practice. We analyze substantial
                  experimental data and, to our knowledge, we are the
                 first to present general heuristics for determining
                 which variant of the reduction algorithm, for varied
                 parameter choices, yields the most efficient reduction
                 strategy for reducing a particular problem instance.",
  acknowledgement = ack-nhfb,
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
  keywords =     "dynamic approximation; general reduction heuristics;
                 lattice basis reduction; modular and iterative
                 heuristics",
}
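
%%% For context, a minimal textbook LLL reduction (delta = 3/4) in
%%% Python with exact rational arithmetic; the paper's heuristics sit
%%% on top of much faster floating-point variants of this scheme, so
%%% this sketch only fixes ideas and is in no way the authors'
%%% implementation.
%%%
%%%     from fractions import Fraction
%%%
%%%     def gram_schmidt(b):
%%%         # Gram-Schmidt coefficients mu and squared norms of b*_i
%%%         n, m = len(b), len(b[0])
%%%         mu = [[Fraction(0)] * n for _ in range(n)]
%%%         bstar, norm2 = [], []
%%%         for i in range(n):
%%%             v = [Fraction(x) for x in b[i]]
%%%             for j in range(i):
%%%                 mu[i][j] = sum(Fraction(b[i][k]) * bstar[j][k]
%%%                                for k in range(m)) / norm2[j]
%%%                 v = [v[k] - mu[i][j] * bstar[j][k] for k in range(m)]
%%%             bstar.append(v)
%%%             norm2.append(sum(x * x for x in v))
%%%         return mu, norm2
%%%
%%%     def lll(b, delta=Fraction(3, 4)):
%%%         b = [list(row) for row in b]
%%%         k = 1
%%%         while k < len(b):
%%%             mu, norm2 = gram_schmidt(b)
%%%             for j in range(k - 1, -1, -1):   # size reduction
%%%                 q = round(mu[k][j])
%%%                 if q:
%%%                     b[k] = [x - q * y for x, y in zip(b[k], b[j])]
%%%                     mu, norm2 = gram_schmidt(b)
%%%             if norm2[k] >= (delta - mu[k][k - 1] ** 2) * norm2[k - 1]:
%%%                 k += 1                       # Lovasz condition holds
%%%             else:
%%%                 b[k], b[k - 1] = b[k - 1], b[k]
%%%                 k = max(k - 1, 1)
%%%         return b
%%%
%%%     print(lll([[1, 1, 1], [-1, 0, 2], [3, 5, 6]]))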

@Article{Iwama:2002:PLS,
  author =       "Kazuo Iwama and Daisuke Kawai and Shuichi Miyazaki and
                 Yasuo Okabe and Jun Umemoto",
  title =        "Parallelizing local search for {CNF} satisfiability
                 using vectorization and {PVM}",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "7",
  pages =        "2--2",
  month =        "????",
  year =         "2002",
  CODEN =        "????",
  DOI =          "http://doi.acm.org/10.1145/944618.944620",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Oct 6 16:04:20 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "The purpose of this paper is to speed up the local
                 search algorithm for the CNF Satisfiability problem.
                 Our basic strategy is to run some 10$^5$ independent
                 search paths simultaneously using PVM on a vector
                 supercomputer VPP800, which consists of 40 vector
                 processors. Using the above parallelization and
                 vectorization together with some improvement of data
                  structure, we obtained a 600-fold speedup in terms of
                 the number of flips the local search can make per
                 second, compared to the original GSAT by Selman and
                 Kautz. We ran our parallel GSAT for benchmark instances
                 and compared the running time with those of existing
                  SAT programs. We observed a clear benefit from
                  parallelization: in particular, we were able to solve
                  two instances that had never been solved before this
                  paper. We also tested parallel local search for the
                  SAT encoding of the class scheduling problem. Again,
                  we obtained nearly optimal answers in reasonable
                  time.",
  acknowledgement = ack-nhfb,
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
  keywords =     "algorithms; CNF Satisfiability; distributed computing;
                 experimentation; local search algorithms;
                 parallelization; PVM; vector supercomputer;
                 vectorization",
}
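
%%% A minimal sequential GSAT inner loop in Python, for orientation
%%% only; the paper's contribution is running on the order of 10^5
%%% such search paths concurrently under PVM with vectorized flip-gain
%%% bookkeeping, which this naive sketch recomputes from scratch at
%%% every step.
%%%
%%%     import random
%%%
%%%     def n_sat(clauses, a):
%%%         # a clause (tuple of signed ints) is satisfied if any
%%%         # literal is true under assignment a
%%%         return sum(any(a[abs(l)] == (l > 0) for l in c)
%%%                    for c in clauses)
%%%
%%%     def gsat(clauses, n_vars, max_tries=10, max_flips=1000,
%%%              rng=random):
%%%         for _ in range(max_tries):
%%%             a = {v: rng.random() < 0.5
%%%                  for v in range(1, n_vars + 1)}
%%%             for _ in range(max_flips):
%%%                 if n_sat(clauses, a) == len(clauses):
%%%                     return a
%%%                 def score(v):       # clauses satisfied if v flips
%%%                     a[v] = not a[v]
%%%                     s = n_sat(clauses, a)
%%%                     a[v] = not a[v]
%%%                     return s
%%%                 best = max(score(v) for v in a)
%%%                 v = rng.choice([v for v in a if score(v) == best])
%%%                 a[v] = not a[v]     # greedy flip, random ties
%%%         return None
%%%
%%%     clauses = [(1, 2), (-1, 3), (-2, -3)]
%%%     print(gsat(clauses, 3))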

@Article{Albers:2002:ESO,
  author =       "Susanne Albers and Bianca Schr{\"o}der",
  title =        "An experimental study of online scheduling
                 algorithms",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "7",
  pages =        "3--3",
  month =        "????",
  year =         "2002",
  CODEN =        "????",
  DOI =          "http://doi.acm.org/10.1145/944618.944621",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Oct 6 16:04:20 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "We present the first comprehensive experimental study
                 of online algorithms for Graham's scheduling problem.
                 Graham's scheduling problem is a fundamental problem in
                 scheduling theory where a sequence of jobs has to be
                 scheduled on $m$ identical parallel machines so as to
                 minimize the makespan. Graham gave an elegant algorithm
                 that is $(2 - 1 / m)$-competitive. Recently a number of
                 new online algorithms were developed that achieve
                 competitive ratios around 1.9. Since competitive
                 analysis can only capture the worst case behavior of an
                 algorithm a question often asked is: Are these new
                 algorithms geared only towards a pathological case or
                  do they perform better in practice, too? We address
                  this
                 question by analyzing the algorithms on various job
                 sequences. In our actual tests, we analyzed the
                 algorithms (1) on real world jobs and (2) on jobs
                 generated by probability distributions. It turns out
                 that the performance of the algorithms depends heavily
                 on the characteristics of the respective work load. On
                 job sequences that are generated by standard
                 probability distributions, Graham's strategy is clearly
                 the best. However, on the real world jobs the new
                 algorithms often outperform Graham's strategy. Our
                 experimental study confirms theoretical results in the
                 sense that there are also job sequences in practice on
                 which the new online algorithms perform better. Our
                 study can help to inform practitioners about the new
                 scheduling strategies as an alternative to Graham's
                 algorithm.",
  acknowledgement = ack-nhfb,
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
  keywords =     "algorithms; experimentation; online algorithms;
                 performance; scheduling",
}
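
%%% The baseline in this study, Graham's List scheduling rule, is a
%%% few lines of Python: each arriving job goes to a currently
%%% least-loaded machine, which is (2 - 1/m)-competitive for the
%%% makespan.  (The newer ~1.9-competitive algorithms the abstract
%%% mentions deliberately keep some machines lightly loaded and are
%%% not sketched here.)
%%%
%%%     import heapq
%%%
%%%     def graham(jobs, m):
%%%         """Makespan of List scheduling on m identical machines."""
%%%         loads = [0.0] * m
%%%         heapq.heapify(loads)            # min-heap of machine loads
%%%         for p in jobs:
%%%             least = heapq.heappop(loads)
%%%             heapq.heappush(loads, least + p)
%%%         return max(loads)
%%%
%%%     # List scheduling yields makespan 8 here; an optimal schedule
%%%     # (6 | 4+3 | 2+2+2) achieves 7.
%%%     print(graham([2, 3, 4, 6, 2, 2], m=3))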

@Article{Mehlhorn:2002:IWM,
  author =       "Kurt Mehlhorn and Guido Sch{\"a}fer",
  title =        "Implementation of {$O(nm \log n)$} weighted matchings
                 in general graphs: the power of data structures",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "7",
  pages =        "4--4",
  month =        "????",
  year =         "2002",
  CODEN =        "????",
  DOI =          "http://doi.acm.org/10.1145/944618.944622",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Oct 6 16:04:20 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "We describe the implementation of an algorithm which
                 solves the weighted matching problem in general graphs
                 with $n$ vertices and $m$ edges in time $O(nm \log n)$.
                 Our algorithm is a variant of the algorithm of Galil,
                 Micali and Gabow [Galil et al. 1986] and extensively
                 uses sophisticated data structures, in particular {\em
                 concatenable priority queues}, so as to reduce the time
                 needed to perform dual adjustments and to find tight
                 edges in Edmonds' blossom-shrinking algorithm. We
                 compare our implementation to the experimentally
                 fastest implementation, named {\em Blossom IV}, due to
                 Cook and Rohe [Cook and Rohe 1997]. Blossom IV requires
                 only very simple data structures and has an asymptotic
                 running time of $O(n^2 m)$. Our experiments show that
                 our new implementation is superior to Blossom IV. A
                 closer inspection reveals that the running time of
                 Edmonds' blossom-shrinking algorithm in practice
                 heavily depends on the time spent to perform dual
                 adjustments and to find tight edges. Therefore,
                 optimizing these operations, as is done in our
                  implementation, indeed speeds up the practical
                 performance of implementations of Edmonds' algorithm.",
  acknowledgement = ack-nhfb,
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
}

@Article{Edelkamp:2002:IHQ,
  author =       "Stefan Edelkamp and Patrick Stiegeler",
  title =        "Implementing {{\em HEAPSORT\/}} with $(n \log n - 0.9
                 n)$ and {{\em QUICKSORT}\/} with $(n \log n + 0.2 n)$
                 comparisons",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "7",
  pages =        "5--5",
  month =        "????",
  year =         "2002",
  CODEN =        "????",
  DOI =          "http://doi.acm.org/10.1145/944618.944623",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Oct 6 16:04:20 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "With refinements to the {\em WEAK-HEAPSORT\/}
                  algorithm we establish the general and practically
                  relevant sequential sorting algorithm {\em
                 INDEX-WEAK-HEAPSORT\/} with exactly $n \lceil \log n
                  \rceil - 2^{\lceil \log n \rceil} + 1 \leq n \log n -
                  0.9 n$ comparisons and at most $n \log n + 0.1 n$
                  transpositions on any given input. It requires an
                  auxiliary integer array of size $n$ and is best used
                  to generate an index for the data set. With {\em
                 RELAXED-WEAK-HEAPSORT\/} and {\em
                 GREEDY-WEAK-HEAPSORT\/} we discuss modifications for a
                 smaller set of pending element transpositions. If extra
                 space to create an index is not available, with {\em
                 QUICK-WEAK-HEAPSORT\/} we propose an efficient {\em
                 QUICKSORT\/} variant with $n \log n + 0.2 n + o(n)$
                 comparisons on the average. Furthermore, we present
                 data showing that {\em WEAK-HEAPSORT,
                 INDEX-WEAK-HEAPSORT\/} and {\em QUICK-WEAK-HEAPSORT\/}
                 compete with other performant {\em QUICKSORT\/} and
                 {\em HEAPSORT\/} variants.",
  acknowledgement = ack-nhfb,
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
}

@Article{Erlebach:2002:IAA,
  author =       "Thomas Erlebach and Klaus Jansen",
  title =        "Implementation of approximation algorithms for
                 weighted and unweighted edge-disjoint paths in
                 bidirected trees",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "7",
  pages =        "6--6",
  month =        "????",
  year =         "2002",
  CODEN =        "????",
  DOI =          "http://doi.acm.org/10.1145/944618.944624",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Oct 6 16:04:20 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "Given a set of weighted directed paths in a bidirected
                 tree, the maximum weight edge-disjoint paths problem
                 (MWEDP) is to select a subset of the given paths such
                 that the selected paths are edge-disjoint and the total
                 weight of the selected paths is maximized. MWEDP is
                 {\em NP\/}-hard for bidirected trees of unbounded
                 degree, even if all weights are the same (the
                 unweighted case). Three different approximation
                 algorithms are implemented: a known combinatorial $(5/3
                 + \epsilon)$-approximation algorithm $A_1$ for the
                 unweighted case, a new combinatorial 2-approximation
                 algorithm $A_2$ for the weighted case, and a known $(5
                 / 3 + \epsilon)$-approximation algorithm $A_3$ for the
                 weighted case that is based on linear programming. For
                 algorithm $A_1$, it is shown how efficient data
                 structures can be used to obtain a worst-case
                 running-time of $O(m + n + 4^{1/\epsilon} \sqrt n c m)$
                 for instances consisting of $m$ paths in a tree with
                 $n$ nodes. Experimental results regarding the
                 running-times and the quality of the solutions obtained
                 by the three approximation algorithms are reported.
                 Where possible, the approximate solutions are compared
                 to the optimal solutions, which were computed by
                 running CPLEX on an integer linear programming
                 formulation of MWEDP.",
  acknowledgement = ack-nhfb,
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
  keywords =     "algorithms; combinatorial optimization;
                 experimentation; linear programming",
}

@Article{Lassous:2002:PLR,
  author =       "Isabelle Gu{\'e}rin Lassous and Jens Gustedt",
  title =        "Portable list ranking: an experimental study",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "7",
  pages =        "7--7",
  month =        "????",
  year =         "2002",
  CODEN =        "????",
  DOI =          "http://doi.acm.org/10.1145/944618.944625",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Oct 6 16:04:20 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "We present and analyze two portable algorithms for the
                 List Ranking Problem in the Coarse Grained
                 Multicomputer model (CGM). We report on implementations
                 of these algorithms and experiments that were done with
                 these on a variety of parallel and distributed
                 architectures, ranging from PC clusters to a mainframe
                 parallel machine. With these experiments, we validate
                 the chosen CGM model, and also show the possible gains
                 and limits of such algorithms.",
  acknowledgement = ack-nhfb,
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
}
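
%%% For reference, the problem these CGM algorithms solve, as a
%%% classic pointer-jumping sketch in Python: after ceil(log2 n)
%%% jumping rounds every element knows its distance to the tail.  The
%%% paper's algorithms communicate in coarse-grained supersteps rather
%%% than per-element synchronous jumps; this sketch only states the
%%% problem.
%%%
%%%     def list_rank(succ):
%%%         """succ[i]: successor of i, or i itself at the tail."""
%%%         n = len(succ)
%%%         rank = [0 if succ[i] == i else 1 for i in range(n)]
%%%         succ = succ[:]
%%%         for _ in range(max(1, n.bit_length())):
%%%             rank = [rank[i] + rank[succ[i]] for i in range(n)]
%%%             succ = [succ[succ[i]] for i in range(n)]
%%%         return rank
%%%
%%%     # the list 3 -> 0 -> 2 -> 1 (element 1 is the tail)
%%%     print(list_rank([2, 1, 1, 0]))      # -> [2, 0, 1, 3]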

@Article{Vahrenhold:2002:PPL,
  author =       "Jan Vahrenhold and Klaus H. Hinrichs",
  title =        "Planar point location for large data sets: to seek or
                 not to seek",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "7",
  pages =        "8--8",
  month =        "????",
  year =         "2002",
  CODEN =        "????",
  DOI =          "http://doi.acm.org/10.1145/944618.944626",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Oct 6 16:04:20 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "We present an algorithm for external memory planar
                 point location that is both effective and easy to
                 implement. The base algorithm is an external memory
                 variant of the bucket method by Edahiro, Kokubo and
                 Asano that is combined with Lee and Yang's batched
                 internal memory algorithm for planar point location.
                 Although our algorithm is not optimal in terms of its
                 worst-case behavior, we show its efficiency for both
                 batched and single-shot queries by experiments with
                 real-world data. The experiments show that the
                 algorithm benefits from the mainly sequential disk
                 access pattern and significantly outperforms the
                 fastest algorithm for internal memory. Due to its
                 simple concept, the algorithm can take advantage of
                 multiple disks and processors in a rather
                 straightforward yet efficient way.",
  acknowledgement = ack-nhfb,
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
}

@Article{Wickremesinghe:2002:ESU,
  author =       "Rajiv Wickremesinghe and Lars Arge and Jeffrey S.
                 Chase and Jeffrey Scott Vitter",
  title =        "Efficient sorting using registers and caches",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "7",
  pages =        "9--9",
  month =        "????",
  year =         "2002",
  CODEN =        "????",
  DOI =          "http://doi.acm.org/10.1145/944618.944627",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Oct 6 16:04:20 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "Modern computer systems have increasingly complex
                 memory systems. Common machine models for algorithm
                 analysis do not reflect many of the features of these
                 systems, e.g., large register sets, lockup-free caches,
                 cache hierarchies, associativity, cache line fetching,
                 and streaming behavior. Inadequate models lead to poor
                 algorithmic choices and an incomplete understanding of
                 algorithm behavior on real machines. A key step toward
                 developing better models is to quantify the performance
                 effects of features not reflected in the models. This
                 paper explores the effect of memory system features on
                 sorting performance. We introduce a new cache-conscious
                 sorting algorithm, R-MERGE, which achieves better
                 performance in practice over algorithms that are
                 superior in the theoretical models. R-MERGE is designed
                 to minimize memory stall cycles rather than cache
                 misses by considering features common to many system
                 designs.",
  acknowledgement = ack-nhfb,
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
}

@Article{Herrmann:2002:FCN,
  author =       "Francine Herrmann and Alain Hertz",
  title =        "Finding the chromatic number by means of critical
                 graphs",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "7",
  pages =        "10--10",
  month =        "????",
  year =         "2002",
  CODEN =        "????",
  DOI =          "http://doi.acm.org/10.1145/944618.944628",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Oct 6 16:04:20 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "We propose a new exact algorithm for finding the
                 chromatic number of a graph $G$. The algorithm attempts
                 to determine the smallest possible induced subgraph
                 $G'$ of $G$ which has the same chromatic number as $G$.
                  Such a subgraph is said to be critical since all
                  proper induced subgraphs of $G'$ have a chromatic
                  number strictly smaller than that of $G'$. The
                  proposed method is
                 particularly helpful when a $k$-coloring of a
                 non-critical graph is known, and it has to be proved
                 that no $(k - 1)$-coloring of $G$ exists. Computational
                 experiments on random graphs and on DIMACS benchmark
                 problems demonstrate that the new proposed algorithm
                  can solve larger problems than previously known exact
                 methods.",
  acknowledgement = ack-nhfb,
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
  keywords =     "algorithms; experimentation; performance",
}

@Article{Fekete:2002:SHP,
  author =       "S{\'a}ndor P. Fekete and Henk Meijer and Andr{\'e}
                 Rohe and Walter Tietze",
  title =        "Solving a 'Hard' problem to approximate an 'Easy' one:
                 heuristics for maximum matchings and maximum traveling
                 salesman problems",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "7",
  pages =        "11--11",
  month =        "????",
  year =         "2002",
  CODEN =        "????",
  DOI =          "http://doi.acm.org/10.1145/944618.944629",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Oct 6 16:04:20 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "We consider geometric instances of the Maximum
                 Weighted Matching Problem (MWMP) and the Maximum
                 Traveling Salesman Problem (MTSP) with up to 3,000,000
                 vertices. Making use of a geometric duality
                 relationship between MWMP, MTSP, and the
                 Fermat--Weber-Problem (FWP), we develop a heuristic
                  approach that, in near-linear time, yields solutions
                  as well as upper bounds. Using various computational
                 tools, we get solutions within considerably less than
                 1\% of the optimum. An interesting feature of our
                 approach is that, even though an FWP is hard to compute
                 in theory and Edmonds' algorithm for maximum weighted
                 matching yields a polynomial solution for the MWMP, the
                 practical behavior is just the opposite, and we can
                 solve the FWP with high accuracy in order to find a
                 good heuristic solution for the MWMP.",
  acknowledgement = ack-nhfb,
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
  keywords =     "approximation; Fermat--Weber problem; geometric
                 optimization; geometric problems; heuristics; maximum
                 traveling salesman problem (MTSP); maximum weighted
                 matching; near-linear algorithms",
}

@Article{Neri:2002:RCL,
  author =       "Filippo Neri",
  title =        "Relational concept learning by cooperative evolution",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "7",
  pages =        "12--12",
  month =        "????",
  year =         "2002",
  CODEN =        "????",
  DOI =          "http://doi.acm.org/10.1145/944618.944630",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Oct 6 16:04:20 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "Concept learning is a computationally demanding task
                 that involves searching large hypothesis spaces
                 containing candidate descriptions. Stochastic search
                 combined with parallel processing provide a promising
                 approach to successfully deal with such computationally
                 intensive tasks. Learning systems based on distributed
                 genetic algorithms (GA) were able to find concept
                 descriptions as accurate as the ones found by
                 state-of-the-art learning systems based on alternative
                  approaches. However, the use of genetic algorithms
                  has the drawback of being computationally demanding.
                  We show how a suitable architectural choice, named
                  cooperative evolution, allows GA-based learning
                  systems to solve complex applications with acceptable
                  user waiting time and reasonable computational cost,
                  thanks to the effective exploitation of distributed
                  computation. A variety of experimental
                 settings is analyzed and an explanation for the
                 empirical observations is proposed.",
  acknowledgement = ack-nhfb,
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
  keywords =     "distributed genetic algorithm; first order logic
                 concept learning; relational concept learning",
}

@Article{Kumar:2003:AME,
  author =       "Piyush Kumar and Joseph S. B. Mitchell and E. Alper
                 Yildirim",
  title =        "Approximate minimum enclosing balls in high dimensions
                 using core-sets",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "8",
  pages =        "1.1:1--1.1:??",
  month =        "????",
  year =         "2003",
  CODEN =        "????",
  DOI =          "http://doi.acm.org/10.1145/996546.996548",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Oct 6 16:04:56 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "We study the minimum enclosing ball (MEB) problem for
                 sets of points or balls in high dimensions. Using
                 techniques of second-order cone programming and
                 'core-sets', we have developed $(1 +
                 \epsilon)$-approximation algorithms that perform well
                 in practice, especially for very high dimensions, in
                 addition to having provable guarantees. We prove the
                 existence of core-sets of size $O(1/\epsilon)$,
                 improving the previous bound of $O(1/\epsilon^2)$, and
                 we study empirically how the core-set size grows with
                 dimension. We show that our algorithm, which is simple
                 to implement, results in fast computation of nearly
                 optimal solutions for point sets in much higher
                 dimension than previously computable using exact
                 techniques.",
  acknowledgement = ack-nhfb,
  articleno =    "1.1",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
  keywords =     "approximation algorithms; minimum enclosing ball;
                 second-order cone programming",
}
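
%%% A minimal farthest-point iteration in the spirit of core-set
%%% algorithms for the minimum enclosing ball (this simple
%%% Badoiu--Clarkson-style center update is related to, but is not,
%%% the authors' method, which feeds core-sets to a second-order cone
%%% programming solver).
%%%
%%%     import math
%%%
%%%     def approx_meb(points, eps=0.1):
%%%         c = list(points[0])
%%%         for i in range(1, math.ceil(1 / eps ** 2) + 1):
%%%             # farthest input point from the current center
%%%             p = max(points, key=lambda q: math.dist(q, c))
%%%             # move the center a 1/(i+1) fraction toward it
%%%             c = [a + (b - a) / (i + 1) for a, b in zip(c, p)]
%%%         return c, max(math.dist(c, q) for q in points)
%%%
%%%     print(approx_meb([(0, 0), (2, 0), (1, 3), (0, 1)], eps=0.05))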

@Article{Arge:2003:EPL,
  author =       "Lars Arge and Andrew Danner and Sha-Mayn Teh",
  title =        "{I/O}-efficient point location using persistent
                 {B}-trees",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "8",
  pages =        "1.2:1--1.2:??",
  month =        "????",
  year =         "2003",
  CODEN =        "????",
  DOI =          "http://doi.acm.org/10.1145/996546.996549",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Oct 6 16:04:56 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "We present an external planar point location data
                 structure that is I/O-efficient both in theory and
                 practice. The developed structure uses linear space and
                 answers a query in optimal $O(\log B N)$ I/Os, where
                 $B$ is the disk block size. It is based on a persistent
                 B-tree, and all previously developed such structures
                 assume a total order on the elements in the structure.
                 As a theoretical result of independent interest, we
                 show how to remove this assumption. Most previous
                 theoretical I/O-efficient planar point location
                 structures are relatively complicated and have not been
                 implemented. Based on a bucket approach, Vahrenhold and
                 Hinrichs therefore developed a simple and practical,
                 but theoretically non-optimal, heuristic structure. We
                 present an extensive experimental evaluation that shows
                 that, on a range of real-world Geographic Information
                 Systems (GIS) data, our structure uses a similar number
                 of I/Os as the structure of Vahrenhold and Hinrichs to
                 answer a query. On a synthetically generated worst-case
                 dataset our structure uses significantly fewer I/Os.",
  acknowledgement = ack-nhfb,
  articleno =    "1.2",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
}

@Article{Buchsbaum:2003:FPM,
  author =       "Adam L. Buchsbaum and Glenn S. Fowler and Balachannder
                 Kirishnamurthy and Kiem-Phong Vo and Jia Wang",
  title =        "Fast prefix matching of bounded strings",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "8",
  pages =        "1.3:1--1.3:??",
  month =        "????",
  year =         "2003",
  CODEN =        "????",
  DOI =          "http://doi.acm.org/10.1145/996546.996550",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Oct 6 16:04:56 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "Longest Prefix Matching (LPM) is the problem of
                 finding which string from a given set is the longest
                 prefix of another, given string. LPM is a core problem
                 in many applications, including IP routing, network
                 data clustering, and telephone network management.
                 These applications typically require very fast matching
                 of bounded strings, i.e., strings that are short and
                 based on small alphabets. We note a simple
                 correspondence between bounded strings and natural
                 numbers that maps prefixes to nested intervals so that
                 computing the longest prefix matching a string is
                 equivalent to finding the shortest interval containing
                 its corresponding integer value. We then present {\em
                 retries}, a fast and compact data structure for LPM on
                 general alphabets. Performance results show that
                 retries often outperform previously published data
                 structures for IP look-up. By extending LPM to general
                 alphabets, retries admit new applications that could
                 not exploit prior LPM solutions designed for IP
                 look-ups.",
  acknowledgement = ack-nhfb,
  articleno =    "1.3",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
  keywords =     "IP routing; prefix matching; table look-up; tries",
}
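
%%% The string-to-interval correspondence the abstract describes, made
%%% concrete for IPv4-style bounded strings in Python: a prefix maps
%%% to the integer interval of the values it covers, and the longest
%%% matching prefix is the shortest interval containing the query.
%%% The paper's retries answer such queries with a fast, compact
%%% structure; this sketch merely scans the table.
%%%
%%%     def prefix_to_interval(bits, length, width=32):
%%%         lo = bits << (width - length)
%%%         hi = lo | ((1 << (width - length)) - 1)
%%%         return lo, hi
%%%
%%%     def longest_prefix_match(prefixes, value):
%%%         """prefixes: (bits, length) pairs; returns best pair."""
%%%         best, span = None, None
%%%         for bits, length in prefixes:
%%%             lo, hi = prefix_to_interval(bits, length)
%%%             if lo <= value <= hi and (span is None or hi - lo < span):
%%%                 best, span = (bits, length), hi - lo
%%%         return best
%%%
%%%     table = [(0b1, 1), (0b10, 2), (0b1011, 4)]
%%%     value = (0b1011 << 28) | 1
%%%     print(longest_prefix_match(table, value))   # -> (11, 4)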

@Article{Breimer:2003:LAL,
  author =       "Eric A. Breimer and Mark K. Goldberg and Darren T.
                 Lim",
  title =        "A learning algorithm for the longest common
                 subsequence problem",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "8",
  pages =        "2.1:1--2.1:??",
  month =        "????",
  year =         "2003",
  CODEN =        "????",
  DOI =          "http://doi.acm.org/10.1145/996546.996552",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Oct 6 16:04:56 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "We present an experimental study of a learning
                 algorithm for the longest common subsequence problem,
                 {\em LCS}. Given an arbitrary input domain, the
                 algorithm learns an {\em LCS\/}-procedure tailored to
                 that domain. The learning is done with the help of an
                 oracle, which can be any {\em LCS\/}-algorithm. After
                 solving a limited number of training inputs using an
                 oracle, the learning algorithm outputs a new {\em
                 LCS\/}-procedure. Our experiments demonstrate that, by
                 allowing a slight loss of optimality, learning yields a
                 procedure which is significantly faster than the
                 oracle. The oracle used for the experiments is the {\em
                 np\/}-procedure by Wu {\em et al.}, a modification of
                 Myers' classical {\em LCS\/}-algorithm. We show how to
                 scale up the results of learning on small inputs to
                 inputs of arbitrary lengths. For the domain of two
                 random 2-symbol inputs of length $n$, learning yields a
                 program with 0.999 expected accuracy, which runs in
                 $O(n^{1.41})$-time, in contrast with $O(n^2 \log n)$
                 running time of the fastest theoretical algorithm that
                 produces optimal solutions. For the domain of random
                 2-symbol inputs of length 100,000, the program runs
                 10.5 times faster than the {\em np\/}-procedure,
                  producing 0.999-accurate outputs. The scaled version
                 of the evolved algorithm applied to random inputs of
                 length 1 million runs approximately 30 times faster
                 than the {\em np\/}-procedure while constructing 0.999-
                 accurate solutions. We apply the evolved algorithm to
                 DNA sequences of various lengths by training on random
                 4-symbol sequences of up to length 10,000. The evolved
                 algorithm, scaled up to the lengths of up to 1.8
                  million, produces solutions with 0.998 accuracy in a
                  fraction of the time used by the {\em np\/}-procedure.",
  acknowledgement = ack-nhfb,
  articleno =    "2.1",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
}

@Article{Venkataraman:2003:BAP,
  author =       "Gayathri Venkataraman and Sartaj Sahni and Srabani
                 Mukhopadhyaya",
  title =        "A blocked all-pairs shortest-paths algorithm",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "8",
  pages =        "2.2:1--2.2:??",
  month =        "????",
  year =         "2003",
  CODEN =        "????",
  DOI =          "http://doi.acm.org/10.1145/996546.996553",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Oct 6 16:04:56 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "We propose a blocked version of Floyd's all-pairs
                 shortest-paths algorithm. The blocked algorithm makes
                 better utilization of cache than does Floyd's original
                 algorithm. Experiments indicate that the blocked
                 algorithm delivers a speedup (relative to the unblocked
                 Floyd's algorithm) between 1.6 and 1.9 on a Sun Ultra
                 Enterprise 4000/5000 for graphs that have between 480
                 and 3200 vertices. The measured speedup on an SGI O2
                 for graphs with between 240 and 1200 vertices is
                 between 1.6 and 2.",
  acknowledgement = ack-nhfb,
  articleno =    "2.2",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
  keywords =     "all pairs shortest paths; blocking; cache; speedup",
}
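
%%% A minimal Python rendering of the tiled scheme the abstract
%%% describes (the paper's implementation is compiled code, and the
%%% block size B is machine-dependent): each k-tile is processed in
%%% three phases -- the diagonal tile, then its tile row and column,
%%% then the rest -- so every phase touches O(B^2) data that can stay
%%% resident in cache.
%%%
%%%     INF = float("inf")
%%%
%%%     def relax_tile(d, ti, tj, tk, B, n):
%%%         for k in range(tk * B, min((tk + 1) * B, n)):
%%%             for i in range(ti * B, min((ti + 1) * B, n)):
%%%                 dik = d[i][k]
%%%                 for j in range(tj * B, min((tj + 1) * B, n)):
%%%                     if dik + d[k][j] < d[i][j]:
%%%                         d[i][j] = dik + d[k][j]
%%%
%%%     def blocked_floyd(d, B=64):
%%%         n = len(d)
%%%         nt = (n + B - 1) // B
%%%         for t in range(nt):
%%%             relax_tile(d, t, t, t, B, n)          # diagonal tile
%%%             for x in range(nt):
%%%                 if x != t:
%%%                     relax_tile(d, t, x, t, B, n)  # tile row t
%%%                     relax_tile(d, x, t, t, B, n)  # tile column t
%%%             for i in range(nt):
%%%                 for j in range(nt):
%%%                     if i != t and j != t:
%%%                         relax_tile(d, i, j, t, B, n)
%%%         return d
%%%
%%%     g = [[0, 3, INF, 7], [8, 0, 2, INF],
%%%          [5, INF, 0, 1], [2, INF, INF, 0]]
%%%     print(blocked_floyd(g, B=2))    # all-pairs distances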

@Article{Petit:2003:EML,
  author =       "Jordi Petit",
  title =        "Experiments on the minimum linear arrangement
                 problem",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "8",
  pages =        "2.3:1--2.3:??",
  month =        "????",
  year =         "2003",
  CODEN =        "????",
  DOI =          "http://doi.acm.org/10.1145/996546.996554",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Oct 6 16:04:56 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "This paper deals with the Minimum Linear Arrangement
                 problem from an experimental point of view. Using a
                 testsuite of sparse graphs, we experimentally compare
                 several algorithms to obtain upper and lower bounds for
                 this problem. The algorithms considered include
                 Successive Augmentation heuristics, Local Search
                 heuristics and Spectral Sequencing. The testsuite is
                 based on two random models and 'real life' graphs. As a
                 consequence of this study, two main conclusions can be
                 drawn: On one hand, the best approximations are usually
                 obtained using Simulated Annealing, which involves a
                 large amount of computation time. Solutions found with
                 Spectral Sequencing are close to the ones found with
                 Simulated Annealing and can be obtained in
                 significantly less time. On the other hand, we notice
                 that there exists a big gap between the best obtained
                 upper bounds and the best obtained lower bounds. These
                 two facts together show that, in practice, finding
                 lower and upper bounds for the Minimum Linear
                 Arrangement problem is hard.",
  acknowledgement = ack-nhfb,
  articleno =    "2.3",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
}
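
%%% The objective bounded in these experiments, stated as code: the
%%% cost of a layout pi (a bijection from vertices onto 0..n-1) is the
%%% total stretch of the edges, and the problem is to minimize it over
%%% all layouts.
%%%
%%%     def minla_cost(edges, pi):
%%%         return sum(abs(pi[u] - pi[v]) for u, v in edges)
%%%
%%%     # a path laid out in order is optimal, with cost n - 1
%%%     print(minla_cost([(0, 1), (1, 2), (2, 3)],
%%%                      {0: 0, 1: 1, 2: 2, 3: 3}))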

@Article{Brandes:2004:GNC,
  author =       "Ulrik Brandes and Frank Schulz and Dorothea Wagner and
                 Thomas Willhalm",
  title =        "Generating node coordinates for shortest-path
                 computations in transportation networks",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "9",
  pages =        "1.1:1--1.1:??",
  month =        "????",
  year =         "2004",
  CODEN =        "????",
  DOI =          "http://doi.acm.org/10.1145/1005813.1005815",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Oct 6 16:05:22 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "Speed-up techniques that exploit given node
                 coordinates have proven useful for shortest-path
                 computations in transportation networks and geographic
                 information systems. To facilitate the use of such
                 techniques when coordinates are missing from some, or
                  even all, of the nodes in a network, we generate
                 artificial coordinates using methods from graph
                 drawing. Experiments on a large set of German train
                 timetables indicate that the speed-up achieved with
                 coordinates from our drawings is close to that achieved
                 with the true coordinates---and in some special cases
                 even better.",
  acknowledgement = ack-nhfb,
  articleno =    "1.1",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
  keywords =     "graph drawing; shortest paths; transportation
                 networks; travel planning",
}

@Article{Niewiadomski:2004:PSD,
  author =       "Robert Niewiadomski and Jos{\'e} Nelson Amaral and
                 Robert C. Holte",
  title =        "A performance study of data layout techniques for
                 improving data locality in refinement-based
                 pathfinding",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "9",
  pages =        "1.2:1--1.2:??",
  month =        "????",
  year =         "2004",
  CODEN =        "????",
  DOI =          "http://doi.acm.org/10.1145/1005813.1041511",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Oct 6 16:05:22 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "The widening gap between processor speed and memory
                 latency increases the importance of crafting data
                 structures and algorithms to exploit temporal and
                 spatial locality. Refinement-based pathfinding
                 algorithms, such as Classic Refinement (CR), find
                 quality paths in very large sparse graphs where
                 traditional search techniques fail to generate paths in
                 acceptable time. In this paper, we present a
                 performance evaluation study of three simple data
                 structure transformations aimed at improving the data
                 reference locality of CR. These transformations are
                 robust to changes in computer architecture and the
                 degree of compiler optimization. We test our
                 alternative designs on four contemporary architectures,
                 using two compilers for each machine. In our
                 experiments, the application of these techniques
                 results in performance improvements of up to 67\% with
                 consistent improvements above 15\%. Analysis reveals
                 that these improvements stem from improved data
                 reference locality at the page level and to a lesser
                 extent at the cache line level.",
  acknowledgement = ack-nhfb,
  articleno =    "1.2",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
  keywords =     "cache-conscious algorithms; classical refinement;
                 pathfinding",
}

@Article{Marathe:2004:ESS,
  author =       "Madhav V. Marathe and Alessandro Panconesi and Larry
                 D. {Risinger, Jr.}",
  title =        "An experimental study of a simple, distributed
                 edge-coloring algorithm",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "9",
  pages =        "1.3:1--1.3:??",
  month =        "????",
  year =         "2004",
  CODEN =        "????",
  DOI =          "http://doi.acm.org/10.1145/1005813.1041515",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Oct 6 16:05:22 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "We conduct an experimental analysis of a distributed
                 randomized algorithm for edge coloring simple
                 undirected graphs. The algorithm is extremely simple
                 yet, according to the probabilistic analysis, it
                 computes nearly optimal colorings very quickly [Grable
                 and Panconesi 1997]. We test the algorithm on a number
                 of random as well as nonrandom graph families. The test
                 cases were chosen based on two objectives: (i) to
                 provide insights into the worst-case behavior (in terms
                 of time and quality) of the algorithm and (ii) to test
                 the performance of the algorithm with instances that
                 are likely to arise in practice. Our main results
                  include the following: (1) The empirical results
                  obtained compare very well with the recent empirical
                  results reported by other researchers [Durand et al.
                  1994, 1998; Jain and Werth 1995]. (2) The empirical
                  results confirm the bounds on the running time and the
                  solution quality as claimed in the theoretical paper.
                  Our results show that for certain classes of graphs the
                  algorithm is likely to perform much better than the
                  analysis suggests. (3) The results demonstrate that the
                 algorithm might be well suited (from a theoretical as
                 well as practical standpoint) for edge coloring graphs
                 quickly and efficiently in a distributed setting. Based
                 on our empirical study, we propose a simple
                 modification of the original algorithm with
                 substantially improved performance in practice.",
  acknowledgement = ack-nhfb,
  articleno =    "1.3",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
  keywords =     "distributed algorithms; edge coloring; experimental
                 analysis of algorithms; high performance computing;
                 randomized algorithms; scheduling",
}
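
%%% A minimal Python simulation, in the spirit of the algorithm
%%% described above, of synchronous randomized edge coloring: each
%%% uncolored edge draws a color from its current palette and keeps
%%% it only if no incident edge drew the same color that round.  The
%%% palette size, round limit, and tie-breaking are assumptions of
%%% this sketch, not the exact rules of Grable and Panconesi [1997].
%%%
%%%     import random
%%%     from collections import defaultdict
%%%
%%%     def randomized_edge_coloring(edges, num_colors, rounds=100, seed=1):
%%%         rng = random.Random(seed)
%%%         color, uncolored = {}, set(edges)
%%%         used = defaultdict(set)        # vertex -> colors fixed there
%%%         incident = defaultdict(set)    # vertex -> uncolored edges
%%%         for u, v in edges:
%%%             incident[u].add((u, v)); incident[v].add((u, v))
%%%         for _ in range(rounds):
%%%             if not uncolored:
%%%                 break
%%%             tentative = {}
%%%             for u, v in uncolored:
%%%                 palette = [c for c in range(num_colors)
%%%                            if c not in used[u] and c not in used[v]]
%%%                 if palette:
%%%                     tentative[(u, v)] = rng.choice(palette)
%%%             # an edge wins iff no incident uncolored edge drew its color
%%%             winners = [(e, c) for e, c in tentative.items()
%%%                        if all(tentative.get(f) != c for f in
%%%                               (incident[e[0]] | incident[e[1]]) - {e})]
%%%             for (u, v), c in winners:
%%%                 color[(u, v)] = c
%%%                 used[u].add(c); used[v].add(c)
%%%                 incident[u].discard((u, v)); incident[v].discard((u, v))
%%%                 uncolored.discard((u, v))
%%%         return color, uncolored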

@Article{Fredriksson:2004:AOS,
  author =       "Kimmo Fredriksson and Gonzalo Navarro",
  title =        "Average-optimal single and multiple approximate string
                 matching",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "9",
  pages =        "1.4:1--1.4:??",
  month =        "????",
  year =         "2004",
  CODEN =        "????",
  DOI =          "http://doi.acm.org/10.1145/1005813.1041513",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Oct 6 16:05:22 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "We present a new algorithm for multiple approximate
                 string matching. It is based on reading backwards
                  enough $\ell$-grams from text windows so as to prove that no
                 occurrence can contain the part of the window read, and
                 then shifting the window. We show analytically that our
                 algorithm is optimal on average. Hence our first
                 contribution is to fill an important gap in the area,
                 since no average-optimal algorithm existed for multiple
                 approximate string matching. We consider several
                 variants and practical improvements to our algorithm,
                 and show experimentally that they are resistant to the
                 number of patterns and the fastest for low difference
                 ratios, displacing the long-standing best algorithms.
                 Hence our second contribution is to give a practical
                 algorithm for this problem, by far better than any
                 existing alternative in many cases of interest. On
                 real-life texts, our algorithm is especially
                 interesting for computational biology applications. In
                 particular, we show that our algorithm can be
                 successfully used to search for one pattern, where many
                 more competing algorithms exist. Our algorithm is also
                 average-optimal in this case, being the second after
                 that of Chang and Marr. However, our algorithm permits
                 higher difference ratios than Chang and Marr, and this
                 is our third contribution. In practice, our algorithm
                 is competitive in this scenario too, being the fastest
                 for low difference ratios and moderate alphabet sizes.
                 This is our fourth contribution, which also answers
                 affirmatively the question of whether a practical
                 average-optimal approximate string-matching algorithm
                 existed.",
  acknowledgement = ack-nhfb,
  articleno =    "1.4",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
  keywords =     "algorithms; approximate string matching; biological
                 sequences; multiple string matching; optimality",
}

@Article{Sinha:2004:CCS,
  author =       "Ranjan Sinha and Justin Zobel",
  title =        "Cache-conscious sorting of large sets of strings with
                 dynamic tries",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "9",
  pages =        "1.5:1--1.5:??",
  month =        "????",
  year =         "2004",
  CODEN =        "????",
  DOI =          "http://doi.acm.org/10.1145/1005813.1041517",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Oct 6 16:05:22 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "Ongoing changes in computer architecture are affecting
                 the efficiency of string-sorting algorithms. The size
                 of main memory in typical computers continues to grow
                 but memory accesses require increasing numbers of
                 instruction cycles, which is a problem for the most
                 efficient of the existing string-sorting algorithms as
                 they do not utilize cache well for large data sets. We
                 propose a new sorting algorithm for strings, burstsort,
                 based on dynamic construction of a compact trie in
                 which strings are kept in buckets. It is simple, fast,
                 and efficient. We experimentally explore key
                 implementation options and compare burstsort to
                 existing string-sorting algorithms on large and small
                 sets of strings with a range of characteristics. These
                 experiments show that, for large sets of strings,
                 burstsort is almost twice as fast as any previous
                  algorithm, primarily due to a lower rate of cache
                  misses.",
  acknowledgement = ack-nhfb,
  articleno =    "1.5",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
}
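
%%% An illustrative Python sketch of the bucket-trie idea described
%%% above: strings are routed by leading characters into buckets
%%% hanging off a trie, a bucket is burst into a new trie node when
%%% it outgrows a threshold, and an in-order traversal emits the
%%% sorted result.  The threshold and representation are assumptions
%%% of this sketch; the paper's engineered structure differs in many
%%% details.
%%%
%%%     def burstsort(strings, threshold=32):
%%%         root = {"kids": {}, "end": []}
%%%
%%%         def insert(node, s, depth):
%%%             while True:
%%%                 if depth == len(s):
%%%                     node["end"].append(s)   # string exhausted here
%%%                     return
%%%                 slot = node["kids"].get(s[depth])
%%%                 if isinstance(slot, dict):  # descend into child trie
%%%                     node, depth = slot, depth + 1
%%%                     continue
%%%                 bucket = node["kids"].setdefault(s[depth], [])
%%%                 bucket.append(s)
%%%                 if len(bucket) > threshold:  # burst on next character
%%%                     child = {"kids": {}, "end": []}
%%%                     node["kids"][s[depth]] = child
%%%                     for t in bucket:
%%%                         insert(child, t, depth + 1)
%%%                 return
%%%
%%%         for s in strings:
%%%             insert(root, s, 0)
%%%         out = []
%%%
%%%         def collect(node):
%%%             out.extend(node["end"])
%%%             for ch in sorted(node["kids"]):
%%%                 slot = node["kids"][ch]
%%%                 if isinstance(slot, dict):
%%%                     collect(slot)
%%%                 else:
%%%                     out.extend(sorted(slot))  # small in-cache bucket
%%%
%%%         collect(root)
%%%         return out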

@Article{Goh:2004:TAP,
  author =       "Rick Siow Mong Goh and Ian Li-Jin Thng",
  title =        "Twol-amalgamated priority queues",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "9",
  pages =        "1.6:1--1.6:??",
  month =        "????",
  year =         "2004",
  CODEN =        "????",
  DOI =          "http://doi.acm.org/10.1145/1005813.1057625",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Oct 6 16:05:22 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "Priority queues are essential function blocks in
                 numerous applications such as discrete event
                 simulations. This paper describes and exemplifies the
                  ease of obtaining high-performance priority queues
                 using a two-tier list-based structure. This new
                 implementation, called the {\em Twol\/} structure, is
                 amalgamated with three priority queues, namely, the
                 Henriksen's queue, splay tree and skew heap, to enhance
                 the efficiency of these {\em basal\/} priority queue
                 structures. Using a model that combines traditional
                 average case and amortized complexity analysis,
                 Twol-amalgamated priority queues that maintain $N$
                 active events are theoretically proven to offer $O(1)$
                 {\em expected amortized complexity\/} under reasonable
                 assumptions. They are also demonstrated empirically to
                 offer stable near $O(1)$ performance for widely varying
                 priority increment distributions and for queue sizes
                 ranging from 10 to 10 million. Extensive empirical
                 results show that the Twol-amalgamated priority queues
                 consistently outperform those basal structures (i.e.,
                 without the Twol structure) with an average speedup of
                 about three to five times on widely different hardware
                  architectures. These results provide evidence that the
                 Twol-amalgamated priority queues are suitable for
                 implementation in sizable application scenarios such
                 as, but not limited to, large-scale discrete event
                 simulation.",
  acknowledgement = ack-nhfb,
  articleno =    "1.6",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
  keywords =     "algorithm analysis; calendar queue; discrete event
                 simulation; future event list; Henriksen's; pending
                 event set; priority queue; simulator; skew heap; splay
                 tree; tree",
}
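
%%% An illustrative Python sketch of a generic two-tier list-based
%%% event queue: a small sorted "near" tier serves delete-min cheaply
%%% while an unsorted "far" tier absorbs inserts, and a batch is
%%% carved off the far tier whenever the near tier drains.  The
%%% batch-size rule and representation are assumptions of this
%%% sketch; the paper's Twol structure amalgamates a two-tier front
%%% end with Henriksen's queue, splay trees, and skew heaps, which
%%% this sketch does not do.
%%%
%%%     import bisect
%%%
%%%     class TwoTierPQ:
%%%         def __init__(self):
%%%             self.near, self.far, self.bound = [], [], None
%%%
%%%         def insert(self, key):
%%%             if self.bound is not None and key <= self.bound:
%%%                 bisect.insort(self.near, key)  # near tier stays sorted
%%%             else:
%%%                 self.far.append(key)           # O(1) append
%%%
%%%         def delete_min(self):
%%%             if not self.near:
%%%                 self._refill()
%%%             return self.near.pop(0)
%%%
%%%         def _refill(self):
%%%             assert self.far, "delete_min on an empty queue"
%%%             self.far.sort()
%%%             batch = max(1, int(len(self.far) ** 0.5))
%%%             self.near, self.far = self.far[:batch], self.far[batch:]
%%%             self.bound = self.near[-1]  # all far keys are >= bound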

@Article{Ioannidis:2005:ADS,
  author =       "Ioannis Ioannidis and Ananth Grama and Mikhail
                 Atallah",
  title =        "Adaptive data structures for {IP} lookups",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "10",
  pages =        "1.1:1--1.1:??",
  month =        "????",
  year =         "2005",
  CODEN =        "????",
  DOI =          "http://doi.acm.org/10.1145/1064546.1064548",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Oct 6 16:05:40 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "The problem of efficient data structures for IP
                 lookups has been well studied in the literature.
                 Techniques such as LC tries and extensible hashing are
                 commonly used. In this paper, we address the problem of
                 generalizing LC tries, based on traces of past lookups,
                 to provide performance guarantees for memory suboptimal
                 structures. As a specific example, if a memory-optimal
                 (LC) trie takes 6 MB and the total memory at the router
                 is 8 MB, how should the trie be modified to make best
                 use of the 2 MB of excess memory? We present a greedy
                 algorithm for this problem and prove that, if for the
                 optimal data structure there are $b$ fewer memory
                 accesses on average for each lookup compared with the
                 original trie, the solution produced by the greedy
                 algorithm will have at least $9 \times b /11$ fewer
                 memory accesses on average (compared to the original
                 trie). An efficient implementation of this algorithm
                 presents significant additional challenges. We describe
                 an implementation with a time complexity of $O(\xi(d) n
                 \log n)$ and a space complexity of $O(n)$, where $n$ is
                 the number of nodes of the trie and $d$ its depth. The
                 depth of a trie is fixed for a given version of the
                 Internet protocol and is typically $O(\log n)$. In this
                 case, $\xi(d) = O(\log^2 n)$. We also demonstrate
                 experimentally the performance and scalability of the
                 algorithm on actual routing data.",
  acknowledgement = ack-nhfb,
  articleno =    "1.1",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
  keywords =     "IP lookups; level compression",
}

@Article{Lesh:2005:NHI,
  author =       "N. Lesh and J. Marks and A. McMahon and M.
                 Mitzenmacher",
  title =        "New heuristic and interactive approaches to {$2$D}
                 rectangular strip packing",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "10",
  pages =        "1.2:1--1.2:??",
  month =        "????",
  year =         "2005",
  CODEN =        "????",
  DOI =          "http://doi.acm.org/10.1145/1064546.1083322",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Oct 6 16:05:40 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "In this paper, we consider the two-dimensional
                 rectangular strip packing problem. A standard simple
                 heuristic, Bottom-Left-Decreasing (BLD), has been shown
                 to perform quite well in practice. We introduce and
                 demonstrate the effectiveness of BLD*, a stochastic
                 search variation of BLD. While BLD places the
                 rectangles in decreasing order of height, width, area,
                 and perimeter, BLD* successively tries random
                 orderings, chosen from a distribution determined by
                 their Kendall-tau distance from one of these fixed
                 orderings. Our experiments on benchmark problems show
                 that BLD* produces significantly better packings than
                  BLD after only 1 min of computation. We also show that
                  BLD* outperforms recently reported metaheuristics.
                  Furthermore, we observe that people
                 seem able to reason about packing problems extremely
                 well. We incorporate our new algorithms in an
                 interactive system that combines the advantages of
                 computer speed and human reasoning. Using the
                 interactive system, we are able to quickly produce
                 significantly better solutions than BLD* by itself.",
  acknowledgement = ack-nhfb,
  articleno =    "1.2",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
  keywords =     "2D rectangular strip packing; cutting stock/trim;
                 interactive methods",
}
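
%%% An illustrative Python sketch of the stochastic-ordering loop
%%% described above: perturb the height-decreasing order by a few
%%% random adjacent transpositions (i.e., a small Kendall-tau
%%% distance) and keep the best packing seen.  For brevity the
%%% placement routine is a crude next-fit shelf packer, a stand-in
%%% for the paper's bottom-left placement; at least two rectangles,
%%% each fitting the strip width, are assumed.
%%%
%%%     import random
%%%
%%%     def shelf_height(rects, strip_width):
%%%         shelf_w = shelf_h = total = 0
%%%         for w, h in rects:
%%%             if shelf_w + w > strip_width:   # open a new shelf
%%%                 total, shelf_w, shelf_h = total + shelf_h, 0, 0
%%%             shelf_w, shelf_h = shelf_w + w, max(shelf_h, h)
%%%         return total + shelf_h
%%%
%%%     def bld_star_like(rects, strip_width, tries=200, swaps=5, seed=0):
%%%         rng = random.Random(seed)
%%%         base = sorted(rects, key=lambda r: -r[1])  # decreasing height
%%%         best = shelf_height(base, strip_width)
%%%         for _ in range(tries):
%%%             order = base[:]
%%%             for _ in range(swaps):          # nearby random ordering
%%%                 i = rng.randrange(len(order) - 1)
%%%                 order[i], order[i + 1] = order[i + 1], order[i]
%%%             best = min(best, shelf_height(order, strip_width))
%%%         return best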

@Article{Wagner:2005:GCE,
  author =       "Dorothea Wagner and Thomas Willhalm and Christos
                 Zaroliagis",
  title =        "Geometric containers for efficient shortest-path
                 computation",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "10",
  pages =        "1.3:1--1.3:??",
  month =        "????",
  year =         "2005",
  CODEN =        "????",
  DOI =          "http://doi.acm.org/10.1145/1064546.1103378",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Oct 6 16:05:40 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "A fundamental approach in finding efficiently best
                 routes or optimal itineraries in traffic information
                 systems is to reduce the search space (part of graph
                 visited) of the most commonly used shortest path
                 routine (Dijkstra's algorithm) on a suitably defined
                 graph. We investigate reduction of the search space
                 while simultaneously retaining data structures, created
                 during a preprocessing phase, of size linear (i.e.,
                 optimal) to the size of the graph. We show that the
                 search space of Dijkstra's algorithm can be
                 significantly reduced by extracting geometric
                 information from a given layout of the graph and by
                 encapsulating precomputed shortest-path information in
                 resulted geometric objects (containers). We present an
                 extensive experimental study comparing the impact of
                 different types of geometric containers using test data
                 from real-world traffic networks. We also present new
                 algorithms as well as an empirical study for the
                 dynamic case of this problem, where edge weights are
                 subject to change and the geometric containers have to
                  be updated, and show that our new methods are two to
                 three times faster than recomputing everything from
                 scratch. Finally, in an appendix, we discuss the
                 software framework that we developed to realize the
                 implementations of all of our variants of Dijkstra's
                 algorithm. Such a framework is not trivial to achieve
                 as our goal was to maintain a common code base that is,
                 at the same time, small, efficient, and flexible, as we
                 wanted to enhance and combine several variants in any
                 possible way.",
  acknowledgement = ack-nhfb,
  articleno =    "1.3",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
  keywords =     "data structures and algorithms; Dijkstra's algorithm;
                 geometric container; graph algorithms; shortest path;
                 traffic network",
}
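
%%% An illustrative Python sketch of the container idea: for every
%%% directed edge, precompute a bounding box of all targets whose
%%% shortest path leaves through that edge; a query then runs
%%% Dijkstra but relaxes only edges whose box contains the target.
%%% Bounding boxes are one of the container shapes compared in the
%%% paper; everything else here (names, tie-breaking) is an
%%% assumption of this sketch.
%%%
%%%     import heapq
%%%
%%%     def dijkstra_tree(graph, s):
%%%         # graph: node -> list of (neighbor, weight); also records
%%%         # the first edge of the shortest path found to each node.
%%%         dist, first, pq = {s: 0.0}, {}, [(0.0, s)]
%%%         while pq:
%%%             d, u = heapq.heappop(pq)
%%%             if d > dist[u]:
%%%                 continue                    # stale entry
%%%             for v, w in graph[u]:
%%%                 if d + w < dist.get(v, float("inf")):
%%%                     dist[v] = d + w
%%%                     first[v] = (s, v) if u == s else first[u]
%%%                     heapq.heappush(pq, (d + w, v))
%%%         return dist, first
%%%
%%%     def build_containers(graph, coords):
%%%         box = {}
%%%         for s in graph:
%%%             _, first = dijkstra_tree(graph, s)
%%%             for t, e in first.items():
%%%                 x, y = coords[t]
%%%                 x0, y0, x1, y1 = box.get(e, (x, y, x, y))
%%%                 box[e] = (min(x0, x), min(y0, y),
%%%                           max(x1, x), max(y1, y))
%%%         return box
%%%
%%%     def query(graph, coords, box, s, t):
%%%         tx, ty = coords[t]
%%%         dist, pq = {s: 0.0}, [(0.0, s)]
%%%         while pq:
%%%             d, u = heapq.heappop(pq)
%%%             if u == t:
%%%                 return d
%%%             if d > dist[u]:
%%%                 continue
%%%             for v, w in graph[u]:
%%%                 b = box.get((u, v))
%%%                 if not b or not (b[0] <= tx <= b[2]
%%%                                  and b[1] <= ty <= b[3]):
%%%                     continue                # container excludes t
%%%                 if d + w < dist.get(v, float("inf")):
%%%                     dist[v] = d + w
%%%                     heapq.heappush(pq, (d + w, v))
%%%         return float("inf")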

@Article{Lopez-Ortiz:2005:FSS,
  author =       "Alejandro L{\'o}pez-Ortiz and Mehdi Mirzazadeh and
                 Mohammad Ali Safari and Hossein Sheikhattar",
  title =        "Fast string sorting using order-preserving
                 compression",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "10",
  pages =        "1.4:1--1.4:??",
  month =        "????",
  year =         "2005",
  CODEN =        "????",
  DOI =          "http://doi.acm.org/10.1145/1064546.1180611",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Oct 6 16:05:40 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "We give experimental evidence for the benefits of
                 order-preserving compression in sorting algorithms.
                 While, in general, any algorithm might benefit from
                 compressed data because of reduced paging requirements,
                 we identified two natural candidates that would further
                 benefit from order-preserving compression, namely
                 string-oriented sorting algorithms and word-RAM
                 algorithms for keys of bounded length. The word-RAM
                 model has some of the fastest known sorting algorithms
                 in practice. These algorithms are designed for keys of
                 bounded length, usually 32 or 64 bits, which limits
                 their direct applicability for strings. One possibility
                 is to use an order-preserving compression scheme, so
                 that a bounded-key-length algorithm can be applied. For
                  the case of standard algorithms, we took what is
                  considered to be among the fastest non-word-RAM
                  string-sorting algorithms, Fast MKQSort, and measured
                 its performance on compressed data. The Fast MKQSort
                 algorithm of Bentley and Sedgewick is optimized to
                 handle text strings. Our experiments show that
                  order-preserving compression results in savings of
                 approximately 15\% over the same algorithm on
                 noncompressed data. For the word-RAM, we modified
                 Andersson's sorting algorithm to handle variable-length
                 keys. The resulting algorithm is faster than the
                  standard Unix sort by a factor of 1.5. Last, we
                 used an order-preserving scheme that is within a
                 constant additive term of the optimal Hu--Tucker, but
                 requires linear time rather than $O(m \log m)$, where
                 $m = |\Sigma|$ is the size of the alphabet.",
  acknowledgement = ack-nhfb,
  articleno =    "1.4",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
  keywords =     "order-preserving compression; sorting; unit-cost RAM;
                 word-RAM",
}
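
%%% An illustrative Python sketch of the key-packing half of the
%%% idea: pack a bounded-length prefix of each byte string into one
%%% integer so that integer order agrees with lexicographic order,
%%% then sort by the packed word with the full string as tie-breaker.
%%% The paper's near-Hu--Tucker order-preserving compression is
%%% replaced here by plain byte packing, so this shows only why
%%% bounded-length keys make word-RAM sorting applicable to strings.
%%%
%%%     def pack_key(s, width=8):
%%%         # first `width` bytes, zero-padded, read as a big-endian
%%%         # integer; key order equals prefix order for byte strings
%%%         return int.from_bytes(s[:width].ljust(width, b"\x00"), "big")
%%%
%%%     def word_key_sort(strings):
%%%         return sorted(strings, key=lambda s: (pack_key(s), s))
%%%
%%%     assert word_key_sort([b"banana", b"band", b"a"]) == \
%%%            [b"a", b"banana", b"band"]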

@Article{Ribeiro:2005:P,
  author =       "Celso C. Ribeiro and Simone L. Martins",
  title =        "Preface",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "10",
  pages =        "2.1:1--2.1:??",
  month =        "????",
  year =         "2005",
  CODEN =        "????",
  DOI =          "http://doi.acm.org/10.1145/1064546.1180620",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Oct 6 16:05:40 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/jea.bib",
  acknowledgement = ack-nhfb,
  articleno =    "2.1",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
}

@Article{Sinha:2005:URS,
  author =       "Ranjan Sinha and Justin Zobel",
  title =        "Using random sampling to build approximate tries for
                 efficient string sorting",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "10",
  pages =        "2.10:1--2.10:??",
  month =        "????",
  year =         "2005",
  CODEN =        "????",
  DOI =          "http://doi.acm.org/10.1145/1064546.1180622",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Oct 6 16:05:40 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "Algorithms for sorting large datasets can be made more
                 efficient with careful use of memory hierarchies and
                 reduction in the number of costly memory accesses. In
                 earlier work, we introduced burstsort, a new
                 string-sorting algorithm that on large sets of strings
                 is almost twice as fast as previous algorithms,
                 primarily because it is more cache efficient. Burstsort
                 dynamically builds a small trie that is used to rapidly
                 allocate each string to a bucket. In this paper, we
                 introduce new variants of our algorithm: SR-burstsort,
                 DR-burstsort, and DRL-burstsort. These algorithms use a
                 random sample of the strings to construct an
                 approximation to the trie prior to sorting. Our
                 experimental results with sets of over 30 million
                  strings show that the new variants reduce cache misses
                  by up to a further 37\% relative to the original
                  burstsort, while simultaneously reducing instruction
                  counts by up to 24\%. In pathological cases, even
                 further savings can be obtained.",
  acknowledgement = ack-nhfb,
  articleno =    "2.10",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
  keywords =     "cache-aware; cache-conscious; data structure;
                 in-memory; sorting; string",
}

@Article{Bracht:2005:GAA,
  author =       "Evandro C. Bracht and Luis and A. A. Meira and F. K.
                 Miyazawa",
  title =        "A greedy approximation algorithm for the uniform
                 metric labeling problem analyzed by a primal-dual
                 technique",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "10",
  pages =        "2.11:1--2.11:??",
  month =        "????",
  year =         "2005",
  CODEN =        "????",
  DOI =          "http://doi.acm.org/10.1145/1064546.1180623",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Oct 6 16:05:40 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "We consider the uniform metric labeling problem. This
                 NP-hard problem considers how to assign objects to
                 labels respecting assignment and separation costs. The
                 known approximation algorithms are based on solutions
                 of large linear programs and are impractical for
                  moderate- and large-size instances. We present an
                  $8 \log n$-approximation algorithm that can be applied
                  to large-size instances. The algorithm is greedy and is
                  analyzed by a primal-dual technique. We implemented the
                  presented algorithm and two known approximation
                  algorithms and compared them on randomized instances.
                  The gain in time was considerable, with small error
                 a constant factor.",
  acknowledgement = ack-nhfb,
  articleno =    "2.11",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
  keywords =     "approximation algorithms; graph labeling",
}

@Article{deSouza:2005:DMP,
  author =       "Cid C. de Souza and Andre M. Lima and Guido Araujo and
                 Nahri B. Moreano",
  title =        "The datapath merging problem in reconfigurable
                 systems: {Complexity}, dual bounds and heuristic
                 evaluation",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "10",
  pages =        "2.2:1--2.2:??",
  month =        "????",
  year =         "2005",
  CODEN =        "????",
  DOI =          "http://doi.acm.org/10.1145/1064546.1180613",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Oct 6 16:05:40 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "In this paper, we investigate the data path merging
                 problem (DPM) in reconfigurable systems. DPM is modeled
                 as a graph optimization problem and is shown to be {\em
                 NP\/}-hard. An Integer Programming (IP) formulation of
                 the problem is presented and some valid inequalities
                 for the convex hull of integer solutions are
                 introduced. These inequalities form the basis of a
                 branch-and-cut algorithm that we implemented. This
                 algorithm was used to compute lower bounds for a set of
                 DPM instances, allowing us to assess the performance of
                 two heuristics proposed earlier in the literature for
                 the problem. Moreover, the branch-and-cut algorithm
                  also proved to be a valuable tool to solve
                 small-sized DPM instances to optimality.",
  acknowledgement = ack-nhfb,
  articleno =    "2.2",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
  keywords =     "data path merging; heuristics; lower bounds;
                 reconfigurable systems",
}

@Article{Du:2005:IAA,
  author =       "Jingde Du and Stavros G. Kolliopoulos",
  title =        "Implementing approximation algorithms for the
                 single-source unsplittable flow problem",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "10",
  pages =        "2.3:1--2.3:??",
  month =        "????",
  year =         "2005",
  CODEN =        "????",
  DOI =          "http://doi.acm.org/10.1145/1064546.1180614",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Oct 6 16:05:40 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "In the {\em single-source unsplittable flow\/}
                 problem, commodities must be routed simultaneously from
                 a common source vertex to certain sinks in a given
                 graph with edge capacities. The demand of each
                 commodity must be routed along a single path so that
                  the total flow through any edge is at most its
                 capacity. This problem was introduced by Kleinberg
                 [1996a] and generalizes several NP-complete problems. A
                 cost value per unit of flow may also be defined for
                 every edge. In this paper, we implement the
                 2-approximation algorithm of Dinitz et al. [1999] for
                  congestion, which is the best known, and the $(3,
                  1)$-approximation algorithm of Skutella [2002] for
                 congestion and cost, which is the best known bicriteria
                 approximation. We experimentally study the quality of
                 approximation achieved by the algorithms and the effect
                 of heuristics on their performance. We also compare
                 these algorithms against the previous best ones by
                 Kolliopoulos and Stein [1999].",
  acknowledgement = ack-nhfb,
  articleno =    "2.3",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
  keywords =     "approximation algorithms; network flow; unsplittable
                 flow",
}

@Article{Duch:2005:IPM,
  author =       "Amalia Duch and Conrado Mart{\'\i}nez",
  title =        "Improving the performance of multidimensional search
                 using fingers",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "10",
  pages =        "2.4:1--2.4:??",
  month =        "????",
  year =         "2005",
  CODEN =        "????",
  DOI =          "http://doi.acm.org/10.1145/1064546.1180615",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Oct 6 16:05:40 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "We propose two variants of $K$-d trees where {\em
                 fingers\/} are used to improve the performance of
                 orthogonal range search and nearest neighbor queries
                 when they exhibit locality of reference. The
                 experiments show that the second alternative yields
                  significant savings. Although it yields more modest
                  improvements, the first variant does so with much
                  smaller memory requirements and greater simplicity,
                  which makes it more attractive on practical grounds.",
  acknowledgement = ack-nhfb,
  articleno =    "2.4",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
  keywords =     "experimental algorithmics; Finger search; K-d trees;
                 locality; multidimensional data structures;
                 nearest-neighbors searching; orthogonal range
                 searching",
}

@Article{Holzer:2005:CST,
  author =       "Martin Holzer and Frank Schulz and Dorothea Wagner and
                 Thomas Willhalm",
  title =        "Combining speed-up techniques for shortest-path
                 computations",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "10",
  pages =        "2.5:1--2.5:??",
  month =        "????",
  year =         "2005",
  CODEN =        "????",
  DOI =          "http://doi.acm.org/10.1145/1064546.1180616",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Oct 6 16:05:40 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "In practice, computing a shortest path from one node
                 to another in a directed graph is a very common task.
                 This problem is classically solved by Dijkstra's
                 algorithm. Many techniques are known to speed up this
                 algorithm heuristically, while optimality of the
                 solution can still be guaranteed. In most studies, such
                 techniques are considered individually. The focus of
                 our work is {\em combination\/} of speed-up techniques
                 for Dijkstra's algorithm. We consider all possible
                 combinations of four known techniques, namely, {\em
                 goal-directed search}, {\em bidirectional search}, {\em
                 multilevel approach}, and {\em shortest-path
                 containers}, and show how these can be implemented. In
                 an extensive experimental study, we compare the
                 performance of the various combinations and analyze how
                 the techniques harmonize when jointly applied. Several
                 real-world graphs from road maps and public transport
                 and three types of generated random graphs are taken
                 into account.",
  acknowledgement = ack-nhfb,
  articleno =    "2.5",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
  keywords =     "combination; Dijkstra's algorithm; shortest path;
                 speed-up",
}
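
%%% An illustrative Python sketch of the goal-directed technique
%%% alone (A*-style search): Dijkstra's priority becomes tentative
%%% distance plus a lower bound on the remaining distance, here the
%%% Euclidean distance scaled by a maximum speed.  Optimality
%%% requires every edge weight to be at least that bound, an
%%% assumption of this sketch; the paper combines this technique with
%%% bidirectional, multilevel, and container-based ones.
%%%
%%%     import heapq, math
%%%
%%%     def goal_directed(graph, coords, s, t, speed=1.0):
%%%         def h(v):                      # admissible lower bound
%%%             (x1, y1), (x2, y2) = coords[v], coords[t]
%%%             return math.hypot(x1 - x2, y1 - y2) / speed
%%%         dist, pq = {s: 0.0}, [(h(s), s)]
%%%         while pq:
%%%             f, u = heapq.heappop(pq)
%%%             if u == t:
%%%                 return dist[u]
%%%             if f > dist[u] + h(u):
%%%                 continue               # stale entry
%%%             for v, w in graph[u]:
%%%                 if dist[u] + w < dist.get(v, float("inf")):
%%%                     dist[v] = dist[u] + w
%%%                     heapq.heappush(pq, (dist[v] + h(v), v))
%%%         return float("inf")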

@Article{Hyyro:2005:IBP,
  author =       "Heikki Hyyr{\"o} and Kimmo Fredriksson and Gonzalo
                 Navarro",
  title =        "Increased bit-parallelism for approximate and multiple
                 string matching",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "10",
  pages =        "2.6:1--2.6:??",
  month =        "????",
  year =         "2005",
  CODEN =        "????",
  DOI =          "http://doi.acm.org/10.1145/1064546.1180617",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Oct 6 16:05:40 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "Bit-parallelism permits executing several operations
                 simultaneously over a set of bits or numbers stored in
                 a single computer word. This technique permits
                 searching for the approximate occurrences of a pattern
                 of length $m$ in a text of length $n$ in time $O(\lceil
                 m / w \rceil n)$, where $w$ is the number of bits in
                 the computer word. Although this is asymptotically the
                 optimal bit-parallel speedup over the basic $O(mn)$
                 time algorithm, it wastes bit-parallelism's power in
                 the common case where $m$ is much smaller than $w$,
                 since $w - m$ bits in the computer words are unused. In
                 this paper, we explore different ways to increase the
                 bit-parallelism when the search pattern is short.
                 First, we show how multiple patterns can be packed into
                  a single computer word so as to search for all of them
                 simultaneously. Instead of spending $O(rn)$ time to
                 search for $r$ patterns of length $m \leq w / 2$, we
                 need $O(\lceil rm / w \rceil n)$ time. Second, we show
                 how the mechanism permits boosting the search for a
                 single pattern of length $m \leq w / 2$, which can be
                 searched for in $O(\lceil n / \lfloor w / m \rfloor
                 \rceil)$ bit-parallel steps instead of $O(n)$. Third,
                 we show how to extend these algorithms so that the time
                 bounds essentially depend on $k$ instead of $m$, where
                 $k$ is the maximum number of differences permitted.
                 Finally, we show how the ideas can be applied to other
                 problems such as multiple exact string matching and
                 one-against-all computation of edit distance and
                 longest common subsequences. Our experimental results
                 show that the new algorithms work well in practice,
                 obtaining significant speedups over the best existing
                  alternatives, especially for short patterns and a
                  moderate number of allowed differences. This work fills an
                 important gap in the field, where little work has
                 focused on very short patterns.",
  acknowledgement = ack-nhfb,
  articleno =    "2.6",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
  keywords =     "approximate string matching; bit-parallelism; multiple
                 string matching",
}
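
%%% For context, a Python sketch of the single-pattern bit-parallel
%%% baseline (classic Shift-And exact matching, one state bit per
%%% pattern position) that the techniques above generalize by packing
%%% several short patterns, or several search states, into one word.
%%% This is the textbook algorithm, not the paper's extension.
%%%
%%%     def shift_and(text, pattern):
%%%         m = len(pattern)
%%%         masks = {}
%%%         for i, ch in enumerate(pattern):
%%%             masks[ch] = masks.get(ch, 0) | (1 << i)
%%%         accept, state, hits = 1 << (m - 1), 0, []
%%%         for j, ch in enumerate(text):
%%%             state = ((state << 1) | 1) & masks.get(ch, 0)
%%%             if state & accept:
%%%                 hits.append(j - m + 1)   # start of an occurrence
%%%         return hits
%%%
%%%     assert shift_and("abcab", "ab") == [0, 3]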

@Article{Nikolov:2005:SEH,
  author =       "Nikola S. Nikolov and Alexandre Tarassov and
                 J{\"u}rgen Branke",
  title =        "In search for efficient heuristics for minimum-width
                 graph layering with consideration of dummy nodes",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "10",
  pages =        "2.7:1--2.7:??",
  month =        "????",
  year =         "2005",
  CODEN =        "????",
  DOI =          "http://doi.acm.org/10.1145/1064546.1180618",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Oct 6 16:05:40 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "We propose two fast heuristics for solving the NP-hard
                 problem of graph layering with the minimum width and
                 consideration of dummy nodes. Our heuristics can be
                 used at the layer-assignment phase of the Sugiyama
                  method for drawing directed graphs. We evaluate our
                 heuristics by comparing them to the widely used
                 fast-layering algorithms in an extensive computational
                 study with nearly 6000 input graphs. We also
                 demonstrate how the well-known longest-path and
                 Coffman--Graham algorithms can be used for finding
                 narrow layerings with acceptable aesthetic
                 properties.",
  acknowledgement = ack-nhfb,
  articleno =    "2.7",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
  keywords =     "dummy vertices; hierarchical graph drawing; layer
                 assignment; layered graphs; layering",
}
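
%%% A Python sketch of the longest-path layering baseline mentioned
%%% above: in a DAG, each node's layer is one more than the maximum
%%% layer of its predecessors, computed in topological order.  This
%%% minimizes the number of layers, not the width; the paper's
%%% heuristics trade height for narrower layerings.
%%%
%%%     from collections import defaultdict, deque
%%%
%%%     def longest_path_layering(nodes, edges):
%%%         succ = defaultdict(list)
%%%         indeg = {v: 0 for v in nodes}
%%%         for u, v in edges:
%%%             succ[u].append(v)
%%%             indeg[v] += 1
%%%         layer = {v: 0 for v in nodes}
%%%         queue = deque(v for v in nodes if indeg[v] == 0)
%%%         while queue:
%%%             u = queue.popleft()
%%%             for v in succ[u]:
%%%                 layer[v] = max(layer[v], layer[u] + 1)
%%%                 indeg[v] -= 1
%%%                 if indeg[v] == 0:
%%%                     queue.append(v)
%%%         return layer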

@Article{Pemmaraju:2005:AIC,
  author =       "Sriram V. Pemmaraju and Sriram Penumatcha and Rajiv
                 Raman",
  title =        "Approximating interval coloring and max-coloring in
                 chordal graphs",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "10",
  pages =        "2.8:1--2.8:??",
  month =        "????",
  year =         "2005",
  CODEN =        "????",
  DOI =          "http://doi.acm.org/10.1145/1064546.1180619",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Oct 6 16:05:40 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "We consider two coloring problems: interval coloring
                 and max-coloring for chordal graphs. Given a graph $G =
                 (V, E)$ and positive-integral vertex weights $w: V
                 \rightarrow N$, the {\em interval-coloring\/} problem
                 seeks to find an assignment of a real interval $I(u)$
                 to each vertex $u \in V$, such that two constraints are
                 satisfied: (i) for every vertex $u \in V$, $|I(u)| =
                 w(u)$ and (ii) for every pair of adjacent vertices $u$
                 and $v$, $I(u) \cap I(v) = \emptyset$. The goal is to
                 minimize the {\em span\/} $|\cup_{v \in V} I(v)|$. The
                 {\em max-coloring problem\/} seeks to find a proper
                 vertex coloring of $G$ whose color classes $C_1$,
                 $C_2$, \ldots{}, $C_k$, minimize the sum of the weights
                 of the heaviest vertices in the color classes, that is,
                  $\sum_{i=1}^{k} \max_{v \in C_i} w(v)$. Both
                 problems arise in efficient memory allocation for
                 programs. The interval-coloring problem models the
                 compile-time memory allocation problem and has a rich
                 history dating back at least to the 1970s. The
                 max-coloring problem arises in minimizing the total
                 buffer size needed by a dedicated memory manager for
                 programs. In another application, this problem models
                 scheduling of conflicting jobs in batches to minimize
                 the {\em makespan}. Both problems are NP-complete even
                 for interval graphs, although there are constant-factor
                 approximation algorithms for both problems on interval
                 graphs. In this paper, we consider these problems for
                 {\em chordal graphs}, a subclass of perfect graphs.
                 These graphs naturally generalize interval graphs and
                 can be defined as the class of graphs that have no
                 induced cycle of length $> 3$. Recently, a
                 4-approximation algorithm (which we call GeomFit) has
                 been presented for the max-coloring problem on perfect
                 graphs (Pemmaraju and Raman 2005). This algorithm can
                 be used to obtain an interval coloring as well, but
                 without the constant-factor approximation guarantee. In
                 fact, there is no known constant-factor approximation
                 algorithm for the interval-coloring problem on perfect
                 graphs. We study the performance of GeomFit and several
                  simple $O(\log n)$-factor approximation algorithms for
                 both problems. We experimentally evaluate and compare
                 four simple heuristics: first-fit, best-fit, GeomFit,
                 and a heuristic based on partitioning the graph into
                 vertex sets of similar weight. Both for max-coloring
                 and for interval coloring, GeomFit deviates from OPT by
                 about 1.5\%, on average. The performance of first-fit
                 comes close second, deviating from OPT by less than
                 6\%, on average, for both problems. Best-fit comes
                  third, and the graph-partitioning heuristic comes a distant
                 last. Our basic data comes from about 10,000 runs of
                 each of the heuristics for each of the two problems on
                 randomly generated chordal graphs of various sizes,
                 sparsity, and structure.",
  acknowledgement = ack-nhfb,
  articleno =    "2.8",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
  keywords =     "chordal graphs; dynamic storage allocation; graph
                 coloring; perfect graphs",
}
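
%%% An illustrative Python sketch of the first-fit heuristic from the
%%% study: scan vertices in decreasing weight order, give each the
%%% smallest color absent from its neighborhood, and charge each
%%% color class the weight of its heaviest vertex.  The
%%% decreasing-weight scan order is the customary choice, assumed
%%% here rather than quoted from the paper.
%%%
%%%     def first_fit_max_coloring(vertices, adj, weight):
%%%         color = {}
%%%         for v in sorted(vertices, key=lambda v: -weight[v]):
%%%             taken = {color[u] for u in adj[v] if u in color}
%%%             c = 0
%%%             while c in taken:
%%%                 c += 1
%%%             color[v] = c
%%%         heaviest = {}
%%%         for v, c in color.items():
%%%             heaviest[c] = max(heaviest.get(c, 0), weight[v])
%%%         return color, sum(heaviest.values())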

@Article{Santos:2005:TSH,
  author =       "Haroldo G. Santos and Luiz S. Ochi and Marcone J. F.
                 Souza",
  title =        "A {Tabu} search heuristic with efficient
                 diversification strategies for the class\slash teacher
                 timetabling problem",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "10",
  pages =        "2.9:1--2.9:??",
  month =        "????",
  year =         "2005",
  CODEN =        "????",
  DOI =          "http://doi.acm.org/10.1145/1064546.1180621",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Oct 6 16:05:40 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "The Class/Teacher Timetabling Problem (CTTP) deals
                 with the weekly scheduling of encounters between
                 teachers and classes of an educational institution.
                  Since CTTP is an NP-hard problem for nearly all of its
                  variants, the use of heuristic methods for its
                  resolution is justified. This paper presents an
                  efficient Tabu Search (TS) heuristic with two different
                  memory-based diversification strategies for CTTP.
                  Results obtained by applying the method to a set of
                  real-world problems show that it produces better
                  solutions than a previously proposed TS from the
                  literature and reaches good-quality solutions faster.",
  acknowledgement = ack-nhfb,
  articleno =    "2.9",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
  keywords =     "metaheuristics; tabu search; timetabling",
}

@Article{Salmela:2006:MSM,
  author =       "Leena Salmela and Jorma Tarhio and Jari Kyt{\"o}joki",
  title =        "Multipattern string matching with $q$-grams",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "11",
  pages =        "1.1:1--1.1:??",
  month =        "????",
  year =         "2006",
  CODEN =        "????",
  DOI =          "http://doi.acm.org/10.1145/1187436.1187438",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Oct 6 16:06:20 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "We present three algorithms for exact string matching
                 of multiple patterns. Our algorithms are filtering
                 methods, which apply $q$-grams and bit parallelism. We
                 ran extensive experiments with them and compared them
                 with various versions of earlier algorithms, e.g.,
                 different trie implementations of the Aho--Corasick
                 algorithm. All of our algorithms appeared to be
                 substantially faster than earlier solutions for sets of
                  1,000--10,000 patterns, and the good performance of two
                  of them extends to 100,000 patterns. The gain comes
                  from the improved filtering efficiency provided by
                  $q$-grams.",
  acknowledgement = ack-nhfb,
  articleno =    "1.1",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
  keywords =     "content scanning; intrusion detection; multiple string
                 matching",
}

@Article{Sinha:2006:CES,
  author =       "Ranjan Sinha and Justin Zobel and David Ring",
  title =        "Cache-efficient string sorting using copying",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "11",
  pages =        "1.2:1--1.2:??",
  month =        "????",
  year =         "2006",
  CODEN =        "????",
  DOI =          "http://doi.acm.org/10.1145/1187436.1187439",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Oct 6 16:06:20 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "Burstsort is a cache-oriented sorting technique that
                 uses a dynamic trie to efficiently divide large sets of
                 string keys into related subsets small enough to sort
                 in cache. In our original burstsort, string keys
                 sharing a common prefix were managed via a bucket of
                 pointers represented as a list or array; this approach
                 was found to be up to twice as fast as the previous
                 best string sorts, mostly because of a sharp reduction
                 in out-of-cache references. In this paper, we introduce
                 C-burstsort, which copies the unexamined tail of each
                 key to the bucket and discards the original key to
                 improve data locality. On both Intel and PowerPC
                 architectures, and on a wide range of string types, we
                 show that sorting is typically twice as fast as our
                 original burstsort and four to five times faster than
                 multikey quicksort and previous radixsorts. A variant
                 that copies both suffixes and record pointers to
                 buckets, CP-burstsort, uses more memory, but provides
                 stable sorting. In current computers, where performance
                 is limited by memory access latencies, these new
                 algorithms can dramatically reduce the time needed for
                 internal sorting of large numbers of strings.",
  acknowledgement = ack-nhfb,
  articleno =    "1.2",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
  keywords =     "algorithms; cache; experimental algorithms; sorting;
                 string management; tries",
}

@Article{Penner:2006:CFI,
  author =       "Michael Penner and Viktor K. Prasanna",
  title =        "Cache-Friendly implementations of transitive closure",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "11",
  pages =        "1.3:1--1.3:??",
  month =        "????",
  year =         "2006",
  CODEN =        "????",
  DOI =          "http://doi.acm.org/10.1145/1187436.1210586",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Oct 6 16:06:20 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "The topic of cache performance has been well studied
                  in recent years. Compiler optimizations exist, and
                  optimizations have been carried out for many problems.
                  Much of
                 this work has focused on dense linear algebra problems.
                 At first glance, the Floyd--Warshall algorithm appears
                 to fall into this category. In this paper, we begin by
                 applying two standard cache-friendly optimizations to
                 the Floyd--Warshall algorithm and show limited
                 performance improvements. We then discuss the
                 unidirectional space time representation (USTR). We
                 show analytically that the USTR can be used to reduce
                 the amount of processor-memory traffic by a factor of
                 $O(\sqrt C)$, where $C$ is the cache size, for a large
                 class of algorithms. Since the USTR leads to a tiled
                 implementation, we develop a tile size selection
                 heuristic to intelligently narrow the search space for
                 the tile size that minimizes total execution time.
                 Using the USTR, we develop a cache-friendly
                 implementation of the Floyd--Warshall algorithm. We
                 show experimentally that this implementation minimizes
                 the level-1 and level-2 cache misses and TLB misses
                 and, therefore, exhibits the best overall performance.
                 Using this implementation, we show a $2 \times$
                 improvement in performance over the best compiler
                 optimized implementation on three different
                 architectures. Finally, we show analytically that our
                 implementation of the Floyd--Warshall algorithm is
                 asymptotically optimal with respect to processor-memory
                 traffic. We show experimental results for the Pentium
                 III, Alpha, and MIPS R12000 machines using problem
                 sizes between 1024 and 2048 vertices. We demonstrate
                 improved cache performance using the Simplescalar
                 simulator.",
  acknowledgement = ack-nhfb,
  articleno =    "1.3",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
  keywords =     "data structures; Floyd--Warshall algorithm; systolic
                 array algorithms",
}
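
%%% Tiling is the locality idea behind such implementations; the
%%% following Python sketch is the standard blocked Floyd--Warshall
%%% (diagonal tile, then its row and column panels, then the rest,
%%% for each k-tile), not the paper's USTR-derived variant.  D is a
%%% dense distance matrix, mutated in place.
%%%
%%%     def fw_tile(D, i0, i1, j0, j1, k0, k1):
%%%         for k in range(k0, k1):
%%%             rk = D[k]
%%%             for i in range(i0, i1):
%%%                 dik, ri = D[i][k], D[i]
%%%                 for j in range(j0, j1):
%%%                     if dik + rk[j] < ri[j]:
%%%                         ri[j] = dik + rk[j]
%%%
%%%     def blocked_floyd_warshall(D, B=64):
%%%         n = len(D)
%%%         for kb in range(0, n, B):
%%%             ke = min(kb + B, n)
%%%             fw_tile(D, kb, ke, kb, ke, kb, ke)       # diagonal
%%%             for jb in range(0, n, B):                # row panel
%%%                 if jb != kb:
%%%                     fw_tile(D, kb, ke, jb, min(jb + B, n), kb, ke)
%%%             for ib in range(0, n, B):                # column panel
%%%                 if ib != kb:
%%%                     fw_tile(D, ib, min(ib + B, n), kb, ke, kb, ke)
%%%             for ib in range(0, n, B):                # remainder
%%%                 for jb in range(0, n, B):
%%%                     if ib != kb and jb != kb:
%%%                         fw_tile(D, ib, min(ib + B, n),
%%%                                 jb, min(jb + B, n), kb, ke)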

@Article{Goshi:2006:ADM,
  author =       "Justin Goshi and Richard E. Ladner",
  title =        "Algorithms for dynamic multicast key distribution",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "11",
  pages =        "1.4:1--1.4:??",
  month =        "????",
  year =         "2006",
  CODEN =        "????",
  DOI =          "http://doi.acm.org/10.1145/1187436.1210587",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Oct 6 16:06:20 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "We study the problem of multicast key distribution for
                 group security. Secure group communication systems
                 typically rely on a group key, which is a secret shared
                 among the members of the group. This key is used to
                 provide privacy by encrypting all group communications.
                 Because groups can be large and highly dynamic, it
                 becomes necessary to change the group key in a scalable
                 and secure fashion when members join and leave the
                 group. We present a series of algorithms for solving
                 this problem based on key trees. The algorithms attempt
                 to minimize the worst-case communication cost of
                 updates by maintaining balanced key tree structures. We
                 focus on the trade-off between the communication cost
                  due to the structure of the tree and that due to
                 the overhead of restructuring the tree to maintain its
                 balanced structure. The algorithms are analyzed for
                 worst-case tree structure bounds and evaluated
                 empirically via simulations.",
  acknowledgement = ack-nhfb,
  articleno =    "1.4",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
  keywords =     "dynamic key distribution; experimental algorithms;
                 multicast",
}
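
%%% An illustrative Python sketch of the key-tree mechanics behind
%%% such schemes: members are leaves of a complete binary tree
%%% (heap-style indices, root = 1; n_leaves a power of two is assumed
%%% here), each member holds the keys on its leaf-to-root path, and a
%%% departure replaces every key on that path, sending each new key
%%% encrypted under the sibling subtree's key, for O(log n) messages.
%%% The paper's algorithms concern keeping such trees balanced under
%%% joins and leaves, which this sketch does not attempt.
%%%
%%%     def rekey_on_leave(n_leaves, leaf_index):
%%%         node = n_leaves + leaf_index    # heap index of leaving leaf
%%%         plan = []
%%%         while node > 1:
%%%             parent, sibling = node // 2, node ^ 1
%%%             # parent's key is replaced; the new key travels in one
%%%             # message, encrypted under the sibling subtree's key
%%%             plan.append((parent, sibling))
%%%             node = parent
%%%         return plan
%%%
%%%     assert rekey_on_leave(8, 3) == [(5, 10), (2, 4), (1, 3)]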

@Article{Aleksandrov:2006:PPG,
  author =       "Lyudmil Aleksandrov and Hristo Djidjev and Hua Guo and
                 Anil Maheshwari",
  title =        "Partitioning planar graphs with costs and weights",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "11",
  pages =        "1.5:1--1.5:??",
  month =        "????",
  year =         "2006",
  CODEN =        "????",
  DOI =          "http://doi.acm.org/10.1145/1187436.1210588",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Oct 6 16:06:20 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "A graph separator is a set of vertices or edges whose
                 removal divides an input graph into components of
                 bounded size. This paper describes new algorithms for
                 computing separators in planar graphs as well as
                 techniques that can be used to speed up the
                 implementation of graph partitioning algorithms and
                 improve the partition quality. In particular, we
                 consider planar graphs with costs and weights on the
                 vertices, where weights are used to estimate the sizes
                 of the partitions and costs are used to estimate the
                 size of the separator. We show that in these graphs one
                 can always find a small cost separator (consisting of
                 vertices or edges) that partitions the graph into
                 components of bounded weight. We describe
                 implementations of the partitioning algorithms and
                 discuss results of our experiments.",
  acknowledgement = ack-nhfb,
  articleno =    "1.5",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
  keywords =     "graph algorithms; graph partitioning algorithms; graph
                 separators; implementation",
}

@Article{Ilinkin:2006:HEC,
  author =       "Ivaylo Ilinkin and Ravi Janardan and Michiel Smid and
                 Eric Johnson and Paul Castillo and J{\"o}rg Schwerdt",
  title =        "Heuristics for estimating contact area of supports in
                 layered manufacturing",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "11",
  pages =        "1.6:1--1.6:??",
  month =        "????",
  year =         "2006",
  CODEN =        "????",
  DOI =          "http://doi.acm.org/10.1145/1187436.1210589",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Oct 6 16:06:20 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "Layered manufacturing is a technology that allows
                 physical prototypes of three-dimensional (3D) models to
                 be built directly from their digital representation, as
                 a stack of two-dimensional (2D) layers. A key design
                 problem here is the choice of a suitable direction in
                 which the digital model should be oriented and built so
                 as to minimize the area of contact between the
                 prototype and temporary support structures that are
                 generated during the build. Devising an efficient
                 algorithm for computing such a direction has remained a
                 difficult problem for quite some time. In this paper, a
                 suite of efficient and practical heuristics is
                 presented for estimating the minimum contact area. Also
                 given is a technique for evaluating the quality of the
                 estimate provided by any heuristic, which does not
                 require knowledge of the (unknown and hard-to-compute)
                 optimal solution; instead, it provides an indirect
                 upper bound on the quality of the estimate via two
                 relatively easy-to-compute quantities. The algorithms
                 are based on various techniques from computational
                 geometry, such as ray-shooting, convex hulls, Boolean
                 operations on polygons, and spherical arrangements, and
                 have been implemented and tested. Experimental results
                 on a wide range of real-world models show that the
                 heuristics perform quite well in practice.",
  acknowledgement = ack-nhfb,
  articleno =    "1.6",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
  keywords =     "algorithm implementation and testing; computational
                 geometry",
}

@Article{Pearce:2006:DTS,
  author =       "David J. Pearce and Paul H. J. Kelly",
  title =        "A dynamic topological sort algorithm for directed
                 acyclic graphs",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "11",
  pages =        "1.7:1--1.7:??",
  month =        "????",
  year =         "2006",
  CODEN =        "????",
  DOI =          "http://doi.acm.org/10.1145/1187436.1210590",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Oct 6 16:06:20 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "We consider the problem of maintaining the topological
                 order of a directed acyclic graph (DAG) in the presence
                 of edge insertions and deletions. We present a new
                 algorithm and, although this has inferior time
                 complexity compared with the best previously known
                 result, we find that its simplicity leads to better
                 performance in practice. In addition, we provide an
                 empirical comparison against the three main
                 alternatives over a large number of random DAGs. The
                 results show our algorithm is the best for sparse
                 digraphs and only a constant factor slower than the
                 best on dense digraphs.",
  acknowledgement = ack-nhfb,
  articleno =    "1.7",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
  keywords =     "dynamic graph algorithms; topological sort",
}
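
%%% Editor's sketch (Pearce:2006:DTS): the flavor of the dynamic algorithm,
%%% reconstructed from the usual affected-region approach (the entry itself
%%% gives no pseudocode, so details here are editorial).  A topological order
%%% is kept as a node -> index map; inserting an edge (x, y) with
%%% ord[y] < ord[x] only reorders the nodes found by two bounded searches,
%%% placing those that reach x before those reachable from y.
%%%
%%%     def insert_edge(adj, radj, ord_, pos, x, y):
%%%         """adj/radj: out/in adjacency sets; ord_: node -> index;
%%%         pos: index -> node.  Raises if the edge would close a cycle."""
%%%         adj[x].add(y); radj[y].add(x)
%%%         lb, ub = ord_[y], ord_[x]
%%%         if lb >= ub:
%%%             return                        # order still valid
%%%         fwd = bounded_dfs(adj, ord_, y, lambda o: o < ub, x)
%%%         bwd = bounded_dfs(radj, ord_, x, lambda o: o > lb, None)
%%%         nodes = sorted(bwd, key=ord_.get) + sorted(fwd, key=ord_.get)
%%%         slots = sorted(ord_[v] for v in nodes)
%%%         for v, i in zip(nodes, slots):    # reassign the freed indices
%%%             ord_[v] = i; pos[i] = v
%%%
%%%     def bounded_dfs(adj, ord_, start, in_window, forbidden):
%%%         seen, stack = {start}, [start]
%%%         while stack:
%%%             v = stack.pop()
%%%             for w in adj[v]:
%%%                 if w == forbidden:
%%%                     raise ValueError("edge would create a cycle")
%%%                 if w not in seen and in_window(ord_[w]):
%%%                     seen.add(w); stack.append(w)
%%%         return seen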

@Article{Flammini:2006:RAF,
  author =       "Michele Flammini and Alfredo Navarra and St{\'e}phane
                 P{\'e}rennes",
  title =        "The ``real'' approximation factor of the {MST}
                 heuristic for the minimum energy broadcasting",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "11",
  pages =        "2.10:1--2.10:??",
  month =        "????",
  year =         "2006",
  CODEN =        "????",
  DOI =          "http://doi.acm.org/10.1145/1187436.1216587",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Oct 6 16:06:20 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "This paper deals with one of the most studied problems
                 in the last few years in the field of wireless
                 communication in ad-hoc networks. The problem consists
                 of reducing the total energy consumption of wireless
                 radio stations distributed over a given area of
                 interest in order to perform a broadcast, a basic
                 pattern of communication. Recently, a tight
                 6-approximation of the minimum spanning tree heuristic
                 has been proven. While such a bound is theoretically
                 optimal if compared to the known lower bound of 6,
                 there is an obvious gap with practical experimental
                 results. By extensive experiments, proposing a new
                 technique to generate input instances and supported by
                 theoretical results, we show how the approximation
                 ratio can be actually considered close to 4 for a
                 ``real-world'' set of instances. We consider, in fact,
                 instances more representative of common practice,
                 which are usually composed of a considerable number
                 of nodes distributed uniformly at random inside the
                 area of interest.",
  acknowledgement = ack-nhfb,
  articleno =    "2.10",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
  keywords =     "ad-hoc networks; broadcast; energy saving; spanning
                 tree",
}
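
%%% Editor's sketch (Flammini:2006:RAF): the MST heuristic being measured,
%%% under the standard energy model (power ~ distance ** alpha); an editorial
%%% illustration, not the authors' experimental code.
%%%
%%%     import math
%%%
%%%     def mst_broadcast_energy(points, source=0, alpha=2.0):
%%%         """Total power when broadcasting over an MST rooted at source:
%%%         each node transmits far enough to reach its farthest child."""
%%%         n = len(points)
%%%         d = lambda i, j: math.dist(points[i], points[j])
%%%         parent = [None] * n               # Prim's algorithm from source
%%%         best = [math.inf] * n; best[source] = 0.0
%%%         done = [False] * n
%%%         for _ in range(n):
%%%             u = min((i for i in range(n) if not done[i]),
%%%                     key=best.__getitem__)
%%%             done[u] = True
%%%             for v in range(n):
%%%                 if not done[v] and d(u, v) < best[v]:
%%%                     best[v], parent[v] = d(u, v), u
%%%         power = [0.0] * n
%%%         for v in range(n):
%%%             if parent[v] is not None:     # farthest-child power per node
%%%                 power[parent[v]] = max(power[parent[v]],
%%%                                        d(parent[v], v) ** alpha)
%%%         return sum(power)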

@Article{Nikoletseas:2006:JSS,
  author =       "Sotiris Nikoletseas",
  title =        "{JEA Special Section}",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "11",
  pages =        "2.1:1--2.1:??",
  month =        "????",
  year =         "2006",
  CODEN =        "????",
  DOI =          "http://doi.acm.org/10.1145/1187436.1216578",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Oct 6 16:06:20 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/jea.bib",
  acknowledgement = ack-nhfb,
  articleno =    "2.1",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
}

@Article{Fahle:2006:FBB,
  author =       "Torsten Fahle and Karsten Tiemann",
  title =        "A faster branch-and-bound algorithm for the test-cover
                 problem based on set-covering techniques",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "11",
  pages =        "2.2:1--2.2:??",
  month =        "????",
  year =         "2006",
  CODEN =        "????",
  DOI =          "http://doi.acm.org/10.1145/1187436.1216579",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Oct 6 16:06:20 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "The test-cover problem asks for the minimal number of
                 tests needed to uniquely identify a disease, infection,
                 etc. A collection of branch-and-bound algorithms was
                 proposed by De Bontridder et al. [2002]. Based on their
                 work, we introduce several improvements that are
                 compatible with all techniques described in De
                 Bontridder et al. [2002] and the more general setting
                 of {\em weighted\/} test-cover problems. We present a
                 faster data structure, cost-based variable fixing, and
                 adapt well-known set-covering techniques, including
                 Lagrangian relaxation and upper-bound heuristics. The
                 resulting algorithm solves benchmark instances up to 10
                 times faster than the former approach and up to 100
                 times faster than a general MIP solver.",
  acknowledgement = ack-nhfb,
  articleno =    "2.2",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
  keywords =     "branch-and-bound; Lagrangian relaxation; set-cover
                 problem; test-cover problem; variable fixing",
}
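
%%% Editor's sketch (Fahle:2006:FBB): the underlying problem and a plain
%%% branch-and-bound, without the paper's set-covering bounds or variable
%%% fixing.  A test "separates" an item pair if it contains exactly one of
%%% the two items; a test cover separates every pair.  Assumes a feasible
%%% instance.
%%%
%%%     from itertools import combinations
%%%
%%%     def test_cover(n_items, tests):
%%%         pairs = list(combinations(range(n_items), 2))
%%%         sep = [frozenset(p for p in pairs if (p[0] in t) != (p[1] in t))
%%%                for t in tests]
%%%         best = [list(range(len(tests)))]     # incumbent: take all tests
%%%
%%%         def bb(i, chosen, covered):
%%%             if len(covered) == len(pairs):
%%%                 if len(chosen) < len(best[0]):
%%%                     best[0] = list(chosen)   # better cover found
%%%                 return
%%%             if i == len(tests) or len(chosen) + 1 >= len(best[0]):
%%%                 return                       # exhausted, or bound prunes
%%%             bb(i + 1, chosen + [i], covered | sep[i])  # take test i
%%%             bb(i + 1, chosen, covered)                 # skip test i
%%%
%%%         bb(0, [], frozenset())
%%%         return best[0]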

@Article{Leone:2006:FPN,
  author =       "Pierre Leone and Jose Rolim and Paul Albuquerque and
                 Christian Mazza",
  title =        "A framework for probabilistic numerical evaluation of
                 sensor networks: a case study of a localization
                 protocol",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "11",
  pages =        "2.3:1--2.3:??",
  month =        "????",
  year =         "2006",
  CODEN =        "????",
  DOI =          "http://doi.acm.org/10.1145/1187436.1216580",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Oct 6 16:06:20 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "In this paper we show how to use stochastic estimation
                 methods to investigate topological properties of sensor
                 networks as well as the behavior of dynamical processes
                 on these networks. The framework is particularly
                 important for studying problems for which no
                 theoretical results are known or for which existing
                 results cannot be directly applied in practice, for
                 instance, when only asymptotic results are
                 available. We also interpret Russo's formula in the
                 context of sensor networks and thus obtain practical
                 information on their reliability. As a case study, we
                 analyze a localization protocol for wireless sensor
                 networks and validate our approach by numerical
                 experiments. Finally, we mention three applications of
                 our approach: estimating the number of pivotal sensors
                 in a real network, minimizing the number of such
                 sensors for robustness purposes during the network
                 design, and estimating the distance between successive
                 localized positions for mobile sensor networks.",
  acknowledgement = ack-nhfb,
  articleno =    "2.3",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
  keywords =     "localization process and reliability; sensor networks;
                 stochastic recursive estimation",
}

@Article{Festa:2006:GPR,
  author =       "Paola Festa and Panos M. Pardalos and Leonidas S.
                 Pitsoulis and Mauricio G. C. Resende",
  title =        "{GRASP} with path relinking for the weighted {MAXSAT}
                 problem",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "11",
  pages =        "2.4:1--2.4:??",
  month =        "????",
  year =         "2006",
  CODEN =        "????",
  DOI =          "http://doi.acm.org/10.1145/1187436.1216581",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Oct 6 16:06:20 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "A GRASP with path relinking for finding good-quality
                 solutions of the weighted maximum satisfiability
                 problem (MAX-SAT) is described in this paper. GRASP, or
                 Greedy Randomized Adaptive Search Procedure, is a
                 randomized multistart metaheuristic, where, at each
                 iteration, locally optimal solutions are constructed,
                 each independent of the others. Previous experimental
                 results indicate its effectiveness for solving weighted
                 MAX-SAT instances. Path relinking is a procedure used
                 to intensify the search around good-quality isolated
                 solutions that have been produced by the GRASP
                 heuristic. Experimental comparison of the pure GRASP
                 (without path relinking) and the GRASP with path
                 relinking illustrates the effectiveness of path
                 relinking in decreasing the average time needed to find
                 a good-quality solution for the weighted maximum
                 satisfiability problem.",
  acknowledgement = ack-nhfb,
  articleno =    "2.4",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
  keywords =     "algorithms; experimentation; GRASP; heuristics; path
                 relinking; performance; time-to-target plots",
}
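
%%% Editor's sketch (Festa:2006:GPR): the GRASP + path-relinking skeleton in
%%% miniature.  Two editorial simplifications: construction is a plain random
%%% assignment (the real GRASP builds greedily from a restricted candidate
%%% list), and relinking walks the differing variables in index order rather
%%% than greedily.  Clauses are (weight, [literals]) with signed ints.
%%%
%%%     import random
%%%
%%%     def weight(clauses, x):
%%%         return sum(w for w, lits in clauses
%%%                    if any(x[abs(l) - 1] == (l > 0) for l in lits))
%%%
%%%     def local_search(clauses, x):
%%%         improved = True
%%%         while improved:
%%%             improved, base = False, weight(clauses, x)
%%%             for i in range(len(x)):
%%%                 x[i] = not x[i]              # try flipping variable i
%%%                 if weight(clauses, x) > base:
%%%                     improved = True; break
%%%                 x[i] = not x[i]
%%%         return x
%%%
%%%     def path_relink(clauses, start, guide):
%%%         cur, best = list(start), list(start)
%%%         for i in [j for j in range(len(start)) if start[j] != guide[j]]:
%%%             cur[i] = guide[i]                # one step toward the guide
%%%             if weight(clauses, cur) > weight(clauses, best):
%%%                 best = list(cur)
%%%         return best
%%%
%%%     def grasp(clauses, n_vars, iters=50):
%%%         elite = None
%%%         for _ in range(iters):
%%%             x = local_search(clauses, [random.random() < 0.5
%%%                                        for _ in range(n_vars)])
%%%             if elite is not None:            # intensify around elite
%%%                 x = max(x, path_relink(clauses, x, elite),
%%%                         key=lambda s: weight(clauses, s))
%%%             if elite is None or weight(clauses, x) > weight(clauses, elite):
%%%                 elite = x
%%%         return elite, weight(clauses, elite)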

@Article{Mehlhorn:2006:IMC,
  author =       "Kurt Mehlhorn and Dimitrios Michail",
  title =        "Implementing minimum cycle basis algorithms",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "11",
  pages =        "2.5:1--2.5:??",
  month =        "????",
  year =         "2006",
  CODEN =        "????",
  DOI =          "http://doi.acm.org/10.1145/1187436.1216582",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Oct 6 16:06:20 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "In this paper, we consider the problem of computing a
                 minimum cycle basis of an undirected graph $G = (V,
                 E)$ with $n$ vertices and $m$ edges. We describe an
                 efficient implementation of an $O(m^3 + mn^2 \log n)$
                 algorithm. For sparse graphs, this is the currently
                 best-known algorithm. This algorithm's running time can
                 be partitioned into two parts with time $O(m^3)$ and
                 $O(m^2 n + mn^2 \log n)$, respectively. Our
                 experimental findings imply that for random graphs the
                 true bottleneck of a sophisticated implementation is
                 the $O(m^2 n + mn^2 \log n)$ part. A straightforward
                 implementation would require $\Omega(n m)$
                 shortest-path computations. Thus, we develop several
                 heuristics in order to get a practical algorithm. Our
                 experiments show that in random graphs our techniques
                 result in a significant speed-up. Based on our
                 experimental observations, we combine the two
                 fundamentally different approaches to compute a minimum
                 cycle basis to obtain a new hybrid algorithm with
                 running time $O(m^2 n^2)$. The hybrid algorithm is very
                 efficient, in practice, for random dense unweighted
                 graphs. Finally, we compare these two algorithms with a
                 number of previous implementations for finding a
                 minimum cycle basis of an undirected graph.",
  acknowledgement = ack-nhfb,
  articleno =    "2.5",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
  keywords =     "cycle basis; graph algorithms",
}

@Article{Heinrich-Litan:2006:RCR,
  author =       "Laura Heinrich-Litan and Marco E. L{\"u}bbecke",
  title =        "Rectangle covers revisited computationally",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "11",
  pages =        "2.6:1--2.6:??",
  month =        "????",
  year =         "2006",
  CODEN =        "????",
  DOI =          "http://doi.acm.org/10.1145/1187436.1216583",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Oct 6 16:06:20 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "We consider the problem of covering an orthogonal
                 polygon with a minimum number of axis-parallel
                 rectangles from a computational point of view. We
                 propose an integer program which is the first general
                 approach to obtain provably optimal solutions to this
                 well-studied NP-hard problem. It applies to common
                 variants like covering only the corners or the boundary
                 of the polygon and also to the weighted case. In
                 experiments, it turns out that the linear programming
                 relaxation is extremely tight and rounding a fractional
                 solution is an immediate high-quality heuristic. We
                 obtain excellent experimental results for polygons
                 originating from VLSI design, fax data sheets, black
                 and white images, and for random instances. Making use
                 of the dual linear program, we propose a stronger lower
                 bound on the optimum, namely, the cardinality of a
                 fractional stable set. Furthermore, we outline ideas
                 how to make use of this bound in primal--dual-based
                 algorithms. We give partial results, which make us
                 believe that our proposals have a strong potential to
                 settle the main open problem in the area: To find a
                 constant factor approximation algorithm for the
                 rectangle cover problem.",
  acknowledgement = ack-nhfb,
  articleno =    "2.6",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
  keywords =     "integer programming; linear programming",
}
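
%%% Editor's note (Heinrich-Litan:2006:RCR): the entry does not reproduce the
%%% integer program; in generic covering form (an assumption, not the paper's
%%% exact formulation) it reads, with $R$ ranging over the axis-parallel
%%% rectangles contained in the polygon $P$:
%%%
%%%     \min \sum_{R} x_R
%%%     \quad \text{s.t.} \sum_{R \ni p} x_R \ge 1
%%%       \text{ for every point } p \text{ of } P \text{ to be covered},
%%%     \quad x_R \in \{0, 1\},
%%%
%%% and the LP relaxation replaces the last constraint by $0 \le x_R \le 1$.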

@Article{Panagopoulou:2006:APN,
  author =       "Panagiota N. Panagopoulou and Paul G. Spirakis",
  title =        "Algorithms for pure {Nash} equilibria in weighted
                 congestion games",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "11",
  pages =        "2.7:1--2.7:??",
  month =        "????",
  year =         "2006",
  CODEN =        "????",
  DOI =          "http://doi.acm.org/10.1145/1187436.1216584",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Oct 6 16:06:20 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "In large-scale or evolving networks, such as the
                 Internet, there is no authority possible to enforce a
                 centralized traffic management. In such situations,
                 game theory, and especially the concepts of Nash
                 equilibria and congestion games [Rosenthal 1973] are a
                 suitable framework for analyzing the equilibrium
                 effects of selfish routes selection to network delays.
                 We focus here on {\em single-commodity\/} networks
                 where selfish users select paths to route their loads
                 (represented by arbitrary integer {\em weights\/}). We
                 assume that individual link delays are equal to the
                 total load of the link. We then focus on the algorithm
                 suggested in Fotakis et al. [2005], i.e., a
                 potential-based method for finding {\em pure\/} Nash
                 equilibria in such networks. A superficial analysis of
                 this algorithm gives an upper bound on its time, which
                 is polynomial in $n$ (the number of users) and the sum
                 of their weights $W$. This bound can be exponential in
                 $n$ when some weights are exponential. We provide
                 strong experimental evidence that this algorithm
                 actually converges to a pure Nash equilibrium in {\em
                 polynomial time}. More specifically, our experimental
                 findings suggest that the running time is a polynomial
                 function of $n$ and $\log W$. In addition, we propose
                 an initial allocation of users to paths that
                 dramatically accelerates this algorithm, compared to an
                 arbitrary initial allocation. A by-product of our
                 research is the discovery of a weighted potential
                 function when link delays are {\em exponential\/} to
                 their loads. This asserts the existence of pure Nash
                 equilibria for these delay functions and extends the
                 result of Fotakis et al. [2005].",
  acknowledgement = ack-nhfb,
  articleno =    "2.7",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
  keywords =     "congestion games; game theory; pure Nash equilibria",
}
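
%%% Editor's sketch (Panagopoulou:2006:APN): potential-based dynamics in the
%%% simplest setting, m parallel links with delay equal to total load.
%%% Users repeatedly best-respond; Rosenthal's potential drops with every
%%% move, so a pure Nash equilibrium is reached.  The greedy heaviest-first
%%% start stands in for the paper's accelerated initial allocation (an
%%% editorial assumption).
%%%
%%%     def pure_nash(weights, m):
%%%         users = sorted(range(len(weights)), key=lambda u: -weights[u])
%%%         link, load = {}, [0] * m
%%%         for u in users:                   # greedy initial allocation
%%%             l = min(range(m), key=load.__getitem__)
%%%             link[u] = l; load[l] += weights[u]
%%%         moved = True
%%%         while moved:                      # best-response dynamics
%%%             moved = False
%%%             for u in users:
%%%                 cur, w = link[u], weights[u]
%%%                 tgt = min(range(m), key=load.__getitem__)
%%%                 if tgt != cur and load[tgt] + w < load[cur]:
%%%                     load[cur] -= w; load[tgt] += w
%%%                     link[u] = tgt; moved = True
%%%         return link, load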

@Article{Mohring:2006:PGS,
  author =       "Rolf H. M{\"o}hring and Heiko Schilling and Birk
                 Sch{\"u}tz and Dorothea Wagner and Thomas Willhalm",
  title =        "Partitioning graphs to speedup {Dijkstra}'s
                 algorithm",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "11",
  pages =        "2.8:1--2.8:??",
  month =        "????",
  year =         "2006",
  CODEN =        "????",
  DOI =          "http://doi.acm.org/10.1145/1187436.1216585",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Oct 6 16:06:20 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "We study an acceleration method for point-to-point
                 shortest-path computations in large and sparse directed
                 graphs with given nonnegative arc weights. The
                 acceleration method is called the {\em arc-flag
                 approach\/} and is based on Dijkstra's algorithm. In
                 the arc-flag approach, we allow a preprocessing of the
                 network data to generate additional information, which
                 is then used to speed up shortest-path queries. In the
                 preprocessing phase, the graph is divided into regions
                 and information is gathered on whether an arc is on a
                 shortest path into a given region. The arc-flag method
                 combined with an appropriate partitioning and a
                 bidirected search achieves an average speedup factor of
                 more than 500 compared to the standard algorithm of
                 Dijkstra on large networks (1 million nodes, 2.5
                 million arcs). This combination narrows down the search
                 space of Dijkstra's algorithm to almost the size of the
                 corresponding shortest path for long-distance
                 shortest-path queries. We conduct an experimental study
                 that evaluates which partitionings are best suited for
                 the arc-flag method. In particular, we examine
                 partitioning algorithms from computational geometry and
                 a multiway arc separator partitioning. The evaluation
                 was done on German road networks. The impact of
                 different partitions on the speedup of the shortest
                 path algorithm is compared. Furthermore, we present an
                 extension of the speedup technique to multiple levels
                 of partitions. With this multilevel variant, the same
                 speedup factors can be achieved with smaller space
                 requirements. It can, therefore, be seen as a
                 compression of the precomputed data that preserves the
                 correctness of the computed shortest paths.",
  acknowledgement = ack-nhfb,
  articleno =    "2.8",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
  keywords =     "acceleration method; Dijkstra's algorithm; road
                 network; shortest path",
}
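
%%% Editor's sketch (Mohring:2006:PGS): the arc-flag idea in miniature, not
%%% the paper's code.  Each node belongs to a region; an arc stores, per
%%% region r, whether it lies on some shortest path into r.  Queries run
%%% Dijkstra but skip unflagged arcs.  Preprocessing below is a naive
%%% all-pairs computation for small graphs; the paper derives flags from
%%% backward searches at region boundaries.
%%%
%%%     import heapq
%%%
%%%     def dijkstra(adj, s, use_arc=lambda u, v: True):
%%%         """adj: {u: [(v, w), ...]} with nonnegative weights w."""
%%%         dist, pq = {s: 0}, [(0, s)]
%%%         while pq:
%%%             d, u = heapq.heappop(pq)
%%%             if d > dist.get(u, float("inf")):
%%%                 continue                  # stale queue entry
%%%             for v, w in adj[u]:
%%%                 if use_arc(u, v) and d + w < dist.get(v, float("inf")):
%%%                     dist[v] = d + w
%%%                     heapq.heappush(pq, (d + w, v))
%%%         return dist
%%%
%%%     def build_flags(adj, region):
%%%         """flags[(u, v)]: regions reachable via a shortest path that
%%%         uses arc (u, v)."""
%%%         dist = {u: dijkstra(adj, u) for u in adj}
%%%         return {(u, v): {region[t] for t in adj
%%%                          if dist[u].get(t) is not None and
%%%                             dist[u][t] == w + dist[v].get(t, float("inf"))}
%%%                 for u in adj for v, w in adj[u]}
%%%
%%%     def query(adj, flags, region, s, t):
%%%         rt = region[t]   # relax only arcs flagged for t's region
%%%         return dijkstra(adj, s, lambda u, v: rt in flags[(u, v)]).get(t)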

@Article{Boukerche:2006:ICC,
  author =       "Azzedine Boukerche and Alba Cristina Magalhaes {Alves
                 De Melo}",
  title =        "Integrating coordinated checkpointing and recovery
                 mechanisms into {DSM} synchronization barriers",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "11",
  pages =        "2.9:1--2.9:??",
  month =        "????",
  year =         "2006",
  CODEN =        "????",
  DOI =          "http://doi.acm.org/10.1145/1187436.1216586",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Oct 6 16:06:20 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "Distributed shared memory (DSM) creates an abstraction
                 of a physical shared memory that parallel programmers
                 can access. Most recent software DSM systems provide
                 relaxed-memory models that guarantee consistency only
                 at synchronization operations, such as locks and
                 barriers. As the main goal of DSM systems is to provide
                 support for long-term computation-intensive
                 applications, checkpointing and recovery mechanisms are
                 highly desirable. This article presents and evaluates
                 the integration of a coordinated checkpointing
                 mechanism into the barrier primitive that is usually
                 provided with many DSM systems. Our results on some
                 popular benchmarks and a real parallel application show
                 that the overhead introduced during the failure-free
                 execution is often small.",
  acknowledgement = ack-nhfb,
  articleno =    "2.9",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
  keywords =     "barrier synchronization; distributed shared memory",
}

@Article{Anonymous:2008:EGC,
  author =       "Anonymous",
  title =        "Engineering graph clustering: {Models} and
                 experimental evaluation",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "12",
  pages =        "1.1:1--1.1:??",
  month =        jun,
  year =         "2008",
  CODEN =        "????",
  DOI =          "http://doi.acm.org/10.1145/1227161.1227162",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Oct 6 16:17:58 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "A promising approach to graph clustering is based on
                 the intuitive notion of intracluster density versus
                 intercluster sparsity. As for the weighted case,
                 clusters should accumulate lots of weight, in contrast
                 to their connection to the remaining graph, which
                 should be light. While both formalizations and
                 algorithms focusing on particular aspects of this
                 rather vague concept have been proposed, no conclusive
                 argument on their appropriateness has been given. In
                 order to deepen the understanding of particular
                 concepts, including both quality assessment as well as
                 designing new algorithms, we conducted an experimental
                 evaluation of graph-clustering approaches. By combining
                 proven techniques from graph partitioning and geometric
                 clustering, we also introduce a new approach that
                 compares favorably.",
  acknowledgement = ack-nhfb,
  articleno =    "1.1",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
  keywords =     "clustering algorithms; experimental evaluation; graph
                 clustering; quality measures",
}
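
%%% Editor's sketch (Anonymous:2008:EGC): the density-versus-sparsity
%%% intuition reduced to one standard number.
%%%
%%%     def coverage(edges, cluster):
%%%         """Fraction of total edge weight falling inside clusters; high
%%%         coverage means dense clusters, and 1 - coverage is the weight
%%%         of the intercluster cut.  edges: (u, v, w); cluster: node -> id."""
%%%         inside = total = 0.0
%%%         for u, v, w in edges:
%%%             total += w
%%%             if cluster[u] == cluster[v]:
%%%                 inside += w
%%%         return inside / total if total else 1.0
%%%
%%%     # coverage([(0, 1, 1.0), (1, 2, 1.0)], {0: 0, 1: 0, 2: 1}) == 0.5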

@Article{Barsky:2008:GAT,
  author =       "Marina Barsky and Ulrike Stege and Alex Thomo and
                 Chris Upton",
  title =        "A graph approach to the threshold all-against-all
                 substring matching problem",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "12",
  pages =        "1.10:1--1.10:??",
  month =        jun,
  year =         "2008",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/1370596.1370601",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Oct 6 16:17:58 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "We present a novel graph model and an efficient
                 algorithm for solving the ``threshold all against all''
                 problem, which involves searching two strings (with
                 length $M$ and $N$, respectively) for all maximal
                 approximate substring matches of length at least $S$,
                 with up to $K$ differences. Our algorithm solves the
                 problem in time $O(M N K^3)$, which is a considerable
                 improvement over the previous known bound for this
                 problem. We also provide experimental evidence that, in
                 practice, our algorithm exhibits a better performance
                 than its worst-case running time.",
  acknowledgement = ack-nhfb,
  articleno =    "1.10",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
  keywords =     "bioinformatics; complexity; string matching",
}

@Article{Dietzfelbinger:2008:DIB,
  author =       "Martin Dietzfelbinger and Martin H{\"u}hne and
                 Christoph Weidling",
  title =        "A dictionary implementation based on dynamic perfect
                 hashing",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "12",
  pages =        "1.11:1--1.11:??",
  month =        jun,
  year =         "2008",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/1370596.1370602",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Oct 6 16:17:58 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "We describe experimental results on an implementation
                 of a dynamic dictionary. The basis of our
                 implementation is ``dynamic perfect hashing'' as
                 described by Dietzfelbinger et al. ({\em SIAM J.
                 Computing 23}, 1994, pp. 738--761), an extension of the
                 storage scheme proposed by Fredman et al. ({\em J.
                 ACM\/} 31, 1984, pp. 538--544). At the top level, a
                 hash function is used to partition the keys to be
                 stored into several sets. On the second level, there is
                 a perfect hash function for each of these sets. This
                 technique guarantees $O(1)$ worst-case time for lookup
                 and expected $O(1)$ amortized time for insertion and
                 deletion, while only linear space is required. We study
                 the practical performance of dynamic perfect hashing
                 and describe improvements of the basic scheme. The
                 focus is on the choice of the hash function (both for
                 integer and string keys), on the efficiency of
                 rehashing, on the handling of small buckets, and on the
                 space requirements of the implementation.",
  acknowledgement = ack-nhfb,
  articleno =    "1.11",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
  keywords =     "data structures; dictionaries; dynamic hashing; hash
                 functions; implementation",
}
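
%%% Editor's sketch (Dietzfelbinger:2008:DIB): the static two-level ("FKS")
%%% scheme the abstract builds on, for distinct non-negative integer keys.
%%% A top-level hash splits the keys into buckets; each bucket gets its own
%%% collision-free table of quadratic size.  The dynamic scheme additionally
%%% rehashes a bucket when an insertion collides, giving expected O(1)
%%% amortized updates.
%%%
%%%     import random
%%%
%%%     class TwoLevelHash:
%%%         P = (1 << 61) - 1          # prime modulus for the hash family
%%%
%%%         def __init__(self, keys):
%%%             self.m = 2 * max(1, len(keys))
%%%             self.a = random.randrange(1, self.P)
%%%             self.b = random.randrange(self.P)
%%%             buckets = [[] for _ in range(self.m)]
%%%             for k in keys:
%%%                 buckets[self._top(k)].append(k)
%%%             self.tables = [self._bucket_table(bs) for bs in buckets]
%%%
%%%         def _top(self, k):
%%%             return ((self.a * k + self.b) % self.P) % self.m
%%%
%%%         def _bucket_table(self, keys):
%%%             if not keys:
%%%                 return None
%%%             size = len(keys) ** 2  # quadratic space => few collisions
%%%             while True:
%%%                 a = random.randrange(1, self.P)
%%%                 b = random.randrange(self.P)
%%%                 table = [None] * size
%%%                 for k in keys:
%%%                     i = ((a * k + b) % self.P) % size
%%%                     if table[i] is not None:
%%%                         break      # collision: retry with fresh (a, b)
%%%                     table[i] = k
%%%                 else:
%%%                     return (a, b, table)
%%%
%%%         def __contains__(self, k):
%%%             t = self.tables[self._top(k)]
%%%             if t is None:
%%%                 return False
%%%             a, b, table = t
%%%             return table[((a * k + b) % self.P) % len(table)] == k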

@Article{Maniscalco:2008:EVA,
  author =       "Michael A. Maniscalco and Simon J. Puglisi",
  title =        "An efficient, versatile approach to suffix sorting",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "12",
  pages =        "1.2:1--1.2:??",
  month =        jun,
  year =         "2008",
  CODEN =        "????",
  DOI =          "http://doi.acm.org/10.1145/1227161.1278374",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Oct 6 16:17:58 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "Sorting the suffixes of a string into lexicographical
                 order is a fundamental task in a number of contexts,
                 most notably lossless compression (Burrows--Wheeler
                 transformation) and text indexing (suffix arrays). Most
                 approaches to suffix sorting produce a sorted array of
                 suffixes directly, continually moving suffixes into
                 their final place in the array until the ordering is
                 complete. In this article, we describe a novel and
                 resource-efficient (time and memory) approach to suffix
                 sorting, which works in a complementary way --- by
                 assigning each suffix its rank in the final ordering,
                 before converting to a sorted array, if necessary, once
                 all suffixes are ranked. We layer several powerful
                 extensions on this basic idea and show experimentally
                 that our approach is superior to other leading
                 algorithms in a variety of real-world contexts.",
  acknowledgement = ack-nhfb,
  articleno =    "1.2",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
  keywords =     "Burrows--Wheeler transform; suffix array; suffix
                 sorting; suffix tree",
}
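
%%% Editor's sketch (Maniscalco:2008:EVA): rank-first suffix sorting shown
%%% with textbook prefix doubling, not the authors' algorithm: ranks are
%%% refined by doubling the compared prefix length until all are distinct,
%%% and only then inverted into the sorted suffix array.
%%%
%%%     def suffix_ranks(s):
%%%         n = len(s)
%%%         if n == 0:
%%%             return []
%%%         rank, k = [ord(c) for c in s], 1
%%%         while True:
%%%             key = lambda i: (rank[i], rank[i + k] if i + k < n else -1)
%%%             order = sorted(range(n), key=key)
%%%             new = [0] * n
%%%             for j in range(1, n):  # equal key pairs share a rank
%%%                 new[order[j]] = new[order[j - 1]] + \
%%%                     (key(order[j]) != key(order[j - 1]))
%%%             rank = new
%%%             if rank[order[-1]] == n - 1:
%%%                 return rank        # all ranks distinct: done
%%%             k *= 2
%%%
%%%     def suffix_array(s):
%%%         sa = [0] * len(s)
%%%         for i, r in enumerate(suffix_ranks(s)):
%%%             sa[r] = i              # invert ranks into sorted order
%%%         return sa
%%%
%%%     # suffix_array("banana") == [5, 3, 1, 0, 4, 2]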

@Article{Aloul:2008:SBP,
  author =       "Fadi A. Aloul and Arathi Ramani and Igor L. Markov and
                 Karem A. Sakallah",
  title =        "Symmetry breaking for pseudo-{Boolean} formulas",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "12",
  pages =        "1.3:1--1.3:??",
  month =        jun,
  year =         "2008",
  CODEN =        "????",
  DOI =          "http://doi.acm.org/10.1145/1227161.1278375",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Oct 6 16:17:58 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "Many important tasks in design automation and
                 artificial intelligence can be performed in practice
                 via reductions to Boolean satisfiability (SAT).
                 However, such reductions often omit
                 application-specific structure, thus handicapping tools
                 in their competition with creative engineers.
                 Successful attempts to represent and utilize additional
                 structure on Boolean variables include recent work on
                 0-1 integer linear programming (ILP) and symmetries in
                 SAT. Those extensions gracefully accommodate well-known
                 advances in SAT solving, however, no previous work has
                 attempted to combine both extensions. Our work shows
                 (i) how one can detect and use symmetries in instances
                 of 0-1 ILP, and (ii) what benefits this may bring.",
  acknowledgement = ack-nhfb,
  articleno =    "1.3",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
  keywords =     "graph automorphism",
}

@Article{Pellegrini:2008:EIT,
  author =       "Marco Pellegrini and Giordano Fusco",
  title =        "Efficient {IP} table lookup via adaptive stratified
                 trees with selective reconstructions",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "12",
  pages =        "1.4:1--1.4:??",
  month =        jun,
  year =         "2008",
  CODEN =        "????",
  DOI =          "http://doi.acm.org/10.1145/1227161.1278376",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Oct 6 16:17:58 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "IP address lookup is a critical operation for
                 high-bandwidth routers in packet-switching networks,
                 such as the Internet. The lookup is a nontrivial operation,
                 since it requires searching for the longest prefix,
                 among those stored in a (large) given table, matching
                 the IP address. Ever increasing routing table size,
                 traffic volume, and links speed demand new and more
                 efficient algorithms. Moreover, the imminent move to
                 IPv6 128-bit addresses will soon require a rethinking
                 of previous technical choices. This article describes
                 a new data structure for solving the IP table lookup
                 problem christened the adaptive stratified tree (AST).
                 The proposed solution is based on casting the problem
                 in geometric terms and on repeated application of
                 efficient local geometric optimization routines.
                 Experiments with this approach have shown that in terms
                 of storage, query time, and update time, the AST is
                 on a par with state-of-the-art algorithms based on data
                 compression or string manipulations (and often it is
                 better on some of the measured quantities).",
  acknowledgement = ack-nhfb,
  articleno =    "1.4",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
  keywords =     "data structures; IP table lookup",
}
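
%%% Editor's sketch (Pellegrini:2008:EIT): the lookup semantics only, via a
%%% plain binary trie; the paper's adaptive stratified tree is a far more
%%% engineered geometric structure.  IPv4 addresses and prefixes are ints.
%%%
%%%     class PrefixTrie:
%%%         def __init__(self):
%%%             self.root = {}
%%%
%%%         def insert(self, prefix, length, next_hop):
%%%             node = self.root
%%%             for i in range(length):        # walk bits, MSB first
%%%                 node = node.setdefault((prefix >> (31 - i)) & 1, {})
%%%             node["hop"] = next_hop
%%%
%%%         def lookup(self, addr):
%%%             node, best = self.root, None
%%%             for i in range(32):
%%%                 if "hop" in node:
%%%                     best = node["hop"]     # longest match so far
%%%                 node = node.get((addr >> (31 - i)) & 1)
%%%                 if node is None:
%%%                     return best
%%%             return node.get("hop", best)
%%%
%%%     # t = PrefixTrie(); t.insert(0x0A000000, 8, "A")
%%%     # t.lookup(0x0A0B0C0D) == "A"; t.lookup(0x7F000001) is None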

@Article{Navarro:2008:DSA,
  author =       "Gonzalo Navarro and Nora Reyes",
  title =        "Dynamic spatial approximation trees",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "12",
  pages =        "1.5:1--1.5:??",
  month =        jun,
  year =         "2008",
  CODEN =        "????",
  DOI =          "http://doi.acm.org/10.1145/1227161.1322337",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Oct 6 16:17:58 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "Metric space searching is an emerging technique to
                 address the problem of efficient similarity searching
                 in many applications, including multimedia databases
                 and other repositories handling complex objects.
                 Although promising, the metric space approach is still
                 immature in several aspects that are well established
                 in traditional databases. In particular, most indexing
                 schemes are static, that is, few of them tolerate
                 insertion or deletion of elements at reasonable cost
                 over an existing index. The spatial approximation tree
                 ({\em sa--tree\/}) has been experimentally shown to
                 provide a good tradeoff between construction cost,
                 search cost, and space requirement. However, the {\em
                 sa--tree\/} is static, which renders it unsuitable for
                 many database applications. In this paper, we study
                 different methods to handle insertions and deletions on
                 the {\em sa--tree\/} at low cost. In many cases, the
                 dynamic construction (by successive insertions) is
                 even faster than the previous static construction,
                 and the two are comparable elsewhere. In addition, the
                 dynamic version
                 significantly improves the search performance of {\em
                 sa--trees\/} in virtually all cases. The result is a
                 much more practical data structure that can be useful
                 in a wide range of database applications.",
  acknowledgement = ack-nhfb,
  articleno =    "1.5",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
  keywords =     "multimedia databases; similarity or proximity search;
                 spatial and multidimensional search; spatial
                 approximation tree",
}

@Article{Li:2008:EAC,
  author =       "Keqin Li",
  title =        "Experimental average-case performance evaluation of
                 online algorithms for routing and wavelength assignment
                 and throughput maximization in {WDM} optical networks",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "12",
  pages =        "1.7:1--1.7:??",
  month =        jun,
  year =         "2008",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/1370596.1370598",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Oct 6 16:17:58 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "We investigate the problem of online routing and
                 wavelength assignment and the related throughput
                 maximization problem in wavelength division
                 multiplexing optical networks. It is pointed out that
                 these problems are highly inapproximable, that is, the
                 competitive ratio of any algorithm is at least a
                 polynomial. We evaluate the average-case performance of
                 several online algorithms, which have no knowledge of
                 future arriving connection requests when processing the
                 current connection request. Our experimental results on
                 a wide range of optical networks demonstrate that the
                 average-case performance of these algorithms is very
                 close to optimal.",
  acknowledgement = ack-nhfb,
  articleno =    "1.7",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
  keywords =     "average-case performance; competitive ratio; online
                 algorithm; optical network; routing; wavelength
                 assignment; wavelength division multiplexing",
}
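
%%% Editor's sketch (Li:2008:EAC): one classic online heuristic of the kind
%%% the article evaluates (first-fit wavelength assignment on a fixed route);
%%% chosen and simplified by the editor, not taken from the article.
%%%
%%%     def first_fit_rwa(path_links, used, n_wavelengths):
%%%         """used: {link: set of occupied wavelength indices}.  Returns
%%%         the wavelength assigned to the request, or None if blocked."""
%%%         for w in range(n_wavelengths):
%%%             if all(w not in used[l] for l in path_links):
%%%                 for l in path_links:
%%%                     used[l].add(w)         # reserve w on every link
%%%                 return w
%%%         return None                        # no common free wavelength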

@Article{Biggar:2008:ESS,
  author =       "Paul Biggar and Nicholas Nash and Kevin Williams and
                 David Gregg",
  title =        "An experimental study of sorting and branch
                 prediction",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "12",
  pages =        "1.8:1--1.8:??",
  month =        jun,
  year =         "2008",
  CODEN =        "????",
  DOI =          "http://doi.acm.org/10.1145/1227161.1370599",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Oct 6 16:17:58 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "Sorting is one of the most important and well-studied
                 problems in computer science. Many good algorithms are
                 known which offer various trade-offs in efficiency,
                 simplicity, memory use, and other factors. However,
                 these algorithms do not take into account features of
                 modern computer architectures that significantly
                 influence performance. Caches and branch predictors are
                 two such features and, while there has been a
                 significant amount of research into the cache
                 performance of general purpose sorting algorithms,
                 there has been little research on their branch
                 prediction properties. In this paper, we empirically
                 examine the behavior of the branches in all the most
                 common sorting algorithms. We also consider the
                 interaction of cache optimization on the predictability
                 of the branches in these algorithms. We find insertion
                 sort to have the fewest branch mispredictions of any
                 comparison-based sorting algorithm, that bubble and
                 shaker sort operate in a fashion that makes their
                 branches highly unpredictable, that the
                 unpredictability of shellsort's branches improves its
                 caching behavior, and that several cache optimizations
                 have little effect on mergesort's branch
                 mispredictions. We find also that optimizations to
                 quicksort, for example the choice of pivot, have a
                 strong influence on the predictability of its branches.
                 We point out a simple way of removing branch
                 instructions from a classic heapsort implementation and
                 also show that unrolling a loop in a cache-optimized
                 heapsort implementation improves the predictability of
                 its branches. Finally, we note that when sorting random
                 data two-level adaptive branch predictors are usually
                 no better than simpler bimodal predictors. This is
                 despite the fact that two-level adaptive predictors are
                 almost always superior to bimodal predictors, in
                 general.",
  acknowledgement = ack-nhfb,
  articleno =    "1.8",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
  keywords =     "branch prediction; caching; pipeline architectures;
                 sorting",
}
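
%%% Editor's sketch (Biggar:2008:ESS): the abstract's heapsort remark,
%%% illustrated.  Selecting the larger child with arithmetic on a comparison
%%% result removes the hard-to-predict if/else from the sift-down inner loop
%%% (in C the same pattern compiles to a conditional move).  An editorial
%%% illustration of the idea, not the paper's code.
%%%
%%%     def siftdown(a, i, n):
%%%         while 2 * i + 1 < n:
%%%             c = 2 * i + 1
%%%             c += (c + 1 < n) and (a[c + 1] > a[c])  # branch-free pick
%%%             if a[c] <= a[i]:
%%%                 break
%%%             a[i], a[c] = a[c], a[i]
%%%             i = c
%%%
%%%     def heapsort(a):
%%%         n = len(a)
%%%         for i in range(n // 2 - 1, -1, -1):         # build the max-heap
%%%             siftdown(a, i, n)
%%%         for end in range(n - 1, 0, -1):             # extract maxima
%%%             a[0], a[end] = a[end], a[0]
%%%             siftdown(a, 0, end)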

@Article{Hazel:2008:TCL,
  author =       "Thomas Hazel and Laura Toma and Jan Vahrenhold and
                 Rajiv Wickremesinghe",
  title =        "Terracost: {Computing} least-cost-path surfaces for
                 massive grid terrains",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "12",
  pages =        "1.9:1--1.9:??",
  month =        jun,
  year =         "2008",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/1370596.1370600",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Oct 6 16:17:58 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "This paper addresses the problem of computing
                 least-cost-path surfaces for massive grid terrains.
                 Consider a grid terrain $T$ and let $C$ be a cost grid
                 for $T$ such that every point in $C$ stores a value
                 that represents the cost of traversing the
                 corresponding point in $T$. Given $C$ and a set of
                 sources $S \subseteq T$, a least-cost-path grid $\Delta$ for
                 $T$ is a grid such that every point in $\Delta$
                 represents the distance to the source in $S$ that can
                 be reached with minimal cost. We present a scalable
                 approach to computing least-cost-path grids. Our
                 algorithm, terracost, is derived from our previous work
                 on I/O-efficient shortest paths on grids and uses
                 $O(\hbox{sort}(n))$ I/Os, where $\hbox{sort}(n)$ is the
                 complexity of sorting $n$ items of data in the
                 I/O-model of Aggarwal and Vitter. We present the
                 design, the analysis, and an experimental study of
                 terracost. An added benefit of the algorithm underlying
                 terracost is that it naturally lends itself to
                 parallelization. We have implemented terracost in a
                 distributed environment using our cluster management
                 tool and report on experiments showing that it
                 obtains near-linear speedup in the size of the
                 cluster. To the best of our knowledge, this is the
                 first experimental evaluation of a multiple-source
                 least-cost-path algorithm in the external memory
                 setting.",
  acknowledgement = ack-nhfb,
  articleno =    "1.9",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
  keywords =     "data structures and algorithms; Dijkstra's algorithm;
                 I/O-efficiency; shortest paths; terrain data",
}
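
%%% Editor's sketch (Hazel:2008:TCL): the computation terracost performs,
%%% written as a plain in-memory multi-source Dijkstra on a grid; the paper's
%%% contribution is doing this I/O-efficiently.  The cost of stepping between
%%% neighboring cells is taken as the average of their cost values, one
%%% common convention (an assumption, not necessarily the paper's).
%%%
%%%     import heapq
%%%
%%%     def least_cost_surface(cost, sources):
%%%         """cost: 2D list; sources: iterable of (row, col) cells."""
%%%         rows, cols = len(cost), len(cost[0])
%%%         INF = float("inf")
%%%         dist = [[INF] * cols for _ in range(rows)]
%%%         pq = []
%%%         for r, c in sources:               # all sources start at 0
%%%             dist[r][c] = 0.0
%%%             heapq.heappush(pq, (0.0, r, c))
%%%         while pq:
%%%             d, r, c = heapq.heappop(pq)
%%%             if d > dist[r][c]:
%%%                 continue                   # stale queue entry
%%%             for dr, dc in ((1, 0), (-1, 0), (0, 1), (0, -1)):
%%%                 nr, nc = r + dr, c + dc
%%%                 if 0 <= nr < rows and 0 <= nc < cols:
%%%                     nd = d + (cost[r][c] + cost[nr][nc]) / 2.0
%%%                     if nd < dist[nr][nc]:
%%%                         dist[nr][nc] = nd
%%%                         heapq.heappush(pq, (nd, nr, nc))
%%%         return dist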

@Article{Arge:2008:P,
  author =       "Lars Arge and Giuseppe F. Italiano",
  title =        "Preface",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "12",
  pages =        "2.1:1--2.1:??",
  month =        jun,
  year =         "2008",
  CODEN =        "????",
  DOI =          "http://doi.acm.org/10.1145/1227161.1227163",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Oct 6 16:17:58 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/jea.bib",
  acknowledgement = ack-nhfb,
  articleno =    "2.1",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
}

@Article{Brodal:2008:ECO,
  author =       "Gerth St{\o}lting Brodal and Rolf Fagerberg and
                 Kristoffer Vinther",
  title =        "Engineering a cache-oblivious sorting algorithm",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "12",
  pages =        "2.2:1--2.2:??",
  month =        jun,
  year =         "2008",
  CODEN =        "????",
  DOI =          "http://doi.acm.org/10.1145/1227161.1227164",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Oct 6 16:17:58 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "This paper is an algorithmic engineering study of
                 cache-oblivious sorting. We investigate by empirical
                 methods a number of implementation issues and parameter
                 choices for the cache-oblivious sorting algorithm Lazy
                 Funnelsort and compare the final algorithm with
                 Quicksort, the established standard for
                 comparison-based sorting, as well as with recent
                 cache-aware proposals. The main result is a carefully
                 implemented cache-oblivious sorting algorithm, which,
                 our experiments show, can be faster than the best
                 Quicksort implementation we are able to find for input
                 sizes well within the limits of RAM. It is also at
                 least as fast as the recent cache-aware implementations
                 included in the test. On disk, the difference is even
                 more pronounced regarding Quicksort and the cache-aware
                 algorithms, whereas the algorithm is slower than a
                 careful implementation of multiway Mergesort, such as
                 TPIE.",
  acknowledgement = ack-nhfb,
  articleno =    "2.2",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
  keywords =     "cache-oblivious algorithms; funnelsort; quicksort",
}

@Article{Bender:2008:SSH,
  author =       "Michael A. Bender and Bryan Bradley and Geetha
                 Jagannathan and Krishnan Pillaipakkamnatt",
  title =        "Sum-of-squares heuristics for bin packing and memory
                 allocation",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "12",
  pages =        "2.3:1--2.3:??",
  month =        jun,
  year =         "2008",
  CODEN =        "????",
  DOI =          "http://doi.acm.org/10.1145/1227161.1227165",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Oct 6 16:17:58 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "The sum-of-squares algorithm (SS) was introduced by
                 Csirik, Johnson, Kenyon, Shor, and Weber for online bin
                 packing of integral-sized items into integral-sized
                 bins. First, we show the results of experiments from
                 two new variants of the SS algorithm. The first
                 variant, which runs in time $O(n \sqrt{B \log B})$,
                 appears to have expected waste almost identical to the
                 sum-of-squares algorithm on all the distributions
                 mentioned in the original papers on this topic. The
                 other variant, which runs in $O(n \log B)$ time,
                 performs well on most, but not on all of those
                 distributions. We also apply SS to the online
                 memory-allocation problem. Our experimental comparisons
                 between SS and Best Fit indicate that neither algorithm
                 is consistently better than the other. If the amount of
                 randomness in item sizes is low, SS appears to have
                 lower waste than Best Fit, whereas, if the amount of
                 randomness is high, Best Fit appears to have lower waste
                 than SS. Our experiments suggest that in both real and
                 synthetic traces, SS does not seem to have an
                 asymptotic advantage over Best Fit, in contrast with
                 the bin-packing problem.",
  acknowledgement = ack-nhfb,
  articleno =    "2.3",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
  keywords =     "bin packing; memory allocation; sum of squares",
}
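
%%% Editor's sketch (Bender:2008:SSH): the sum-of-squares rule itself, as
%%% defined by Csirik et al.  N[g] counts the open bins with remaining gap
%%% g; an item of size s goes wherever the resulting value of sum(N[g]**2)
%%% is smallest, among the existing bins that fit and a fresh bin.  Assumes
%%% integer sizes 1 <= s <= B.
%%%
%%%     def ss_pack(items, B):
%%%         N = [0] * B                    # gaps 1..B-1; N[0] is unused
%%%         bins = 0
%%%         def after(g, s):               # sum of squares if s goes to gap g
%%%             N2 = list(N)
%%%             if g < B:
%%%                 N2[g] -= 1             # g == B stands for a new bin
%%%             if 0 < g - s < B:
%%%                 N2[g - s] += 1
%%%             return sum(v * v for v in N2[1:])
%%%         for s in items:
%%%             options = [g for g in range(s, B) if N[g] > 0] + [B]
%%%             g = min(options, key=lambda g_: after(g_, s))
%%%             if g == B:
%%%                 bins += 1              # open a new bin
%%%             else:
%%%                 N[g] -= 1
%%%             if 0 < g - s < B:
%%%                 N[g - s] += 1          # a gap of 0 closes the bin
%%%         return bins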

@Article{Pyrga:2008:EMT,
  author =       "Evangelia Pyrga and Frank Schulz and Dorothea Wagner
                 and Christos Zaroliagis",
  title =        "Efficient models for timetable information in public
                 transportation systems",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "12",
  pages =        "2.4:1--2.4:??",
  month =        jun,
  year =         "2008",
  CODEN =        "????",
  DOI =          "http://doi.acm.org/10.1145/1227161.1227166",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Oct 6 16:17:58 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "We consider two approaches that model timetable
                 information in public transportation systems as
                 shortest-path problems in weighted graphs. In the {\em
                 time-expanded\/} approach, every event at a station,
                 e.g., the departure of a train, is modeled as a node in
                 the graph, while in the {\em time-dependent\/} approach
                 the graph contains only one node per station. Both
                 approaches have been recently considered for (a
                 simplified version of) the earliest arrival problem,
                 but little is known about their relative performance.
                 Thus far, there are only theoretical arguments in favor
                 of the time-dependent approach. In this paper, we
                 provide the first extensive experimental comparison of
                 the two approaches. Using several real-world data sets,
                 we evaluate the performance of the basic models and of
                 several new extensions towards realistic modeling.
                 Furthermore, new insights on solving bicriteria
                 optimization problems in both models are presented. The
                 time-expanded approach turns out to be more robust for
                 modeling more complex scenarios, whereas the
                 time-dependent approach shows clearly better
                 performance.",
  acknowledgement = ack-nhfb,
  articleno =    "2.4",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
  keywords =     "itinerary query; public transportation system;
                 shortest path; timetable information",
}
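
%%% As a hedged illustration of the time-expanded approach described
%%% above, the sketch below builds one node per event and connects
%%% events by travel and waiting edges.  It is a simplification: the
%%% full model distinguishes departure from arrival nodes and handles
%%% transfers and realistic extensions, which are omitted here.
%%%
%%%   from collections import defaultdict
%%%
%%%   def time_expanded_graph(connections):
%%%       """connections: (dep_station, dep_time, arr_station,
%%%       arr_time) tuples; returns adjacency lists over event nodes
%%%       (station, time) with travel-time edge weights."""
%%%       graph = defaultdict(list)
%%%       events = defaultdict(set)
%%%       for ds, dt, st, at in connections:
%%%           graph[(ds, dt)].append(((st, at), at - dt))  # travel edge
%%%           events[ds].add(dt)
%%%           events[st].add(at)
%%%       for station, times in events.items():            # waiting edges
%%%           ts = sorted(times)
%%%           for a, b in zip(ts, ts[1:]):
%%%               graph[(station, a)].append(((station, b), b - a))
%%%       return graph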

@Article{Leaver-Fay:2008:FPH,
  author =       "Andrew Leaver-Fay and Yuanxin Liu and Jack Snoeyink
                 and Xueyi Wang",
  title =        "Faster placement of hydrogens in protein structures by
                 dynamic programming",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "12",
  pages =        "2.5:1--2.5:??",
  month =        jun,
  year =         "2008",
  CODEN =        "????",
  DOI =          "http://doi.acm.org/10.1145/1227161.1227167",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Oct 6 16:17:58 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "M. Word and coauthors from the Richardsons' 3D Protein
                 Structure laboratory at Duke University propose {\em
                 dot scores\/} to measure interatomic interactions in
                 molecular structures. Their program REDUCE uses these
                 scores in a brute-force search to solve instances of
                 the {\em NP\/}-hard problem of finding the optimal
                 placement of hydrogen atoms in molecular structures
                 determined by X-ray crystallography. We capture the
                 central combinatorial optimization in the hydrogen
                 placement problem with an abstraction that we call an
                 interaction (hyper)graph. REDUCE's dot-based scoring
                 function cannot be decomposed into the sum of pair
                 interactions, but because the function is short ranged
                 we are able to decompose it into the sum of single,
                 pair, triple, and quadruple interactions that we
                 represent by graph hyperedges. Almost every interaction
                 graph we have observed has had a small treewidth. This
                 fact allows us to replace the brute-force search by
                 dynamic programming, giving speedups of nearly ten
                 orders of magnitude. This dynamic programming has been
                 incorporated into REDUCE and is available for
                 download.",
  acknowledgement = ack-nhfb,
  articleno =    "2.5",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
  keywords =     "dynamic programming; hard-sphere model; hydrogen
                 bonds; hydrogen placement; protein structure;
                 treewidth",
}
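
%%% The abstract's key point is that a small treewidth lets dynamic
%%% programming replace brute-force search.  The toy Python sketch
%%% below is not REDUCE's scoring DP; it shows the same bottom-up
%%% principle in the simplest case (a tree, i.e., treewidth 1), via
%%% maximum-weight independent set.
%%%
%%%   def max_weight_independent_set(tree, weight, root):
%%%       """tree: adjacency dict of an undirected tree."""
%%%       def dp(v, parent):
%%%           take, skip = weight[v], 0
%%%           for u in tree[v]:
%%%               if u == parent:
%%%                   continue
%%%               t, s = dp(u, v)
%%%               take += s          # v chosen: children skipped
%%%               skip += max(t, s)  # v skipped: children choose freely
%%%           return take, skip
%%%       return max(dp(root, None))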

@Article{Demetrescu:2008:PA,
  author =       "Camil Demetrescu and Roberto Tamassia",
  title =        "Papers from {ALENEX 2005}",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "12",
  pages =        "3.1:1--3.1:??",
  month =        jun,
  year =         "2008",
  CODEN =        "????",
  DOI =          "http://doi.acm.org/10.1145/1227161.1402293",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Oct 6 16:17:58 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/jea.bib",
  acknowledgement = ack-nhfb,
  articleno =    "3.1",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
}

@Article{Brodal:2008:AQ,
  author =       "Gerth St{\o}lting Brodal and Rolf Fagerberg and
                 Gabriel Moruz",
  title =        "On the adaptiveness of {Quicksort}",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "12",
  pages =        "3.2:1--3.2:??",
  month =        jun,
  year =         "2008",
  CODEN =        "????",
  DOI =          "http://doi.acm.org/10.1145/1227161.1402294",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Oct 6 16:17:58 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "Quicksort was first introduced in 1961 by Hoare. Many
                 variants have been developed, the best of which are
                 among the fastest generic-sorting algorithms available,
                 as testified by the choice of Quicksort as the default
                 sorting algorithm in most programming libraries. Some
                 sorting algorithms are adaptive, i.e., they have a
                 complexity analysis that is better for inputs that
                 are nearly sorted according to some specified measure
                 of presortedness. Quicksort is not among these, as it
                 uses $\Omega (n \log n)$ comparisons even for sorted
                 inputs. However, in this paper, we demonstrate
                 empirically that the actual running time of Quicksort
                 {\em is\/} adaptive with respect to the presortedness
                 measure Inv. Differences close to a factor of two are
                 observed between instances with low and high Inv value.
                 We then show that for the randomized version of
                 Quicksort, the number of element {\em swaps\/}
                 performed is {\em provably\/} adaptive with respect to
                 the measure $\hbox{Inv}$. More precisely, we prove that
                 randomized Quicksort performs expected $O(n (1 + \log(1
                 + \hbox{Inv} / n)))$ element swaps, where $\hbox{Inv}$
                 denotes the number of inversions in the input sequence.
                 This result provides a theoretical explanation for the
                 observed behavior and gives new insights on the
                 behavior of Quicksort. We also give some empirical
                 results on the adaptive behavior of Heapsort and
                 Mergesort.",
  acknowledgement = ack-nhfb,
  articleno =    "3.2",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
  keywords =     "adaptive sorting; branch mispredictions; Quicksort",
}
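
%%% A small experiment in the spirit of the abstract can be run with
%%% the sketch below: count element swaps of randomized Quicksort on
%%% a nearly sorted input (small Inv) versus a shuffled one.  The
%%% counting convention (pivot moves included) is an assumption, not
%%% necessarily the paper's.
%%%
%%%   import random
%%%
%%%   def quicksort_swaps(a, lo=0, hi=None):
%%%       """In-place randomized Quicksort; returns number of swaps."""
%%%       if hi is None:
%%%           hi = len(a) - 1
%%%       if lo >= hi:
%%%           return 0
%%%       p = random.randint(lo, hi)
%%%       a[p], a[hi] = a[hi], a[p]          # random pivot to the end
%%%       pivot, i, swaps = a[hi], lo - 1, 1
%%%       for j in range(lo, hi):
%%%           if a[j] <= pivot:
%%%               i += 1
%%%               a[i], a[j] = a[j], a[i]
%%%               swaps += 1
%%%       a[i + 1], a[hi] = a[hi], a[i + 1]
%%%       return (swaps + 1 + quicksort_swaps(a, lo, i)
%%%               + quicksort_swaps(a, i + 2, hi))
%%%
%%%   a = list(range(1000))
%%%   for _ in range(50):                    # plant ~50 inversions
%%%       i = random.randrange(len(a) - 1)
%%%       a[i], a[i + 1] = a[i + 1], a[i]
%%%   print(quicksort_swaps(a))              # compare with a shuffled copy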

@Article{Codenotti:2008:ESD,
  author =       "Bruno Codenotti and Benton Mccune and Sriram Pemmaraju
                 and Rajiv Raman and Kasturi Varadarajan",
  title =        "An experimental study of different approaches to solve
                 the market equilibrium problem",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "12",
  pages =        "3.3:1--3.3:??",
  month =        jun,
  year =         "2008",
  CODEN =        "????",
  DOI =          "http://doi.acm.org/10.1145/1227161.1402295",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Oct 6 16:17:58 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "Over the last few years, the problem of computing
                 market equilibrium prices for exchange economies has
                 received much attention in the theoretical computer
                 science community. Such activity led to a flurry of
                 polynomial time algorithms for various restricted, yet
                 significant, settings. The most important restrictions
                 arise either when the traders' utility functions
                 satisfy a property known as {\em gross
                 substitutability\/} or when the initial endowments are
                 proportional (the Fisher model). In this paper, we
                 experimentally compare the performance of some of these
                 recent algorithms against that of the most used
                 software packages. In particular, we evaluate the
                 following approaches: (1) the solver PATH, available
                 under GAMS/MPSGE, a popular tool for computing market
                 equilibrium prices; (2) a discrete version of a simple
                 iterative price update scheme called t{\^a}tonnement;
                 (3) a discrete version of the welfare adjustment
                 process; (4) convex feasibility programs that
                 characterize the equilibrium in some special cases. We
                 analyze the performance of these approaches on models
                 of exchange economies where the consumers are equipped
                 with utility functions that are widely used in
                 real-world applications. The outcomes of our experiments
                 consistently show that many market settings allow for
                 an efficient computation of the equilibrium, well
                 beyond the restrictions under which the theory provides
                 polynomial time guarantees. For some of the approaches,
                 we also identify models where they are prone to
                 failure.",
  acknowledgement = ack-nhfb,
  articleno =    "3.3",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
  keywords =     "market equilibrium",
}
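
%%% A hedged sketch of approach (2), a discrete tatonnement price
%%% update: raise the price of over-demanded goods and lower it for
%%% under-demanded ones.  `excess_demand` is an assumed user-supplied
%%% oracle for the economy; the multiplicative rule is one common
%%% discretization, not necessarily the exact one evaluated above.
%%%
%%%   def tatonnement(excess_demand, prices, step=0.01, tol=1e-6,
%%%                   max_iter=100000):
%%%       for _ in range(max_iter):
%%%           z = excess_demand(prices)  # z[i] > 0: good i over-demanded
%%%           if max(abs(zi) for zi in z) < tol:
%%%               break                  # (approximate) equilibrium found
%%%           prices = [max(p * (1.0 + step * zi), 1e-12)
%%%                     for p, zi in zip(prices, z)]
%%%       return prices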

@Article{Dementiev:2008:BEM,
  author =       "Roman Dementiev and Juha K{\"a}rkk{\"a}inen and Jens
                 Mehnert and Peter Sanders",
  title =        "Better external memory suffix array construction",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "12",
  pages =        "3.4:1--3.4:??",
  month =        jun,
  year =         "2008",
  CODEN =        "????",
  DOI =          "http://doi.acm.org/10.1145/1227161.1402296",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Oct 6 16:17:58 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "Suffix arrays are a simple and powerful data structure
                 for text processing that can be used for full text
                 indexes, data compression, and many other applications,
                 in particular, in bioinformatics. However, so far, it
                 has appeared prohibitive to build suffix arrays for
                 huge inputs that do not fit into main memory. This
                 paper presents design, analysis, implementation, and
                 experimental evaluation of several new and improved
                 algorithms for suffix array construction. The
                 algorithms are asymptotically optimal in the worst case
                 or on average. Our implementation can construct suffix
                 arrays for inputs of up to 4 GB in hours on a low-cost
                 machine. As a tool of possible independent interest, we
                 present a systematic way to design, analyze, and
                 implement {\em pipelined\/} algorithms.",
  acknowledgement = ack-nhfb,
  articleno =    "3.4",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
  keywords =     "algorithm engineering; algorithms for strings;
                 external memory; I/O-efficient; large data sets;
                 secondary memory; suffix array",
}

@Article{Swenson:2008:ATE,
  author =       "Krister M. Swenson and Mark Marron and Joel V.
                 Earnest-DeYoung and Bernard M. E. Moret",
  title =        "Approximating the true evolutionary distance between
                 two genomes",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "12",
  pages =        "3.5:1--3.5:??",
  month =        jun,
  year =         "2008",
  CODEN =        "????",
  DOI =          "http://doi.acm.org/10.1145/1227161.1402297",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Oct 6 16:17:58 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "As more and more genomes are sequenced, evolutionary
                 biologists are becoming increasingly interested in
                 evolution at the level of whole genomes, in scenarios
                 in which the genome evolves through insertions,
                 duplications, deletions, and movements of genes along
                 its chromosomes. In the mathematical model pioneered by
                 Sankoff and others, a unichromosomal genome is
                 represented by a signed permutation of a multiset of
                 genes; Hannenhalli and Pevzner showed that the edit
                 distance between two signed permutations of the same
                 set can be computed in polynomial time when all
                 operations are inversions. El-Mabrouk extended that
                 result to allow deletions and a limited form of
                 insertions (which forbids duplications); in turn we
                 extended it to compute a nearly optimal edit sequence
                 between an arbitrary genome and the identity
                 permutation. In this paper we generalize our approach
                 to compute distances between two arbitrary genomes, but
                 focus on approximating the true evolutionary distance
                 rather than the edit distance. We present experimental
                 results showing that our algorithm produces excellent
                 estimates of the true evolutionary distance up to a
                 (high) threshold of saturation; indeed, the distances
                 thus produced are good enough to enable the simple
                 neighbor-joining procedure to reconstruct our test
                 trees with high accuracy.",
  acknowledgement = ack-nhfb,
  articleno =    "3.5",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
  keywords =     "duplications; evolution; inversions; pairwise
                 distances; whole-genome data",
}

@Article{Krommidas:2008:ESA,
  author =       "Ioannis Krommidas and Christos Zaroliagis",
  title =        "An experimental study of algorithms for fully dynamic
                 transitive closure",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "12",
  pages =        "16:1--16:??",
  month =        jun,
  year =         "2008",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/1370596.1370597",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Oct 6 16:17:58 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "We have conducted an extensive experimental study on
                 algorithms for fully dynamic transitive closure. We
                 have implemented the recent fully dynamic algorithms by
                 King [1999], Roditty [2003], Roditty and Zwick [2002,
                 2004], and Demetrescu and Italiano [2000, 2005] along
                 with several variants and compared them to pseudo fully
                 dynamic and simple-minded algorithms developed in a
                 previous study [Frigioni et al. 2001]. We tested and
                 compared these implementations on random inputs,
                 synthetic (worst-case) inputs, and on inputs motivated
                 by real-world graphs. Our experiments reveal that some
                 of the dynamic algorithms can really be of practical
                 value in many situations.",
  acknowledgement = ack-nhfb,
  articleno =    "16",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
  keywords =     "dynamic algorithm; path; reachability; transitive
                 closure",
}

@Article{Gottlob:2008:BBA,
  author =       "Georg Gottlob and Marko Samer",
  title =        "A backtracking-based algorithm for hypertree
                 decomposition",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "13",
  pages =        "1:1--1:??",
  month =        sep,
  year =         "2008",
  CODEN =        "????",
  DOI =          "http://doi.acm.org/10.1145/1412228.1412229",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Oct 6 16:18:31 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "Hypertree decompositions of hypergraphs are a
                 generalization of tree decompositions of graphs. The
                 corresponding hypertree-width is a measure for the
                 acyclicity and therefore an indicator for the
                 tractability of the associated computation problem.
                 Several NP-hard decision and computation problems are
                 known to be tractable on instances whose structure is
                 represented by hypergraphs of bounded hypertree-width.
                 Roughly speaking, the smaller the hypertree-width, the
                 faster the computation problem can be solved. In this
                 paper, we present the new backtracking-based algorithm
                 det-$k$-decomp for computing hypertree decompositions
                 of small width. Our benchmark evaluations have shown
                 that det-$k$-decomp significantly outperforms
                 opt-$k$-decomp, the only exact hypertree decomposition
                 algorithm so far. Even compared to the best heuristic
                 algorithm, we obtained competitive results as long as
                 the hypergraphs are sufficiently simple.",
  acknowledgement = ack-nhfb,
  articleno =    "1",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
  keywords =     "constraint satisfaction; hypertree decomposition",
}

@Article{Raman:2008:P,
  author =       "Rajeev Raman and Matt Stallmann",
  title =        "Preface",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "13",
  pages =        "1:1--1:??",
  month =        sep,
  year =         "2008",
  CODEN =        "????",
  DOI =          "http://doi.acm.org/10.1145/1412228.1412235",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Oct 6 16:18:31 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/jea.bib",
  acknowledgement = ack-nhfb,
  articleno =    "1",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
}

@Article{Gramm:2008:DRE,
  author =       "Jens Gramm and Jiong Guo and Falk H{\"u}ffner and Rolf
                 Niedermeier",
  title =        "Data reduction and exact algorithms for clique cover",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "13",
  pages =        "2:1--2:??",
  month =        sep,
  year =         "2008",
  CODEN =        "????",
  DOI =          "http://doi.acm.org/10.1145/1412228.1412236",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Oct 6 16:18:31 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "To cover the edges of a graph with a minimum number of
                 cliques is an NP-hard problem with many applications.
                 For this problem we develop efficient and effective
                 polynomial-time data reduction rules that, combined
                 with a search tree algorithm, allow for exact problem
                 solutions in competitive time. This is confirmed by
                 experiments with real-world and synthetic data.
                 Moreover, we prove the fixed-parameter tractability of
                 covering edges by cliques.",
  acknowledgement = ack-nhfb,
  articleno =    "2",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
  keywords =     "clique cover; data reduction; fixed-parameter
                 tractability",
}

@Article{Haran:2008:ESP,
  author =       "Idit Haran and Dan Halperin",
  title =        "An experimental study of point location in planar
                 arrangements in {CGAL}",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "13",
  pages =        "3:1--3:??",
  month =        sep,
  year =         "2008",
  CODEN =        "????",
  DOI =          "http://doi.acm.org/10.1145/1412228.1412237",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Oct 6 16:18:31 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "We study the performance in practice of various
                 point-location algorithms implemented in CGAL (the
                 Computational Geometry Algorithms Library), including a
                 newly devised {\em landmarks\/} algorithm. Among the
                 other algorithms studied are: a na{\"\i}ve approach, a
                 ``walk along a line'' strategy, and a trapezoidal
                 decomposition-based search structure. The current
                 implementation addresses general arrangements of planar
                 curves, including arrangements of nonlinear segments
                 (e.g., conic arcs) and allows for degenerate input (for
                 example, more than two curves intersecting in a single
                 point or overlapping curves). The algorithms use exact
                 geometric computation and thus result in the correct
                 point location. In our landmarks algorithm (a.k.a. jump
                 \& walk), special points, ``landmarks,'' are chosen in
                 a preprocessing stage, their place in the arrangement
                 is found, and they are inserted into a data structure
                 that enables efficient nearest-neighbor search. Given a
                 query point, the nearest landmark is located and a
                 ``walk'' strategy is applied from the landmark to the
                 query point. We report on various experiments with
                 arrangements composed of line segments or conic arcs.
                 The results indicate that compared to the other
                 algorithms tested, the landmarks approach is the most
                 efficient, when the overall (amortized) cost of a query
                 is taken into account, combining both preprocessing and
                 query time. The simplicity of the algorithm enables an
                 almost straightforward implementation and rather easy
                 maintenance. The generic programming implementation
                 allows versatility both in the selected type of
                 landmarks and in the choice of the nearest-neighbor
                 search structure. The end result is an efficient
                 point-location algorithm that outperforms the alternative
                 CGAL implementations in most practical aspects.",
  acknowledgement = ack-nhfb,
  articleno =    "3",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
  keywords =     "arrangements; CGAL; computational geometry; generic
                 programming; point location",
}

@Article{Lanthier:2008:CAC,
  author =       "Mark A. Lanthier and Doron Nussbaum and Tsuo-Jung
                 Wang",
  title =        "Computing an approximation of the $1$-center problem
                 on weighted terrain surfaces",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "13",
  pages =        "3:1--3:??",
  month =        sep,
  year =         "2008",
  CODEN =        "????",
  DOI =          "http://doi.acm.org/10.1145/1412228.1412231",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Oct 6 16:18:31 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "In this article, we discuss the problem of determining
                 a meeting point of a set of scattered robots $R = \{
                 r_1, r_2, \ldots{}, r_s \}$ in a weighted terrain $P$,
                 which has $n > s$ triangular faces. Our algorithmic
                 approach is to produce a discretization of $P$ by
                 producing a graph $G = (V^G, E^G)$, which lies on the
                 surface of $P$. For a chosen vertex $p' \in V^G$, we
                 define $|\Pi(r_i, p')|$ as the minimum weight cost of
                 traveling from $r_i$ to $p'$. We show that $\min_{p'
                 \in V^G} \max_{1 \leq i \leq s} |\Pi(r_i, p')| \leq
                 \min_{p^* \in P} \max_{1 \leq i \leq s} |\Pi(r_i,
                 p^*)| + 2 W |L|$, where $L$ is the longest edge of
                 $P$, $W$ is the maximum cost weight of a face of $P$,
                 and $p^*$ is the optimal solution. Our algorithm
                 requires $O(s n m \log(s n m) + s n m^2)$ time to run,
                 where $m = n$ in the Euclidean metric and $m = n^2$ in
                 the weighted metric. However, we show, through
                 experimentation, that only a constant value of $m$ is
                 required (e.g., $m = 8$) in order to produce very
                 accurate solutions.",
  acknowledgement = ack-nhfb,
  articleno =    "3",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
  keywords =     "1-Center; algorithms; approximation; meeting point;
                 robots; shortest path; terrain; weighted",
}

@Article{Hershberger:2008:SSD,
  author =       "John Hershberger and Nisheeth Shrivastava and Subhash
                 Suri",
  title =        "Summarizing spatial data streams using
                 {ClusterHulls}",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "13",
  pages =        "4:1--4:??",
  month =        sep,
  year =         "2008",
  CODEN =        "????",
  DOI =          "http://doi.acm.org/10.1145/1412228.1412238",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Oct 6 16:18:31 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "We consider the following problem: given an on-line,
                 possibly unbounded stream of two-dimensional (2D)
                 points, how can we summarize its spatial distribution
                 or {\em shape\/} using a small, bounded amount of
                 memory? We propose a novel scheme, called {\em
                 ClusterHull}, which represents the shape of the stream
                 as a dynamic collection of convex hulls, with a total
                 of at most $m$ vertices, where $m$ is the size of the
                 memory. The algorithm dynamically adjusts both the
                 number of hulls and the number of vertices in each hull
                 to best represent the stream using its fixed-memory
                 budget. This algorithm addresses a problem whose
                 importance is increasingly recognized, namely, the
                 problem of summarizing real-time data streams to enable
                 on-line analytical processing. As a motivating example,
                 consider habitat monitoring using wireless sensor
                 networks. The sensors produce a steady stream of
                 geographic data, namely, the locations of objects being
                 tracked. In order to conserve their limited resources
                 (power, bandwidth, and storage), the sensors can
                 compute, store, and exchange ClusterHull summaries of
                 their data, without losing important geometric
                 information. We are not aware of other schemes
                 specifically designed for capturing shape information
                 in geometric data streams and so we compare ClusterHull
                 with some of the best general-purpose clustering
                 schemes, such as CURE, $k$-medians, and LSEARCH. We
                 show through experiments that ClusterHull is able to
                 represent the shape of two-dimensional data streams
                 more faithfully and flexibly than the stream versions
                 of these clustering algorithms.",
  acknowledgement = ack-nhfb,
  articleno =    "4",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
  keywords =     "convex hull; data streams; geometric data",
}

@Article{Safro:2008:MAL,
  author =       "Ilya Safro and Dorit Ron and Achi Brandt",
  title =        "Multilevel algorithms for linear ordering problems",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "13",
  pages =        "4:1--4:??",
  month =        sep,
  year =         "2008",
  CODEN =        "????",
  DOI =          "http://doi.acm.org/10.1145/1412228.1412232",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Oct 6 16:18:31 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "Linear ordering problems are combinatorial
                 optimization problems that deal with the minimization
                 of different functionals by finding a suitable
                 permutation of the graph vertices. These problems are
                 widely used and studied in many practical and
                 theoretical applications. In this paper, we present a
                 variety of linear-time algorithms for these problems
                 inspired by the Algebraic Multigrid approach, which is
                 based on weighted-edge contraction. The experimental
                 results for four such problems turned out to be better
                 than every known result in almost all cases, while the
                 short (linear) running time of the algorithms enables
                 testing very large graphs.",
  acknowledgement = ack-nhfb,
  articleno =    "4",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
  keywords =     "algebraic multigrid; linear ordering; multilevel
                 algorithm",
}

@Article{Holzer:2008:EMO,
  author =       "Martin Holzer and Frank Schulz and Dorothea Wagner",
  title =        "Engineering multilevel overlay graphs for
                 shortest-path queries",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "13",
  pages =        "5:1--5:??",
  month =        sep,
  year =         "2008",
  CODEN =        "????",
  DOI =          "http://doi.acm.org/10.1145/1412228.1412239",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Oct 6 16:18:31 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "An overlay graph of a given graph $G$ = ($V$, $E$) on
                 a subset $S \subseteq V$ is a graph with vertex set $S$
                 and edges corresponding to shortest paths in $G$. In
                 particular, we consider variations of the multilevel
                 overlay graph used in Schulz et al. [2002] to speed up
                 shortest-path computation. In this work, we follow up
                 and present several vertex selection criteria, along
                 with two general strategies of applying these criteria,
                 to determine a subset $S$ of a graph's vertices. The
                 main contribution is a systematic experimental study
                 where we investigate the impact of selection criteria
                 and strategies on multilevel overlay graphs and the
                 resulting speed-up achieved for shortest-path
                 computation: Depending on selection strategy and graph
                 type, a centrality index criterion, selection based on
                 planar separators, and vertex degree turned out to
                 perform best.",
  acknowledgement = ack-nhfb,
  articleno =    "5",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
  keywords =     "Dijkstra's algorithm; hierarchical; multilevel;
                 overlay graph; preprocessing; shortest path; speed-up
                 technique; vertex selection",
}
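
%%% A minimal sketch of the overlay-graph construction implied above:
%%% for each selected vertex, run a Dijkstra search that is pruned at
%%% other selected vertices.  Pruned edge weights may exceed the true
%%% distance, but shortest paths in the overlay still realize the true
%%% distances via intermediate selected vertices.  Names are
%%% illustrative, not from the paper.
%%%
%%%   import heapq
%%%
%%%   def overlay_edges(graph, S):
%%%       """graph: {v: [(neighbor, weight)]}; S: selected vertices."""
%%%       S, edges = set(S), {}
%%%       for u in S:
%%%           dist, pq = {u: 0.0}, [(0.0, u)]
%%%           while pq:
%%%               d, v = heapq.heappop(pq)
%%%               if d > dist.get(v, float("inf")):
%%%                   continue                  # stale entry
%%%               if v in S and v != u:
%%%                   edges[(u, v)] = d         # overlay edge; prune here
%%%                   continue
%%%               for w, wt in graph[v]:
%%%                   nd = d + wt
%%%                   if nd < dist.get(w, float("inf")):
%%%                       dist[w] = nd
%%%                       heapq.heappush(pq, (nd, w))
%%%       return edges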

@Article{Julstrom:2009:GHB,
  author =       "Bryant A. Julstrom",
  title =        "Greedy heuristics for the bounded diameter minimum
                 spanning tree problem",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "14",
  pages =        "1:1--1:??",
  month =        may,
  year =         "2009",
  CODEN =        "????",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Wed Jun 3 16:21:43 MDT 2009",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "Given a connected, weighted, undirected graph G and a
                 bound $D$, the bounded diameter minimum spanning tree
                 problem seeks a spanning tree on $G$ of minimum weight
                 among the trees in which no path between two vertices
                 contains more than $D$ edges. In Prim's algorithm, the
                 diameter of the growing spanning tree can always be
                 known, so it is a good starting point from which to
                 develop greedy heuristics for the bounded diameter
                 problem. Abdalla, Deo, and Gupta described such an
                 algorithm. It imitates Prim's algorithm but avoids
                 edges whose inclusion in the spanning tree would
                 violate the diameter bound. Running the algorithm from
                 one start vertex requires time that is $O(n^3)$. A
                 modification of this approach uses the start vertex as
                 the center of the spanning tree (if $D$ is even) or as
                 one of the two center vertices (if $D$ is odd). This
                 yields a simpler algorithm whose time is $O(n^2)$. A
                 further modification chooses each next vertex at random
                 rather than greedily, though it still connects each
                 vertex to the growing tree with the lowest-weight
                 feasible edge. On Euclidean problem instances with
                 small diameter bounds, the randomized heuristic is
                 superior to the two fully greedy algorithms, though its
                 advantage fades as the diameter bound grows. On
                 instances whose edge weights have been chosen at
                 random, the fully greedy algorithms outperform the
                 randomized heuristic.",
  acknowledgement = ack-nhfb,
  articleno =    "1",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
}
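
%%% A hedged sketch of the O(n^2) center-based greedy heuristic
%%% described above, for an even diameter bound D: grow the tree
%%% Prim-style from a chosen center, always attaching the cheapest
%%% vertex whose depth stays at most D/2.  Names are illustrative.
%%%
%%%   def center_bdmst(weight, n, center, D):
%%%       """weight[u][v]: edge weights of a complete graph on
%%%       vertices 0..n-1; returns the tree edges."""
%%%       depth = {center: 0}
%%%       best = {v: (weight[center][v], center)
%%%               for v in range(n) if v != center}
%%%       edges = []
%%%       while best:
%%%           v = min(best, key=lambda x: best[x][0])
%%%           w, parent = best.pop(v)
%%%           depth[v] = depth[parent] + 1
%%%           edges.append((parent, v, w))
%%%           if depth[v] + 1 <= D // 2:   # v may serve as a parent
%%%               for u in best:
%%%                   if weight[v][u] < best[u][0]:
%%%                       best[u] = (weight[v][u], v)
%%%       return edges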

@Article{Munro:2009:PSS,
  author =       "J. Ian Munro and Dorothea Wagner",
  title =        "Preface: Section 2 --- Selected Papers from {ALENEX
                 2008}",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "14",
  pages =        "1:1--1:??",
  month =        may,
  year =         "2009",
  CODEN =        "????",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Wed Jun 3 16:21:43 MDT 2009",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/jea.bib",
  acknowledgement = ack-nhfb,
  articleno =    "1",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
}

@Article{Dumitriu:2009:HMG,
  author =       "Daniel Dumitriu and Stefan Funke and Martin Kutz and
                 Nikola Milosavljevi{\'c}",
  title =        "How much geometry it takes to reconstruct a
                 $2$-manifold in {$R^3$}",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "14",
  pages =        "2:1--2:??",
  month =        may,
  year =         "2009",
  CODEN =        "????",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Wed Jun 3 16:21:43 MDT 2009",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "Known algorithms for reconstructing a 2-manifold from
                 a point sample in $R^3$ are naturally based on
                 decisions/predicates that take the geometry of the
                 point sample into account. Facing the always present
                 problem of round-off errors that easily compromise the
                 exactness of those predicate decisions, an exact and
                 robust implementation of these algorithms is far from
                 being trivial and typically requires employment of
                 advanced datatypes for exact arithmetic, as provided by
                 libraries like CORE, LEDA, or GMP. In this article, we
                 present a new reconstruction algorithm whose main
                 novelty is to throw away geometry information early
                 in the reconstruction process and to operate mainly
                 combinatorially on a graph structure. More precisely,
                 our algorithm only requires distances between the
                 sample points and not the actual embedding in $R^3$. As
                 such, it is less susceptible to robustness problems due
                 to round-off errors and, because it does not require
                 expensive exact arithmetic, also benefits from faster
                 running times. A more theoretical view of our algorithm
                 including correctness proofs under suitable sampling
                 conditions can be found in a companion article.",
  acknowledgement = ack-nhfb,
  articleno =    "2",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
}

@Article{Irving:2009:FLS,
  author =       "Robert W. Irving and David F. Manlove",
  title =        "Finding large stable matchings",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "14",
  pages =        "2:1--2:??",
  month =        may,
  year =         "2009",
  CODEN =        "????",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Wed Jun 3 16:21:43 MDT 2009",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "When ties and incomplete preference lists are
                 permitted in the stable marriage and
                 hospitals/residents problems, stable matchings can have
                 different sizes. The problem of finding a maximum
                 cardinality stable matching in this context is known to
                 be NP-hard, even under very severe restrictions on the
                 number, size, and position of ties. In this article, we
                 present two new heuristics for finding large stable
                 matchings in variants of these problems in which ties
                 are on one side only. We describe an empirical study
                 involving these heuristics and the best existing
                 approximation algorithm for this problem. Our results
                 indicate that all three of these algorithms perform
                 significantly better than naive tie-breaking algorithms
                 when applied to real-world and randomly generated data
                 sets and that one of the new heuristics fares slightly
                 better than the other algorithms, in most cases. This
                 study, and these particular problem variants, are
                 motivated by important applications in large-scale
                 centralized matching schemes.",
  acknowledgement = ack-nhfb,
  articleno =    "2",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
}
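
%%% A hedged sketch of the naive baseline mentioned above: break the
%%% ties in the residents' preference lists at random, then run
%%% resident-oriented Gale--Shapley.  It assumes every proposing
%%% resident appears in the hospital's (strict) list; the paper's
%%% heuristics improve on the matching size this baseline produces.
%%%
%%%   import random
%%%
%%%   def naive_hrt(res_prefs, hosp_prefs, capacity):
%%%       """res_prefs: resident -> list of tie groups (lists of
%%%       hospitals); hosp_prefs: hospital -> strict resident list."""
%%%       prefs = {}
%%%       for r, groups in res_prefs.items():    # random tie-breaking
%%%           flat = []
%%%           for g in groups:
%%%               g = list(g)
%%%               random.shuffle(g)
%%%               flat.extend(g)
%%%           prefs[r] = flat
%%%       rank = {h: {r: i for i, r in enumerate(lst)}
%%%               for h, lst in hosp_prefs.items()}
%%%       assigned = {h: [] for h in hosp_prefs}
%%%       nxt = {r: 0 for r in prefs}
%%%       free = list(prefs)
%%%       while free:
%%%           r = free.pop()
%%%           if nxt[r] >= len(prefs[r]):
%%%               continue                       # r stays unmatched
%%%           h = prefs[r][nxt[r]]
%%%           nxt[r] += 1
%%%           assigned[h].append(r)
%%%           assigned[h].sort(key=lambda x: rank[h][x])
%%%           if len(assigned[h]) > capacity[h]:
%%%               free.append(assigned[h].pop())  # worst resident bumped
%%%       return assigned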

@Article{Basu:2009:GAO,
  author =       "Amitabh Basu and Joseph S. B. Mitchell and Girish
                 Kumar Sabhnani",
  title =        "Geometric algorithms for optimal airspace design and
                 air traffic controller workload balancing",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "14",
  pages =        "3:1--3:??",
  month =        may,
  year =         "2009",
  CODEN =        "????",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Wed Jun 3 16:21:43 MDT 2009",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "The National Airspace System (NAS) is designed to
                 accommodate a large number of flights over North
                 America. For purposes of workload limitations for air
                 traffic controllers, the airspace is partitioned into
                 approximately 600 sectors; each sector is observed by
                 one or more controllers. In order to satisfy workload
                 limitations for controllers, it is important that
                 sectors be designed carefully according to the traffic
                 patterns of flights, so that no sector becomes
                 overloaded. We formulate and study the airspace
                 sectorization problem from an algorithmic
                 point-of-view, modeling the problem of optimal
                 sectorization as a geometric partition problem with
                 constraints. The novelty of the problem is that it
                 partitions data consisting of trajectories of moving
                 points, rather than the static point-set partitioning that
                 is commonly studied. First, we formulate and solve the
                 1D version of the problem, showing how to partition a
                 line into ``sectors'' (intervals) according to
                 historical trajectory data. Then, we apply the 1D
                 solution framework to design a 2D sectorization
                 heuristic based on binary space partitions. We also
                 devise partitions based on balanced ``pie partitions''
                 of a convex polygon. We evaluate our 2D algorithms
                 experimentally, applying our algorithms to actual
                 historical flight track data for the NAS. We compare
                 the workload balance of our methods to that of the
                 existing set of sectors for the NAS and find that our
                 resectorization yields competitive and improved
                 workload balancing. In particular, our methods yield an
                 improvement by a factor between 2 and 3 over the
                 current sectorization in terms of the time-average and
                 the worst-case workloads of the maximum workload
                 sector. An even better improvement is seen in the
                 standard deviations (over all sectors) of both
                 time-average and worst-case workloads.",
  acknowledgement = ack-nhfb,
  articleno =    "3",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
}
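
%%% A hedged sketch of the 1D subproblem: split a line of workload
%%% counts into k contiguous ``sectors'' minimizing the maximum sector
%%% workload.  This is the standard linear-partition dynamic program;
%%% the paper's actual cost model (time-average vs. worst-case
%%% workload) may differ.
%%%
%%%   def partition_line(w, k):
%%%       """Returns (optimal max workload, internal cut positions)."""
%%%       n = len(w)
%%%       pre = [0]
%%%       for x in w:
%%%           pre.append(pre[-1] + x)        # prefix sums
%%%       INF = float("inf")
%%%       dp = [[INF] * (k + 1) for _ in range(n + 1)]
%%%       cut = [[0] * (k + 1) for _ in range(n + 1)]
%%%       dp[0][0] = 0
%%%       for i in range(1, n + 1):
%%%           for j in range(1, min(i, k) + 1):
%%%               for t in range(j - 1, i):  # last sector is w[t..i-1]
%%%                   c = max(dp[t][j - 1], pre[i] - pre[t])
%%%                   if c < dp[i][j]:
%%%                       dp[i][j], cut[i][j] = c, t
%%%       cuts, i = [], n
%%%       for j in range(k, 1, -1):          # recover the k - 1 cuts
%%%           i = cut[i][j]
%%%           cuts.append(i)
%%%       return dp[n][k], cuts[::-1]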

@Article{Bauer:2009:SFR,
  author =       "Reinhard Bauer and Daniel Delling",
  title =        "{SHARC}: Fast and robust unidirectional routing",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "14",
  pages =        "4:1--4:??",
  month =        may,
  year =         "2009",
  CODEN =        "????",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Wed Jun 3 16:21:43 MDT 2009",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "During recent years, impressive speed-up techniques
                 for Dijkstra's algorithm have been developed.
                 Unfortunately, the
                 most advanced techniques use bidirectional search,
                 which makes it hard to use them in scenarios where a
                 backward search is prohibited. Even worse, such
                 scenarios are widely spread (e.g.,
                 timetable-information systems or time-dependent
                 networks). In this work, we present a unidirectional
                 speed-up technique, which competes with bidirectional
                 approaches. Moreover, we show how to exploit the
                 advantage of unidirectional routing for fast exact
                 queries in timetable information systems and for fast
                 approximative queries in time-dependent scenarios. By
                 running experiments on several inputs other than road
                 networks, we show that our approach is very robust to
                 the input.",
  acknowledgement = ack-nhfb,
  articleno =    "4",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
}

@Article{Coleman:2009:RTL,
  author =       "Tom Coleman and Anthony Wirth",
  title =        "Ranking tournaments: Local search and a new
                 algorithm",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "14",
  pages =        "6:1--6:??",
  month =        may,
  year =         "2009",
  CODEN =        "????",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Wed Jun 3 16:21:43 MDT 2009",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "Ranking is a fundamental activity for organizing and,
                 later, understanding data. Advice of the form ``$a$
                 should be ranked before $b$'' is given. If this advice
                 is consistent, and complete, then there is a total
                 ordering on the data and the ranking problem is
                 essentially a sorting problem. If the advice is
                 consistent, but incomplete, then the problem becomes
                 topological sorting. If the advice is inconsistent,
                 then we have the feedback arc set (FAS) problem: The
                 aim is then to rank a set of items to satisfy as much
                 of the advice as possible. An instance in which there
                 is advice about every pair of items is known as a
                 tournament. This ranking task is equivalent to ordering
                 the nodes of a given directed graph from left to right,
                 while minimizing the number of arcs pointing left. In
                 the past, much work focused on finding good, effective
                 heuristics for solving the problem. Recently, a proof
                 of the NP-completeness of the problem (even when
                 restricted to tournaments) has accompanied new
                 algorithms with approximation guarantees, culminating
                 in the development of a PTAS (polynomial time
                 approximation scheme) for solving FAS on tournaments.
                 In this article, we reexamine many existing algorithms
                 and develop some new techniques for solving FAS. The
                 algorithms are tested on both synthetic and
                 nonsynthetic datasets. We find that, in practice,
                 local-search algorithms are very powerful, even though
                 we prove that they do not have approximation
                 guarantees. Our new algorithm is based on reversing
                 arcs whose nodes have large in-degree differences,
                 eventually leading to a total ordering. Combining this
                 with a powerful local-search technique yields an
                 algorithm that is as strong, or stronger than, existing
                 techniques on a variety of data sets.",
  acknowledgement = ack-nhfb,
  articleno =    "6",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
}
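
%%% A hedged sketch of a single-vertex-move local search for FAS on a
%%% tournament (one of the local-search families found powerful
%%% above, not the authors' exact procedure): repeatedly move one
%%% item to the position that most reduces left-pointing arcs.
%%%
%%%   def single_move_local_search(order, beats):
%%%       """order: list of items; beats[u][v] truthy iff the advice
%%%       ranks u before v (a tournament: exactly one direction)."""
%%%       improved = True
%%%       while improved:
%%%           improved = False
%%%           for i in range(len(order)):
%%%               v = order[i]
%%%               rest = order[:i] + order[i + 1:]
%%%               best_p, best_gain, gain = i, 0, 0
%%%               for p in range(i, 0, -1):      # slide v leftwards
%%%                   gain += 1 if beats[v][rest[p - 1]] else -1
%%%                   if gain > best_gain:
%%%                       best_p, best_gain = p - 1, gain
%%%               gain = 0
%%%               for p in range(i, len(rest)):  # slide v rightwards
%%%                   gain += 1 if beats[rest[p]][v] else -1
%%%                   if gain > best_gain:
%%%                       best_p, best_gain = p + 1, gain
%%%               if best_gain > 0:
%%%                   rest.insert(best_p, v)
%%%                   order, improved = rest, True
%%%       return order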

@Article{Cherkassky:2009:SPF,
  author =       "Boris V. Cherkassky and Loukas Georgiadis and Andrew
                 V. Goldberg and Robert E. Tarjan and Renato F.
                 Werneck",
  title =        "Shortest-path feasibility algorithms: An experimental
                 evaluation",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "14",
  pages =        "7:1--7:??",
  month =        may,
  year =         "2009",
  CODEN =        "????",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Wed Jun 3 16:21:43 MDT 2009",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "This is an experimental study of algorithms for the
                 shortest-path feasibility problem: Given a directed
                 weighted graph, find a negative cycle or present a
                 short proof that none exists. We study previously known
                 and new algorithms. Our testbed is more extensive than
                 those previously used, including both static and
                 incremental problems, as well as worst-case instances.
                 We show that, while no single algorithm dominates, a
                 small subset (including new algorithms) has very robust
                 performance in practice. Our work advances the state of
                 the art in the area.",
  acknowledgement = ack-nhfb,
  articleno =    "7",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
}
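
%%% A hedged baseline for the feasibility problem above (not one of
%%% the paper's tuned algorithms): Bellman--Ford from a virtual
%%% source with parent pointers; if an edge is still relaxable after
%%% n rounds, walking the parents recovers a negative cycle,
%%% otherwise the final distances are a short feasibility proof.
%%%
%%%   def find_negative_cycle(n, edges):
%%%       """edges: (u, v, w) triples on vertices 0..n-1; returns a
%%%       negative cycle as a vertex list, or None if none exists."""
%%%       dist, parent = [0.0] * n, [-1] * n   # virtual source
%%%       x = -1
%%%       for _ in range(n):
%%%           x = -1
%%%           for u, v, w in edges:
%%%               if dist[u] + w < dist[v]:
%%%                   dist[v] = dist[u] + w
%%%                   parent[v] = u
%%%                   x = v
%%%           if x == -1:
%%%               return None                  # feasible: no negative cycle
%%%       for _ in range(n):                   # step back onto the cycle
%%%           x = parent[x]
%%%       cycle, v = [x], parent[x]
%%%       while v != x:
%%%           cycle.append(v)
%%%           v = parent[v]
%%%       return cycle[::-1]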

@Article{Demetrescu:2009:P,
  author =       "Camil Demetrescu",
  title =        "Preface",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "14",
  number =       "1",
  pages =        "1:1--1:??",
  month =        dec,
  year =         "2009",
  CODEN =        "????",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Mar 15 12:04:28 MDT 2010",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/jea.bib",
  acknowledgement = ack-nhfb,
  articleno =    "1",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
}

@Article{Serna:2009:PSS,
  author =       "Maria Serna and Carme {\'A}lvarez",
  title =        "Preface to special section of selected papers from
                 {WEA 2006}",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "14",
  number =       "1",
  pages =        "1:1--1:??",
  month =        dec,
  year =         "2009",
  CODEN =        "????",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Mar 15 12:04:28 MDT 2010",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/jea.bib",
  acknowledgement = ack-nhfb,
  articleno =    "1",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
}

@Article{Maue:2009:GDS,
  author =       "Jens Maue and Peter Sanders and Domagoj Matijevic",
  title =        "Goal-directed shortest-path queries using precomputed
                 cluster distances",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "14",
  number =       "1",
  pages =        "2:1--2:??",
  month =        dec,
  year =         "2009",
  CODEN =        "????",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Mar 15 12:04:28 MDT 2010",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "We demonstrate how Dijkstra's algorithm for shortest
                 path queries can be accelerated by using precomputed
                 shortest path distances. Our approach allows a
                 completely flexible tradeoff between query time and
                 space consumption for precomputed distances. In
                 particular, sublinear space is sufficient to give the
                 search a strong ``sense of direction''. We evaluate our
                 approach experimentally using large, real-world road
                 networks.",
  acknowledgement = ack-nhfb,
  articleno =    "2",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
}
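
%%% A hedged sketch of the idea above: because every vertex lies in
%%% its cluster, the precomputed distance between the clusters of v
%%% and t is a lower bound on d(v, t) and can serve as an A*
%%% potential.  `cluster` and `cdist` are assumed precomputed; the
%%% bound may be inconsistent, but stale queue entries are lazily
%%% re-expanded, so the query remains exact.
%%%
%%%   import heapq
%%%
%%%   def pcd_query(graph, cluster, cdist, s, t):
%%%       """graph: {v: [(neighbor, weight)]}; cluster[v]: cluster id;
%%%       cdist[a][b]: precomputed min distance between clusters."""
%%%       def h(v):
%%%           return cdist[cluster[v]][cluster[t]]
%%%       dist = {s: 0.0}
%%%       pq = [(h(s), s)]
%%%       while pq:
%%%           f, v = heapq.heappop(pq)
%%%           if v == t:
%%%               return dist[v]
%%%           if f > dist[v] + h(v):
%%%               continue                     # stale entry
%%%           for w, wt in graph[v]:
%%%               nd = dist[v] + wt
%%%               if nd < dist.get(w, float("inf")):
%%%                   dist[w] = nd
%%%                   heapq.heappush(pq, (nd + h(w), w))
%%%       return float("inf")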

@Article{Valimaki:2009:ECS,
  author =       "N. V{\"a}lim{\"a}ki and V. M{\"a}kinen and W. Gerlach
                 and K. Dixit",
  title =        "Engineering a compressed suffix tree implementation",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "14",
  number =       "1",
  pages =        "2:1--2:??",
  month =        dec,
  year =         "2009",
  CODEN =        "????",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Mar 15 12:04:28 MDT 2010",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "Suffix tree is one of the most important data
                 structures in string algorithms and biological sequence
                 analysis. Unfortunately, when it comes to implementing
                 those algorithms and applying them to real genomic
                 sequences, often the main memory size becomes the
                 bottleneck. This is easily explained by the fact that
                 while a DNA sequence of length $n$ from alphabet
                 $\Sigma = \{ A, C, G, T \}$ can be stored in $n \log
                 |\Sigma| = 2 n$ bits, its suffix tree occupies $O(n
                 \log n)$ bits. In practice, the size difference easily
                 reaches factor 50. We report on an implementation of
                 the compressed suffix tree very recently proposed by
                 Sadakane (2007). The compressed suffix tree occupies
                 space proportional to the text size, that is, $O(n \log
                 |\Sigma|)$ bits, and supports all typical suffix tree
                 operations with at most $\log n$ factor slowdown. Our
                 experiments show that, for example, on a 10 MB DNA
                 sequence, the compressed suffix tree takes 10\% of the
                 space of the normal suffix tree. At the same time, a
                 representative algorithm is slowed down by factor 30.
                 Our implementation follows the original proposal in
                 spirit, but some internal parts are tailored toward
                 practical implementation. Our construction algorithm
                 has time requirement $O(n \log n \log |\Sigma|)$ and
                  uses nearly the same space as the final structure
                  while constructing it: on the 10 MB DNA sequence, the
                 maximum space usage during construction is only 1.5
                  times the final product size. As by-products, we
                  develop a method to create the Succinct Suffix Array
                  directly from the Burrows--Wheeler transform and a
                  space-efficient version of the suffixes-insertion
                  algorithm to build the balanced parentheses
                  representation of the suffix tree from LCP
                  information.",
  acknowledgement = ack-nhfb,
  articleno =    "2",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
}
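
%%% The space gap quoted in the abstract is easy to reproduce with
%%% back-of-the-envelope arithmetic; the ~20 bytes per character for a
%%% pointer-based suffix tree is a common rule of thumb, not a figure
%%% from the article:
%%%
%%%     n = 10 * 2**20                   # 10 MB DNA sequence
%%%     text_mb = 2 * n / 8 / 2**20      # n log|Sigma| = 2n bits: 2.5 MB
%%%     tree_mb = 20 * n / 2**20         # ~20 bytes/char tree: 200 MB
%%%     print(tree_mb / text_mb)         # a size ratio well above 50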

@Article{Eisenbrand:2009:ALO,
  author =       "Friedrich Eisenbrand and Andreas Karrenbauer and
                 Chihao Xu",
  title =        "Algorithms for longer {OLED} lifetime",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "14",
  number =       "1",
  pages =        "3:1--3:??",
  month =        dec,
  year =         "2009",
  CODEN =        "????",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Mar 15 12:04:28 MDT 2010",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "We consider an optimization problem arising in the
                 design of controllers for OLED displays. Our objective
                 is to minimize the amplitude of the electrical current
                 flowing through the diodes, which has a direct impact
                 on the lifetime of such a display. The optimization
                  problem consists of finding a decomposition of an image
                 into subframes with special structural properties that
                 allow the display driver to lower the stress on the
                 diodes. For monochrome images, we present an algorithm
                 that finds an optimal solution of this problem in
                 linear time. Moreover, we consider an online version of
                  the problem in which we have to make a decision for one
                  row based on a constant number of rows of
                  lookahead. In this framework, this algorithm has a
                 tight competitive ratio. A generalization of this
                 algorithm computes near-optimal solutions of real-world
                 instances in real time.",
  acknowledgement = ack-nhfb,
  articleno =    "3",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
}

@Article{Englert:2009:EOS,
  author =       "Matthias Englert and Heiko R{\"o}glin and Matthias
                 Westermann",
  title =        "Evaluation of online strategies for reordering
                 buffers",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "14",
  number =       "1",
  pages =        "3:1--3:??",
  month =        dec,
  year =         "2009",
  CODEN =        "????",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Mar 15 12:04:28 MDT 2010",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "A sequence of objects that are characterized by their
                 color has to be processed. Their processing order
                 influences how efficiently they can be processed: Each
                  color change between two consecutive objects incurs
                  costs. A reordering buffer, which is a random access
                  buffer with storage capacity for $k$ objects, can be used
                 to rearrange this sequence online in such a way that
                 the total costs are reduced. This concept is useful for
                 many applications in computer science and economics.
                 The strategy with the best-known competitive ratio is
                 MAP. An upper bound of $O(\log k)$ on the competitive
                  ratio of MAP is known, but no nonconstant lower bound on
                  the competitive ratio is known. Based on
                 theoretical considerations and experimental
                 evaluations, we give strong evidence that the
                 previously used proof techniques are not suitable to
                 show an $o (\sqrt{\log k})$ upper bound on the
                 competitive ratio of MAP. However, we also give some
                 evidence that in fact MAP achieves a competitive ratio
                 of $O(1)$. Further, we evaluate the performance of
                 several strategies on random input sequences
                 experimentally. MAP and its variants RC and RR clearly
                 outperform the other strategies FIFO, LRU, and MCF. In
                 particular, MAP, RC, and RR are the only known
                 strategies whose competitive ratios do not depend on
                 the buffer size. Furthermore, MAP achieves the smallest
                 competitive ratio.",
  acknowledgement = ack-nhfb,
  articleno =    "3",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
}
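
%%% To make the cost model concrete, here is a toy simulation of a
%%% reordering buffer under one plausible reading of the FIFO strategy
%%% (switch to the color of the oldest buffered item and flush that
%%% color); a sketch only, not the authors' testbed:
%%%
%%%     def fifo_cost(sequence, k):
%%%         buf, cost, current = [], 0, None
%%%         def flush(color):
%%%             nonlocal buf, cost, current
%%%             if color != current:
%%%                 cost, current = cost + 1, color
%%%             buf = [y for y in buf if y != color]
%%%         for x in sequence:
%%%             buf.append(x)
%%%             if len(buf) > k:         # buffer full: must output
%%%                 flush(buf[0])        # oldest item decides the color
%%%         while buf:                   # drain the buffer at the end
%%%             flush(buf[0])
%%%         return cost                  # number of color changes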

@Article{Farshi:2009:ESG,
  author =       "Mohammad Farshi and Joachim Gudmundsson",
  title =        "Experimental study of geometric $t$-spanners",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "14",
  number =       "1",
  pages =        "3:1--3:??",
  month =        dec,
  year =         "2009",
  CODEN =        "????",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Mar 15 12:04:28 MDT 2010",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "The construction of t -spanners of a given point set
                 has received a lot of attention, especially from a
                 theoretical perspective. In this article, we
                 experimentally study the performance and quality of the
                 most common construction algorithms for points in the
                  Euclidean plane. We implemented the most well-known
                  $t$-spanner algorithms and tested them on a number of
                 different point sets. The experiments are discussed and
                 compared to the theoretical results, and in several
                 cases, we suggest modifications that are implemented
                 and evaluated. The measures of quality that we consider
                 are the number of edges, the weight, the maximum
                 degree, the spanner diameter, and the number of
                 crossings. This is the first time an extensive
                 comparison has been made between the running times of
                  construction algorithms of $t$-spanners and the quality
                 of the generated spanners.",
  acknowledgement = ack-nhfb,
  articleno =    "3",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
}
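
%%% Among the construction algorithms compared in this study, the
%%% greedy spanner is the easiest to state: scan the point pairs by
%%% increasing distance and add an edge only if the graph built so far
%%% has no path of length at most t times the pair distance.  An
%%% unoptimized Python sketch:
%%%
%%%     import heapq
%%%     from math import dist
%%%
%%%     def greedy_spanner(points, t):
%%%         n = len(points)
%%%         pairs = sorted((dist(points[i], points[j]), i, j)
%%%                        for i in range(n) for j in range(i + 1, n))
%%%         adj = [[] for _ in range(n)]
%%%         edges = []
%%%         for d, i, j in pairs:
%%%             best = {i: 0.0}          # Dijkstra from i, pruned at t*d
%%%             pq = [(0.0, i)]
%%%             while pq:
%%%                 du, u = heapq.heappop(pq)
%%%                 if du > t * d:
%%%                     break            # everything left is too long
%%%                 if du > best.get(u, float("inf")):
%%%                     continue         # stale entry
%%%                 for v, w in adj[u]:
%%%                     nd = du + w
%%%                     if nd < best.get(v, float("inf")):
%%%                         best[v] = nd
%%%                         heapq.heappush(pq, (nd, v))
%%%             if best.get(j, float("inf")) > t * d:
%%%                 adj[i].append((j, d))
%%%                 adj[j].append((i, d))
%%%                 edges.append((i, j))
%%%         return edges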

@Article{Cederman:2009:GQP,
  author =       "Daniel Cederman and Philippas Tsigas",
  title =        "{GPU-Quicksort}: a practical {Quicksort} algorithm for
                 graphics processors",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "14",
  number =       "1",
  pages =        "4:1--4:??",
  month =        dec,
  year =         "2009",
  CODEN =        "????",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Mar 15 12:04:28 MDT 2010",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "In this article, we describe GPU-Quicksort, an
                 efficient Quicksort algorithm suitable for highly
                 parallel multicore graphics processors. Quicksort has
                 previously been considered an inefficient sorting
                 solution for graphics processors, but we show that in
                  CUDA, NVIDIA's programming platform for general-purpose
                  computations on graphics processors, GPU-Quicksort
                 performs better than the fastest-known sorting
                 implementations for graphics processors, such as radix
                 and bitonic sort. Quicksort can thus be seen as a
                 viable alternative for sorting large quantities of data
                 on graphics processors.",
  acknowledgement = ack-nhfb,
  articleno =    "4",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
}

@Article{Chimani:2009:EEC,
  author =       "Markus Chimani and Carsten Gutwenger and Petra
                 Mutzel",
  title =        "Experiments on exact crossing minimization using
                 column generation",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "14",
  number =       "1",
  pages =        "4:1--4:??",
  month =        dec,
  year =         "2009",
  CODEN =        "????",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Mar 15 12:04:28 MDT 2010",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "The crossing number of a graph G is the smallest
                 number of edge crossings in any drawing of G into the
                 plane. Recently, the first branch-and-cut approach for
                 solving the crossing number problem has been presented
                 in Buchheim et al. [2005]. Its major drawback was the
                 huge number of variables out of which only very few
                 were actually used in the optimal solution. This
                 restricted the algorithm to rather small graphs with
                 low crossing number. In this article, we discuss two
                 column generation schemes; the first is based on
                 traditional algebraic pricing, and the second uses
                 combinatorial arguments to decide whether and which
                 variables need to be added. The main focus of this
                 article is the experimental comparison between the
                 original approach and these two schemes. In addition,
                 we evaluate the quality achieved by the best-known
                 crossing number heuristic by comparing the new results
                 with the results of the heuristic.",
  acknowledgement = ack-nhfb,
  articleno =    "4",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
}

@Article{Putze:2009:CHS,
  author =       "Felix Putze and Peter Sanders and Johannes Singler",
  title =        "Cache-, hash-, and space-efficient {Bloom} filters",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "14",
  number =       "1",
  pages =        "4:1--4:??",
  month =        dec,
  year =         "2009",
  CODEN =        "????",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Mar 15 12:04:28 MDT 2010",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "A Bloom filter is a very compact data structure that
                 supports approximate membership queries on a set,
                 allowing false positives. We propose several new
                 variants of Bloom filters and replacements with similar
                  functionality. All of them have better
                  cache-efficiency and need fewer hash bits than regular
                  Bloom filters. Some use SIMD functionality, while
                  others provide even better space efficiency. As a
                 consequence, we get a more flexible trade-off between
                 false-positive rate, space-efficiency,
                 cache-efficiency, hash-efficiency, and computational
                 effort. We analyze the efficiency of Bloom filters and
                 the proposed replacements in detail, in terms of the
                 false-positive rate, the number of expected
                 cache-misses, and the number of required hash bits. We
                 also describe and experimentally evaluate the
                 performance of highly tuned implementations. For many
                 settings, our alternatives perform better than the
                 methods proposed so far.",
  acknowledgement = ack-nhfb,
  articleno =    "4",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
}
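
%%% The cache-efficiency idea is simple to illustrate: in a ``blocked''
%%% Bloom filter, all k bits of a key fall into a single
%%% cache-line-sized block, so a query touches one line instead of k.
%%% A sketch of the general idea only; parameters and hashing are
%%% illustrative, not the authors' tuned variants:
%%%
%%%     import hashlib
%%%
%%%     class BlockedBloom:
%%%         def __init__(self, n_blocks, bits_per_block=512, k=4):
%%%             self.m, self.w, self.k = n_blocks, bits_per_block, k
%%%             self.blocks = [0] * n_blocks    # one int per block
%%%         def _positions(self, key):
%%%             # one wide hash supplies the block index and all k bits
%%%             h = int.from_bytes(
%%%                 hashlib.blake2b(key.encode()).digest(), "big")
%%%             block, h = h % self.m, h // self.m
%%%             for _ in range(self.k):
%%%                 yield block, h % self.w
%%%                 h //= self.w
%%%         def add(self, key):
%%%             for b, i in self._positions(key):
%%%                 self.blocks[b] |= 1 << i
%%%         def __contains__(self, key):
%%%             return all(self.blocks[b] >> i & 1
%%%                        for b, i in self._positions(key))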

@Article{Chimani:2009:OOC,
  author =       "Markus Chimani and Maria Kandyba and Ivana Ljubi{\'c}
                 and Petra Mutzel",
  title =        "Obtaining optimal $k$-cardinality trees fast",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "14",
  number =       "1",
  pages =        "2.5:1--2.5:23",
  month =        dec,
  year =         "2009",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/1498698.1537600",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Mar 15 12:04:28 MDT 2010",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "Given an undirected graph G = (V, E) with edge weights
                 and a positive integer number k, the k -cardinality
                 tree problem consists of finding a subtree T of G with
                 exactly k edges and the minimum possible weight. Many
                 algorithms have been proposed to solve this NP-hard
                 problem, resulting in mainly heuristic and
                 metaheuristic approaches. In this article, we present
                 an exact ILP-based algorithm using directed cuts. We
                 mathematically compare the strength of our formulation
                 to the previously known ILP formulations of this
                 problem, and show the advantages of our approach.
                  Afterwards, we give an extensive study of the
                 algorithm's practical performance compared to the
                 state-of-the-art metaheuristics. In contrast to the
                 widespread assumption that such a problem cannot be
                 efficiently tackled by exact algorithms for medium and
                 large graphs (between 200 and 5,000 nodes), our results
                 show that our algorithm not only has the advantage of
                 proving the optimality of the computed solution, but
                 also often outperforms the metaheuristic approaches in
                 terms of running time.",
  acknowledgement = ack-nhfb,
  articleno =    "2.5",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
}

@Article{Frias:2009:LRC,
  author =       "Leonor Frias and Jordi Petit and Salvador Roura",
  title =        "Lists revisited: Cache-conscious {STL} lists",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "14",
  number =       "1",
  pages =        "5:1--5:??",
  month =        dec,
  year =         "2009",
  CODEN =        "????",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Mar 15 12:04:28 MDT 2010",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "We present three cache-conscious implementations of
                  STL standard-compliant lists. Until now, one could
                 either find simple doubly linked list implementations
                 that easily cope with standard strict requirements, or
                 theoretical approaches that do not take into account
                 any of these requirements in their design. In contrast,
                 we have merged both approaches, paying special
                  attention to iterator constraints. In this article,
                  the competitiveness of our implementations is
                  demonstrated by an extensive experimental analysis.
                  This shows, for instance, 5 to 10 times faster
                  traversals and 3 to 5 times faster internal sorting.",
  acknowledgement = ack-nhfb,
  articleno =    "5",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
}
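
%%% The core idea behind cache-conscious lists fits in a few lines:
%%% keep elements in contiguous chunks instead of one heap node per
%%% element, so traversal walks arrays rather than chasing pointers.
%%% A minimal sketch that ignores the iterator-validity guarantees the
%%% article's implementations must preserve:
%%%
%%%     class UnrolledList:
%%%         def __init__(self, chunk_size=16):
%%%             self.B = chunk_size
%%%             self.chunks = [[]]           # list of contiguous chunks
%%%         def push_back(self, x):
%%%             if len(self.chunks[-1]) == self.B:
%%%                 self.chunks.append([])   # start a new chunk
%%%             self.chunks[-1].append(x)
%%%         def __iter__(self):              # cache-friendly traversal
%%%             for chunk in self.chunks:
%%%                 yield from chunk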

@Article{Holzer:2009:EPS,
  author =       "Martin Holzer and Frank Schulz and Dorothea Wagner and
                 Grigorios Prasinos and Christos Zaroliagis",
  title =        "Engineering planar separator algorithms",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "14",
  number =       "1",
  pages =        "5:1--5:??",
  month =        dec,
  year =         "2009",
  CODEN =        "????",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Mar 15 12:04:28 MDT 2010",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "We consider classical linear-time planar separator
                 algorithms, determining for a given planar graph a
                 small subset of its nodes whose removal divides the
                 graph into two components of similar size. These
                 algorithms are based on planar separator theorems,
                 which guarantee separators of size $O(\sqrt n)$ and
                 remaining components of size at most $2 n / 3$ (where
                 $n$ denotes the number of nodes in the graph). In this
                 article, we present a comprehensive experimental study
                 of the classical algorithms applied to a large variety
                 of graphs, where our main goal is to find separators
                  that not only satisfy the upper bounds, but also possess
                 other desirable characteristics with respect to
                 separator size and component balance. We achieve this
                 by investigating a number of specific alternatives for
                 the concrete implementation and fine-tuning of certain
                 parts of the classical algorithms. It is also shown
                 that the choice of several parameters influences the
                  separation quality considerably. Moreover, we propose
                  the use of fundamental cycles as planar separators,
                 whose size is at most twice the diameter of the graph:
                 For graphs of small diameter, the guaranteed bound is
                 better than the $O(\sqrt n)$ bounds, and it turns out
                 that this simple strategy almost always outperforms the
                 other algorithms, even for graphs with large
                 diameter.",
  acknowledgement = ack-nhfb,
  articleno =    "5",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
}
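
%%% The fundamental-cycle separator mentioned at the end is cheap to
%%% compute: take a BFS tree (depth at most the diameter), pick a
%%% non-tree edge (u, v), and the cycle it closes with the tree has
%%% length at most twice the tree depth plus one.  A sketch, with the
%%% balance test and the choice of the non-tree edge omitted:
%%%
%%%     from collections import deque
%%%
%%%     def fundamental_cycle(adj, root, u, v):
%%%         parent = {root: None}            # BFS tree
%%%         q = deque([root])
%%%         while q:
%%%             x = q.popleft()
%%%             for y in adj[x]:
%%%                 if y not in parent:
%%%                     parent[y] = x
%%%                     q.append(y)
%%%         def up(x):                       # path from x to the root
%%%             p = []
%%%             while x is not None:
%%%                 p.append(x)
%%%                 x = parent[x]
%%%             return p
%%%         pu, pv = up(u), up(v)
%%%         common = set(pu) & set(pv)
%%%         # trim both paths at the lowest common ancestor
%%%         cut = lambda p: p[:next(i for i, x in enumerate(p)
%%%                                 if x in common) + 1]
%%%         pu, pv = cut(pu), cut(pv)
%%%         return pu + pv[-2::-1]           # u..lca..v; (v,u) closes it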

@Article{Tarjan:2009:DTP,
  author =       "Robert E. Tarjan and Renato F. Werneck",
  title =        "Dynamic trees in practice",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "14",
  number =       "1",
  pages =        "5:1--5:??",
  month =        dec,
  year =         "2009",
  CODEN =        "????",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Mar 15 12:04:28 MDT 2010",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "Dynamic tree data structures maintain forests that
                 change over time through edge insertions and deletions.
                 Besides maintaining connectivity information in
                 logarithmic time, they can support aggregation of
                 information over paths, trees, or both. We perform an
                 experimental comparison of several versions of dynamic
                 trees: ST-trees, ET-trees, RC-trees, and two variants
                 of top trees (self-adjusting and worst-case). We
                 quantify their strengths and weaknesses through tests
                 with various workloads, most stemming from practical
                 applications. We observe that a simple, linear-time
                 implementation is remarkably fast for graphs of small
                 diameter, and that worst-case and randomized data
                 structures are best when queries are very frequent. The
                 best overall performance, however, is achieved by
                 self-adjusting ST-trees.",
  acknowledgement = ack-nhfb,
  articleno =    "5",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
}

@Article{Avdil:2009:LSS,
  author =       "Alaubek Avdil and Karsten Weihe",
  title =        "Local search starting from an {LP} solution: Fast and
                 quite good",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "14",
  number =       "1",
  pages =        "6:1--6:??",
  month =        dec,
  year =         "2009",
  CODEN =        "????",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Mar 15 12:04:28 MDT 2010",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "We present and evaluate a specific way to generate
                 good start solutions for local search. The start
                 solution is computed from a certain LP, which is
                 related to the underlying problem. We consider three
                 optimization problems: the directed MAX-CUT problem
                  with a source and a sink and two variations of the
                  MAX-$k$-SAT problem with $k = 2$ and $k = 3$. To compare our
                 technique, we run local search repeatedly with random
                 start solutions. Our technique produces, consistently,
                 final solutions whose objective values are not too far
                 from the best solutions from repeated random starts.
                 The surprising degree of stability and uniformity of
                 this result throughout all of our experiments on
                 various classes of instances strongly suggests that we
                 have consistently achieved nearly optimal solutions. On
                 the other hand, the runtime of our technique is rather
                 small, so the technique is very efficient and probably
                 quite accurate.",
  acknowledgement = ack-nhfb,
  articleno =    "6",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
}
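
%%% The overall scheme: solve an LP relaxation, round its fractional
%%% values into a start assignment, then run plain local search.  A
%%% sketch for MAX-SAT-style instances in which the LP is assumed
%%% already solved: `lp_values[i]` is the relaxed value of variable i,
%%% and clauses are lists of signed literals +j / -j.
%%%
%%%     import random
%%%
%%%     def local_search_from_lp(clauses, lp_values, flips=10000):
%%%         n = len(lp_values)
%%%         x = [random.random() < lp_values[i] for i in range(n)]
%%%         def sat(cl):
%%%             return any(x[abs(l) - 1] == (l > 0) for l in cl)
%%%         score = sum(map(sat, clauses))
%%%         for _ in range(flips):           # 1-flip hill climbing
%%%             i = random.randrange(n)
%%%             x[i] = not x[i]
%%%             new = sum(map(sat, clauses))
%%%             if new >= score:
%%%                 score = new              # keep the flip
%%%             else:
%%%                 x[i] = not x[i]          # undo
%%%         return x, score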

@Article{Deineko:2009:FMW,
  author =       "Vladimir Deineko and Alexander Tiskin",
  title =        "Fast minimum-weight double-tree shortcutting for
                 metric {TSP}: Is the best one good enough?",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "14",
  number =       "1",
  pages =        "6:1--6:??",
  month =        dec,
  year =         "2009",
  CODEN =        "????",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Mar 15 12:04:28 MDT 2010",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "The Metric Traveling Salesman Problem (TSP) is a
                 classical NP-hard optimization problem. The double-tree
                 shortcutting method for Metric TSP yields an
                 exponentially-sized space of TSP tours, each of which
                 approximates the optimal solution within, at most, a
                 factor of 2. We consider the problem of finding among
                 these tours the one that gives the closest
                 approximation, that is, the minimum-weight double-tree
                 shortcutting. Burkard et al. gave an algorithm for this
                 problem, running in time $O(n^3 + 2^d n^2)$ and memory
                 $O(2^d n^2)$, where $d$ is the maximum node degree in
                 the rooted minimum spanning tree. We give an improved
                 algorithm for the case of small $d$ (including planar
                 Euclidean TSP, where $d \leq 4$), running in time
                 $O(4^d n^2)$ and memory $O(4^d n)$. This improvement
                 allows one to solve the problem on much larger
                 instances than previously attempted. Our computational
                 experiments suggest that in terms of the time-quality
                 trade-off, the minimum-weight double-tree shortcutting
                 method provides one of the best existing
                 tour-constructing heuristics.",
  acknowledgement = ack-nhfb,
  articleno =    "6",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
}
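
%%% For reference, the plain double-tree heuristic whose tour space the
%%% paper optimizes over: DFS the minimum spanning tree and shortcut
%%% repeated vertices; any tour obtained this way is within factor 2 of
%%% optimal.  The paper finds the minimum-weight member of this space;
%%% the sketch below produces only the single DFS-order shortcutting:
%%%
%%%     def double_tree_tour(mst_adj, root=0):
%%%         tour, seen, stack = [], set(), [root]
%%%         while stack:
%%%             u = stack.pop()
%%%             if u in seen:
%%%                 continue                 # shortcut a repeated visit
%%%             seen.add(u)
%%%             tour.append(u)
%%%             stack.extend(mst_adj[u])
%%%         return tour + [root]             # close the cycle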

@Article{Figueroa:2009:SSA,
  author =       "Karina Figueroa and Edgar Chavez and Gonzalo Navarro
                 and Rodrigo Paredes",
  title =        "Speeding up spatial approximation search in metric
                 spaces",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "14",
  number =       "1",
  pages =        "6:1--6:??",
  month =        dec,
  year =         "2009",
  CODEN =        "????",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Mar 15 12:04:28 MDT 2010",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "Proximity searching consists of retrieving from a
                 database those elements that are similar to a query
                 object. The usual model for proximity searching is a
                 metric space where the distance, which models the
                 proximity, is expensive to compute. An index uses
                  precomputed distances to speed up query processing.
                 Among all the known indices, the baseline for
                 performance for about 20 years has been AESA. This
                 index uses an iterative procedure, where at each
                 iteration it first chooses the next promising element
                 (``pivot'') to compare to the query, and then it
                 discards database elements that can be proved not
                 relevant to the query using the pivot. The next pivot
                  in AESA is chosen as the one minimizing the sum of
                  the lower bounds on the distance to the query derived
                  from previous pivots. In this article, we introduce the new
                 index iAESA, which establishes a new performance
                 baseline for metric space searching. The difference
                 with AESA is the method to select the next pivot. In
                 iAESA, each candidate sorts previous pivots by
                 closeness to it, and chooses the next pivot as the
                 candidate whose order is most similar to that of the
                 query. We also propose a modification to AESA-like
                 algorithms to turn them into probabilistic algorithms.
                 Our empirical results confirm a consistent improvement
                 in query performance. For example, we perform as few as
                 60\% of the distance evaluations of AESA in a database
                 of documents, a very important and difficult real-life
                 instance of the problem. For the probabilistic
                  algorithm, we perform, in a database of faces, up to 40\%
                  of the comparisons made by the best alternative
                  algorithm to retrieve the same fraction of the
                  correct answers. Based on the empirical results, we
                 conjecture that the new probabilistic AESA-like
                 algorithms will become, as AESA had been for exact
                 algorithms, a reference point establishing, in
                 practice, a lower bound on how good a probabilistic
                 proximity search algorithm can be.",
  acknowledgement = ack-nhfb,
  articleno =    "6",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
}
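
%%% The AESA baseline that iAESA refines fits in a few lines; its cost
%%% is the O(n^2) matrix of precomputed distances.  A nearest-neighbor
%%% sketch: `dmat` is that matrix and `query_dist(i)` the expensive
%%% distance from element i to the query.
%%%
%%%     def aesa_nn(n, dmat, query_dist):
%%%         cand = set(range(n))
%%%         sum_lb = [0.0] * n           # sum of proved lower bounds
%%%         max_lb = [0.0] * n           # best proved lower bound
%%%         best, best_d = None, float("inf")
%%%         while cand:
%%%             p = min(cand, key=lambda i: sum_lb[i])   # next pivot
%%%             cand.remove(p)
%%%             d = query_dist(p)                        # expensive
%%%             if d < best_d:
%%%                 best, best_d = p, d
%%%             for i in list(cand):
%%%                 # triangle inequality: dist(q,i) >= |d - dmat[p][i]|
%%%                 b = abs(d - dmat[p][i])
%%%                 sum_lb[i] += b
%%%                 max_lb[i] = max(max_lb[i], b)
%%%                 if max_lb[i] >= best_d:
%%%                     cand.remove(i)       # provably not closer
%%%         return best, best_d
%%%
%%% iAESA keeps this skeleton but replaces the pivot-selection rule by
%%% the permutation-similarity criterion described in the abstract.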

@Article{Barbay:2009:EIS,
  author =       "J{\'e}r{\'e}my Barbay and Alejandro L{\'o}pez-Ortiz
                 and Tyler Lu and Alejandro Salinger",
  title =        "An experimental investigation of set intersection
                 algorithms for text searching",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "14",
  number =       "1",
  pages =        "7:1--7:??",
  month =        dec,
  year =         "2009",
  CODEN =        "????",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Mar 15 12:04:28 MDT 2010",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "The intersection of large ordered sets is a common
                  problem in the context of the evaluation of Boolean
                 queries to a search engine. In this article, we propose
                 several improved algorithms for computing the
                 intersection of sorted arrays, and in particular for
                 searching sorted arrays in the intersection context. We
                 perform an experimental comparison with the algorithms
                 from the previous studies from Demaine,
                 L{\'o}pez-Ortiz, and Munro [ALENEX 2001] and from
                 Baeza-Yates and Salinger [SPIRE 2005]; in addition, we
                 implement and test the intersection algorithm from
                 Barbay and Kenyon [SODA 2002] and its randomized
                  variant [SAGA 2003]. We consider the random data
                 set from Baeza-Yates and Salinger, the Google queries
                 used by Demaine et al., a corpus provided by Google,
                 and a larger corpus from the TREC Terabyte 2006
                 efficiency query stream, along with its own query log.
                 We measure the performance both in terms of the number
                 of comparisons and searches performed, and in terms of
                 the CPU time on two different architectures. Our
                 results confirm or improve the results from both
                 previous studies in their respective context
                 (comparison model on real data, and CPU measures on
                 random data) and extend them to new contexts. In
                 particular, we show that value-based search algorithms
                  perform well on posting lists in terms of the number of
                 comparisons performed.",
  acknowledgement = ack-nhfb,
  articleno =    "7",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
}
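
%%% A representative of the algorithms compared here: intersect two
%%% sorted arrays by searching for each element of the shorter one in
%%% the longer one with doubling (``galloping'') search, so each probe
%%% costs time logarithmic in the gap skipped rather than in the whole
%%% array.  A two-list sketch (the article also treats k-way
%%% intersection):
%%%
%%%     from bisect import bisect_left
%%%
%%%     def gallop(a, x, lo):
%%%         step = 1                     # double until we overshoot x
%%%         while lo + step < len(a) and a[lo + step] < x:
%%%             step *= 2
%%%         return bisect_left(a, x, lo, min(lo + step, len(a)))
%%%
%%%     def intersect(a, b):
%%%         if len(a) > len(b):
%%%             a, b = b, a
%%%         out, j = [], 0
%%%         for x in a:                  # resume where last probe ended
%%%             j = gallop(b, x, j)
%%%             if j < len(b) and b[j] == x:
%%%                 out.append(x)
%%%         return out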

@Article{Estivill-Castro:2009:RRD,
  author =       "Vladimir Estivill-Castro and Apichat Heednacram and
                 Francis Suraweera",
  title =        "Reduction rules deliver efficient {FPT}-algorithms for
                 covering points with lines",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "14",
  number =       "1",
  pages =        "7:1--7:??",
  month =        dec,
  year =         "2009",
  CODEN =        "????",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Mar 15 12:04:28 MDT 2010",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "We present efficient algorithms to solve the Line
                  Cover Problem exactly. In this NP-complete problem, the
                  inputs are $n$ points in the plane and a positive integer
                  $k$, and we are asked to decide whether we can cover these $n$
                  points with at most $k$ lines. Our approach is based on
                 fixed-parameter tractability and, in particular,
                 kernelization. We propose several reduction rules to
                 transform instances of Line Cover into equivalent
                 smaller instances. Once instances are no longer
                 susceptible to these reduction rules, we obtain a
                 problem kernel whose size is bounded by a polynomial
                  function of the parameter $k$ and does not depend on the
                  size $n$ of the input. Our algorithms provide exact
                 solutions and are easy to implement. We also describe
                 the design of algorithms to solve the corresponding
                 optimization problem exactly. We experimentally
                 evaluated ten variants of the algorithms to determine
                 the impact and trade-offs of several reduction rules.
                 We show that our approach provides tractability for a
                 larger range of values of the parameter and larger
                 inputs, improving the execution time by several orders
                 of magnitude with respect to earlier algorithms that
                  use fewer rules.",
  acknowledgement = ack-nhfb,
  articleno =    "7",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
}
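
%%% The flavor of the reduction rules: a classical one observes that a
%%% line covering more than k points must be in every solution of size
%%% at most k, because any other line meets it in at most one point, so
%%% k other lines cover at most k of its points.  A sketch of that
%%% single rule with exact integer arithmetic (the article's rule set
%%% is larger):
%%%
%%%     from itertools import combinations
%%%
%%%     def collinear(p, q, r):
%%%         return ((q[0] - p[0]) * (r[1] - p[1])
%%%                 == (q[1] - p[1]) * (r[0] - p[0]))
%%%
%%%     def reduce_instance(points, k):
%%%         changed = True
%%%         while changed and k > 0:
%%%             changed = False
%%%             for p, q in combinations(points, 2):
%%%                 line = [r for r in points if collinear(p, q, r)]
%%%                 if len(line) > k:    # this line is forced
%%%                     points = [r for r in points if r not in line]
%%%                     k -= 1
%%%                     changed = True
%%%                     break
%%%         return points, k             # > k*k points left => NO instance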

@Article{DeLoera:2009:CMM,
  author =       "Jes{\'u}s A. {De Loera} and David C. Haws and Jon Lee
                 and Allison O'Hair",
  title =        "Computation in multicriteria matroid optimization",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "14",
  number =       "1",
  pages =        "8:1--8:??",
  month =        dec,
  year =         "2009",
  CODEN =        "????",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Mar 15 12:04:28 MDT 2010",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "Motivated by recent work on algorithmic theory for
                 nonlinear and multicriteria matroid optimization, we
                 have developed algorithms and heuristics aimed at
                 practical solution of large instances of some of these
                 difficult problems. Our methods primarily use the local
                 adjacency structure inherent in matroid polytopes to
                 pivot to feasible solutions, which may or may not be
                 optimal. We also present a modified
                 breadth-first-search heuristic that uses adjacency to
                 enumerate a subset of feasible solutions. We present
                 other heuristics and provide computational evidence
                 supporting our techniques. We implemented all of our
                 algorithms in the software package MOCHA.",
  acknowledgement = ack-nhfb,
  articleno =    "8",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
}

@Article{Jacobs:2010:ESR,
  author =       "Tobias Jacobs",
  title =        "An experimental study of recent hotlink assignment
                 algorithms",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "15",
  number =       "1",
  pages =        "1.1:1--1.1:??",
  month =        mar,
  year =         "2010",
  CODEN =        "????",
  DOI =          "http://doi.acm.org/10.1145/1671970.1671971",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Mar 15 12:05:50 MDT 2010",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "The concept of {\em hotlink assignment\/} aims at
                 enhancing the structure of Web sites such that the
                 user's expected navigation effort is minimized. We
                 concentrate on sites that are representable by trees
                 and assume that each leaf carries a weight representing
                 its popularity.\par

                 The problem of optimally adding at most one additional
                 outgoing edge (``hotlink'') to each inner node has been
                 widely studied. A considerable number of approximation
                 algorithms have been proposed and worst-case bounds for
                 the quality of the computed solutions have been given.
                  However, little is known about the practical
                 behavior of most of these algorithms.\par

                 This article contributes to closing this gap by
                 evaluating all recently proposed strategies
                 experimentally. Our experiments are based on trees
                 extracted from real Web sites, as well as on synthetic
                 instances. The latter are generated by a new method
                 that simulates the growth of a Web site over time.
                 Finally, we present a new heuristic that is easy to
                 implement and exhibits excellent behavior in
                 practice.",
  acknowledgement = ack-nhfb,
  articleno =    "1.1",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
  keywords =     "approximation; hotlink; Search tree",
}

@Article{Spence:2010:SGS,
  author =       "Ivor Spence",
  title =        "{{\tt sgen1}}: a generator of small but difficult
                 satisfiability benchmarks",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "15",
  number =       "1",
  pages =        "1.2:1--1.2:??",
  month =        mar,
  year =         "2010",
  CODEN =        "????",
  DOI =          "http://doi.acm.org/10.1145/1671970.1671972",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Mar 15 12:05:50 MDT 2010",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "The satisfiability problem is known to be NP-Complete;
                 therefore, there should be relatively small problem
                 instances that take a very long time to solve. However,
                 most of the smaller benchmarks that were once thought
                 challenging, especially the satisfiable ones, can be
                 processed quickly by modern SAT-solvers. We describe
                 and make available a generator that produces both
                 unsatisfiable and, more significantly, satisfiable
                 formulae that take longer to solve than any others
                 known. At the two most recent international SAT
                 Competitions, the smallest unsolved benchmarks were
                 created by this generator. We analyze the results of
                 all solvers in the most recent competition when applied
                 to these benchmarks and also present our own more
                 focused experiments.",
  acknowledgement = ack-nhfb,
  articleno =    "1.2",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
  keywords =     "SAT-solvers; Satisfiability benchmarks",
}

@Article{Langguth:2010:HIB,
  author =       "Johannes Langguth and Fredrik Manne and Peter
                 Sanders",
  title =        "Heuristic initialization for bipartite matching
                 problems",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "15",
  number =       "1",
  pages =        "1.3:1--1.3:??",
  month =        mar,
  year =         "2010",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/1712655.1712656",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Mar 15 12:05:50 MDT 2010",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "It is a well-established result that improved pivoting
                 in linear solvers can be achieved by computing a
                 bipartite matching between matrix entries and positions
                 on the main diagonal. With the availability of
                 increasingly faster linear solvers, the speed of
                 bipartite matching computations must keep up to avoid
                 slowing down the main computation. Fast algorithms for
                 bipartite matching, which are usually initialized with
                 simple heuristics, have been known for a long time.
                 However, the performance of these algorithms is largely
                 dependent on the quality of the heuristic. We compare
                 combinations of several known heuristics and exact
                 algorithms to find fast combined methods, using
                 real-world matrices as well as randomly generated
                 instances. In addition, we present a new heuristic
                 aimed at obtaining high-quality matchings and compare
                 its impact on bipartite matching algorithms with that
                 of other heuristics. The experiments suggest that its
                 performance compares favorably to the best-known
                 heuristics, and that it is especially suited for
                 application in linear solvers.",
  acknowledgement = ack-nhfb,
  articleno =    "1.3",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
  keywords =     "Heuristics; matching",
}
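
%%% The role of the heuristic is to hand the exact algorithm a large
%%% partial matching cheaply, so that few augmenting-path searches
%%% remain.  A sketch of the simplest greedy initialization feeding the
%%% standard DFS augmentation step (the article studies better
%%% heuristics and faster exact algorithms):
%%%
%%%     def greedy_init(adj, n_left):
%%%         match_l, match_r = [None] * n_left, {}
%%%         for u in range(n_left):      # match to first free neighbor
%%%             for v in adj[u]:
%%%                 if v not in match_r:
%%%                     match_l[u], match_r[v] = v, u
%%%                     break
%%%         return match_l, match_r
%%%
%%%     def augment(adj, u, match_l, match_r, seen):
%%%         for v in adj[u]:             # standard DFS augmenting path
%%%             if v not in seen:
%%%                 seen.add(v)
%%%                 if v not in match_r or augment(adj, match_r[v],
%%%                                                match_l, match_r, seen):
%%%                     match_l[u], match_r[v] = v, u
%%%                     return True
%%%         return False
%%%
%%% Only the left vertices the heuristic leaves unmatched need an
%%% augment() call, which is where a good initialization saves time.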

@Article{Delbot:2010:AEC,
  author =       "Fran{\c{c}}ois Delbot and Christian Laforest",
  title =        "Analytical and experimental comparison of six
                 algorithms for the vertex cover problem",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "15",
  number =       "1",
  pages =        "14:1--14:??",
  month =        mar,
  year =         "2010",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/1865970.1865971",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Dec 10 09:03:03 MST 2012",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "The vertex cover is a well-known NP-complete
                 minimization problem in graphs that has received a lot
                 of attention these last decades. Many algorithms have
                 been proposed to construct vertex cover in different
                 contexts (offline, online, list algorithms, etc.)
                 leading to solutions of different level of quality.
                 This quality is traditionally measured in terms of
                 approximation ratio, that is, the worst possible ratio
                 between the quality of the solution constructed and the
                  optimal one. For the vertex cover problem, such known
                  ratios range between 2 (conjectured to be
                  the smallest constant ratio) and $\Delta$, the maximum
                 degree of the graph. Based on this measure of quality,
                 the hierarchy is almost clear (the smaller the ratio
                 is, the better the algorithm is). In this article, we
                 show that this measure, although of great importance,
                 is too macroscopic and does not reflect the practical
                 behavior of the methods. We prove this by analyzing
                 (known and recent) algorithms running on a particular
                 class of graphs: the paths. We obtain closed and exact
                 formulas for the mean of the sizes of vertex cover
                 constructed by these different algorithms. Then, we
                 assess their quality experimentally in several
                  well-chosen classes of graphs (random, regular, trees,
                  BHOSLIB benchmarks, trap graphs, etc.). The synthesis
                  of all these results leads us to formulate a ``practical
                  hierarchy'' of the algorithms. We remark that it is,
                  more or less, the opposite of the one based only on
                 approximation ratios, showing that worst-case analysis
                 only gives partial information on the quality of an
                 algorithm.",
  acknowledgement = ack-nhfb,
  articleno =    "1.4",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
}
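
%%% One extreme of the approximation-ratio hierarchy discussed above is
%%% the textbook maximal-matching algorithm: take both endpoints of
%%% every edge of a greedily built maximal matching.  Its worst-case
%%% ratio of 2 is the best constant known, yet the article's point is
%%% that such worst-case ratios can invert the practical ranking:
%%%
%%%     def vertex_cover_2approx(edges):
%%%         cover = set()
%%%         for u, v in edges:
%%%             if u not in cover and v not in cover:  # edge uncovered:
%%%                 cover.update((u, v))               # take both ends
%%%         return cover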

@Article{Arroyuelo:2010:PAR,
  author =       "Diego Arroyuelo and Gonzalo Navarro",
  title =        "Practical approaches to reduce the space requirement
                 of {Lempel--Ziv}-based compressed text indices",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "15",
  number =       "1",
  pages =        "15:1--15:??",
  month =        mar,
  year =         "2010",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/1671970.1883684",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Dec 10 09:03:03 MST 2012",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "Given a text $T [1.\,.n]$ over an alphabet of size
                 $\sigma$, the full-text search problem consists in
                 locating the occ occurrences of a given pattern
                 $P[1.\,.m]$ in $T$. Compressed full-text self-indices
                 are space-efficient representations of the text that
                 provide direct access to and indexed search on
                 it.\par

                 The LZ-index of Navarro is a compressed full-text
                 self-index based on the LZ78 compression algorithm.
                 This index requires about 5 times the size of the
                 compressed text (in theory, $4 n H_k(T) + o(n \log
                 \sigma)$ bits of space, where $H_k(T)$ is the $k$-th
                 order empirical entropy of $T$). In practice, the
                 average locating complexity of the LZ-index is
                 $O(\sigma m \log_\sigma n + {\rm occ} \sigma^{m / 2})$,
                 where {\em occ} is the number of occurrences of $P$. It
                 can extract text substrings of length $l$ in $O(l)$
                 time. This index outperforms competing schemes both to
                 locate short patterns and to extract text snippets.
                 However, the LZ-index can be up to 4 times larger than
                 the smallest existing indices (which use $n H_k(T) +
                 o(n \log \sigma)$ bits in theory), and it does not
                 offer space/time tuning options. This limits its
                 applicability.\par

                 In this article, we study practical ways to reduce the
                 space of the LZ-index. We obtain new LZ-index variants
                 that require $2(1 + \epsilon) n H_k(T) + o(n \log
                 \sigma)$ bits of space, for any $0 < \epsilon < 1$.
                 They have an average locating time of $O(1 / \epsilon
                 (m \log n + {\rm occ} \sigma^{m / 2}))$, while
                 extracting takes $O(l)$ time.\par

                 We perform extensive experimentation and conclude that
                 our schemes are able to reduce the space of the
                 original LZ-index by a factor of $2/3$, that is, around
                 $3$ times the compressed text size. Our schemes are
                 able to extract about 1 to 2 MB of the text per second,
                 being twice as fast as the most competitive
                 alternatives. Pattern occurrences are located at a rate
                 of up to 1 to 4 million per second. This constitutes
                 the best space\slash time trade-off when indices are
                 allowed to use 4 times the size of the compressed text
                 or more.",
  acknowledgement = ack-nhfb,
  articleno =    "1.5",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
}
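
%%% For reference, the LZ78 parsing on which the LZ-index is built: the
%%% text is split greedily into phrases, each one a previously seen
%%% phrase extended by a single symbol, and the phrases form a trie.
%%% A minimal sketch:
%%%
%%%     def lz78_parse(text):
%%%         trie = {}                    # (node id, symbol) -> node id
%%%         phrases, node, next_id = [], 0, 1
%%%         for ch in text:
%%%             if (node, ch) in trie:
%%%                 node = trie[(node, ch)]      # extend current phrase
%%%             else:
%%%                 trie[(node, ch)] = next_id   # new phrase ends here
%%%                 phrases.append((node, ch))   # (parent id, new symbol)
%%%                 node, next_id = 0, next_id + 1
%%%         if node:
%%%             phrases.append((node, ""))       # trailing partial phrase
%%%         return phrases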

@Article{Ullmann:2010:BVA,
  author =       "Julian R. Ullmann",
  title =        "Bit-vector algorithms for binary constraint
                 satisfaction and subgraph isomorphism",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "15",
  number =       "1",
  pages =        "16:1--16:??",
  month =        mar,
  year =         "2010",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/1671970.1921702",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Dec 10 09:03:03 MST 2012",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "A solution to a binary constraint satisfaction problem
                 is a set of discrete values, one in each of a given set
                 of domains, subject to constraints that allow only
                 prescribed pairs of values in specified pairs of
                 domains. Solutions are sought by backtrack search
                 interleaved with a process that removes from domains
                 those values that are currently inconsistent with
                 provisional choices already made in the course of
                 search. For each value in a given domain, a bit-vector
                 shows which values in another domain are or are not
                 permitted in a solution. Bit-vector representation of
                 constraints allows bit-parallel, therefore fast,
                 operations for editing domains during search. This
                 article revises and updates bit-vector algorithms
                  published in the 1970s, and introduces focus search,
                 which is a new bit-vector algorithm relying more on
                 search and less on domain-editing than previous
                 algorithms. Focus search is competitive within a
                 limited family of constraint satisfaction problems.
                 Determination of subgraph isomorphism is a specialized
                 binary constraint satisfaction problem for which
                 bit-vector algorithms have been widely used since the
                 1980s, particularly for matching molecular structures.
                 This article very substantially updates the author's
                 1976 subgraph isomorphism algorithm, and reports
                 experimental results with random and real-life data.",
  acknowledgement = ack-nhfb,
  articleno =    "1.6",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
}
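
%%% The bit-parallel domain-editing step at the heart of these
%%% algorithms can be shown with Python's unbounded integers as bit
%%% vectors.  The structures are hypothetical stand-ins for the
%%% article's representation: domains[x] is the bit vector of values
%%% still allowed for x, and rows[x][y][a] the bit vector of y-values
%%% compatible with x = a.
%%%
%%%     def revise(domains, rows, x, y):
%%%         support, bits, a = 0, domains[x], 0
%%%         while bits:                  # for each value a left in D(x)
%%%             if bits & 1:
%%%                 support |= rows[x][y][a]   # OR whole rows at once
%%%             bits >>= 1
%%%             a += 1
%%%         domains[y] &= support        # drop unsupported values of y
%%%         return domains[y] != 0       # empty domain: backtrack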

@Article{Askitis:2010:RSH,
  author =       "Nikolas Askitis and Justin Zobel",
  title =        "Redesigning the string hash table, burst trie, and
                 {BST} to exploit cache",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "15",
  number =       "1",
  pages =        "17:1--17:??",
  month =        mar,
  year =         "2010",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/1671970.1921704",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Dec 10 09:03:03 MST 2012",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "A key decision when developing in-memory computing
                  applications is the choice of a mechanism to store and
                 retrieve strings. The most efficient current data
                 structures for this task are the hash table with
                 move-to-front chains and the burst trie, both of which
                 use linked lists as a substructure, and variants of
                 binary search tree. These data structures are
                 computationally efficient, but typical implementations
                 use large numbers of nodes and pointers to manage
                 strings, which is not efficient in use of cache. In
                 this article, we explore two alternatives to the
                 standard representation: the simple expedient of
                 including the string in its node, and, for linked
                 lists, the more drastic step of replacing each list of
                 nodes by a contiguous array of characters. Our
                 experiments show that, for large sets of strings, the
                 improvement is dramatic. For hashing, in the best case
                 the total space overhead is reduced to less than 1 bit
                  per string. For the burst trie, over 300 MB of strings
                  can be stored in a total of under 200 MB of memory with
                 significantly improved search time. These results, on a
                 variety of data sets, show that cache-friendly variants
                 of fundamental data structures can yield remarkable
                 gains in performance.",
  acknowledgement = ack-nhfb,
  articleno =    "1.7",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
}
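
%%% The ``contiguous array of characters'' idea is tiny to sketch: each
%%% hash slot holds one byte buffer of NUL-separated strings, so a
%%% lookup scans a single contiguous allocation instead of chasing
%%% chain pointers.  Layout only; the implementations measured in the
%%% article manage growth and memory far more carefully:
%%%
%%%     class ArrayHash:
%%%         def __init__(self, slots=1 << 16):
%%%             self.table = [bytearray() for _ in range(slots)]
%%%         def _slot(self, s):
%%%             return hash(s) % len(self.table)
%%%         def contains(self, s):
%%%             buf = self.table[self._slot(s)]
%%%             return s.encode() in buf.split(b"\0")
%%%         def add(self, s):
%%%             if not self.contains(s):     # append to the slot's buffer
%%%                 self.table[self._slot(s)] += s.encode() + b"\0"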

@Article{McGeoch:2010:P,
  author =       "Catherine C. McGeoch",
  title =        "Preface",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "15",
  number =       "1",
  pages =        "2.1:1--2.1:??",
  month =        mar,
  year =         "2010",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/1671970.1671974",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Dec 10 09:03:03 MST 2012",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/jea.bib",
  acknowledgement = ack-nhfb,
  articleno =    "2.1",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
}

@Article{Chimani:2010:LFU,
  author =       "Markus Chimani and Carsten Gutwenger and Petra Mutzel
                 and Hoi-Ming Wong",
  title =        "Layer-free upward crossing minimization",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "15",
  number =       "1",
  pages =        "2.2:1--2.2:??",
  month =        mar,
  year =         "2010",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/1671970.1671975",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Dec 10 09:03:03 MST 2012",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "An upward drawing of a DAG $G$ is a drawing of $G$ in
                 which all arcs are drawn as curves increasing
                 monotonically in the vertical direction. In this
                 article, we present a new approach for upward crossing
                 minimization, that is, finding an upward drawing of a
                 DAG $G$ with as few crossings as possible. Our
                 algorithm is based on a two-stage upward planarization
                 approach, which computes a feasible upward planar
                 subgraph in the first step and reinserts the remaining
                 arcs by computing constraint-feasible upward insertion
                 paths. An experimental study shows that the new
                 algorithm leads to much better results than existing
                 algorithms for upward crossing minimization, including
                 the classical Sugiyama approach.",
  acknowledgement = ack-nhfb,
  articleno =    "2.2",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
  keywords =     "Crossing number; planarization approach; upward
                 drawing; upward planarization",
}

@Article{Bauer:2010:CHG,
  author =       "Reinhard Bauer and Daniel Delling and Peter Sanders
                 and Dennis Schieferdecker and Dominik Schultes and
                 Dorothea Wagner",
  title =        "Combining hierarchical and goal-directed speed-up
                 techniques for {Dijkstra}'s algorithm",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "15",
  number =       "1",
  pages =        "2.3:1--2.3:??",
  month =        mar,
  year =         "2010",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/1671970.1671976",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Dec 10 09:03:03 MST 2012",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "In recent years, highly effective hierarchical and
                 goal-directed speed-up techniques for routing in large
                 road networks have been developed. This article makes a
                 systematic study of combinations of such techniques.
                 These combinations turn out to give the best results in
                  many scenarios, including unit disk graphs,
                 grid networks, and time-expanded timetables. Besides
                 these quantitative results, we obtain general insights
                 for successful combinations.",
  acknowledgement = ack-nhfb,
  articleno =    "2.3",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
  keywords =     "Dijkstra's algorithm; speed-up technique",
}

@Article{Nash:2010:CID,
  author =       "Nicholas Nash and David Gregg",
  title =        "Comparing integer data structures for 32- and 64-bit
                 keys",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "15",
  number =       "1",
  pages =        "2.4:1--2.4:??",
  month =        mar,
  year =         "2010",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/1671970.1671977",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Dec 10 09:03:03 MST 2012",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "In this article, we experimentally compare a number of
                 data structures operating over keys that are 32- and
                 64-bit integers. We examine traditional
                 comparison-based search trees as well as data
                 structures that take advantage of the fact that the
                 keys are integers such as van Emde Boas trees and
                 various trie-based data structures. We propose a
                 variant of a burst trie that performs better in time
                 than all the alternative data structures. In addition,
                 even for small sets of keys, this burst trie variant
                 occupies less space than comparison-based data
                 structures such as red-black trees and $B$-trees. Burst
                 tries have previously been shown to provide a very
                 efficient base for implementing cache efficient string
                 sorting algorithms. We find that with suitable
                 engineering, they also perform excellently as a dynamic
                 ordered data structure operating over integer keys. We
                 provide experimental results when the data structures
                 operate over uniform random data. We also present
                 experimental results for other types of data, including
                 datasets arising from {\em Valgrind}, a widely used
                 suite of tools for the dynamic binary instrumentation
                 of programs.",
  acknowledgement = ack-nhfb,
  articleno =    "2.4",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
  keywords =     "Integer keys; level compression; searching; trees;
                 tries",
}

@Article{Sinha:2010:EBT,
  author =       "Ranjan Sinha and Anthony Wirth",
  title =        "Engineering burstsort: Toward fast in-place string
                 sorting",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "15",
  number =       "1",
  pages =        "2.5:1--2.5:??",
  month =        mar,
  year =         "2010",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/1671970.1671978",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Dec 10 09:03:03 MST 2012",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "Burstsort is a trie-based string sorting algorithm
                 that distributes strings into small buckets whose
                 contents are then sorted in cache. This approach has
                 earlier been demonstrated to be efficient on modern
                 cache-based processors [Sinha \& Zobel, JEA 2004]. In
                 this article, we introduce improvements that reduce by
                 a significant margin the memory requirement of
                  Burstsort: it is now less than 1\% greater than that
                  of an in-place algorithm. These techniques can be
                  applied to existing variants of Burstsort, as well as
                  to other string algorithms, such as those for string
                  management.\par

                 We redesigned the buckets, introducing sub-buckets and
                 an index structure for them, which resulted in an
                 order-of-magnitude space reduction. We also show the
                 practicality of moving some fields from the trie nodes
                 to the insertion point (for the next string pointer) in
                 the bucket; this technique reduces memory usage of the
                 trie nodes by one-third. Importantly, the trade-off for
                 the reduction in memory use is only a very slight
                 increase in the running time of Burstsort on real-world
                 string collections. In addition, during the
                 bucket-sorting phase, the string suffixes are copied to
                 a small buffer to improve their spatial locality,
                 lowering the running time of Burstsort by up to 30\%.
                 These memory usage enhancements have enabled the
                 copy-based approach [Sinha et al., JEA 2006] to also
                 reduce the memory usage with negligible impact on
                 speed.",
  acknowledgement = ack-nhfb,
  articleno =    "2.5",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
  keywords =     "algorithms; cache; experimental algorithms; Sorting;
                 string management; tries",
}

@Article{Boytsov:2011:IMA,
  author =       "Leonid Boytsov",
  title =        "Indexing methods for approximate dictionary searching:
                 Comparative analysis",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "16",
  number =       "1",
  pages =        "1.1:1--1.1:??",
  month =        may,
  year =         "2011",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/1963190.1963191",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon May 30 08:26:05 MDT 2011",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "The primary goal of this article is to survey
                 state-of-the-art indexing methods for approximate
                 dictionary searching. To improve understanding of the
                 field, we introduce a taxonomy that classifies all
                 methods into direct methods and sequence-based
                 filtering methods. We focus on infrequently updated
                 dictionaries, which are used primarily for retrieval.
                 Therefore, we consider indices that are optimized for
                 retrieval rather than for update. The indices are
                 assumed to be associative, that is, capable of storing
                 and retrieving auxiliary information, such as string
                 identifiers. All solutions are lossless and guarantee
                 retrieval of strings within a specified edit distance
                 $k$. Benchmark results are presented for the
                 practically important cases of $k = 1, 2$, and $3$.",
  acknowledgement = ack-nhfb,
  articleno =    "1.1",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
}

@Article{Biro:2011:SMC,
  author =       "P{\'e}ter Bir{\'o} and Robert W. Irving and Ildik{\'o}
                 Schlotter",
  title =        "Stable matching with couples: an empirical study",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "16",
  number =       "1",
  pages =        "12:1--12:??",
  year =         "2011",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/1963190.1970372",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Sat Feb 25 18:02:18 MST 2012",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "In practical applications, algorithms for the classic
                  version of the hospitals/residents problem (the
                  many-to-one version of the stable marriage problem)
                  may
                 have to be extended to accommodate the needs of couples
                 who wish to be allocated to (geographically) compatible
                 places. Such an extension has been in operation in the
                  National Resident Matching Program (NRMP) matching
                 scheme in the United States for a number of years. In
                 this setting, a stable matching need not exist, and it
                 is an NP-complete problem to decide if one does.
                 However, the only previous empirical study in this
                 context (focused on the NRMP algorithm), together with
                 information from NRMP, suggest that, in practice,
                 stable matchings do exist and that an appropriate
                 heuristic can be used to find such a matching. The
                 study presented here was motivated by the recent
                 decision to accommodate couples in the Scottish
                 Foundation Allocation Scheme (SFAS), the Scottish
                 equivalent of the NRMP. Here, the problem is a special
                 case, since hospital preferences are derived from a
                 ``master list'' of resident scores, but we show that
                 the existence problem remains NP-complete in this case.
                 We describe the algorithm used in SFAS and contrast it
                 with a version of the algorithm that forms the basis of
                 the NRMP approach. We also propose a third simpler
                 algorithm based on satisfying blocking pairs, and an
                 FPT algorithm when the number of couples is viewed as a
                 parameter. We present an empirical study of the
                 performance of a number of variants of these algorithms
                 using a range of datasets. The results indicate that,
                 not surprisingly, increasing the ratio of couples to
                 single applicants typically makes it harder to find a
                 stable matching (and, by inference, less likely that a
                 stable matching exists). However, the likelihood of
                 finding a stable matching is very high for realistic
                 values of this ratio, and especially so for particular
                 variants of the algorithms.",
  acknowledgement = ack-nhfb,
  articleno =    "1.2",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
}

@Article{Huber:2011:MGS,
  author =       "Stefan Huber and Martin Held",
  title =        "Motorcycle graphs: Stochastic properties motivate an
                 efficient yet simple implementation",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "16",
  number =       "1",
  pages =        "13:1--13:??",
  year =         "2011",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/1963190.2019578",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Sat Feb 25 18:02:18 MST 2012",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "In this article, we study stochastic properties of a
                 geometric setting that underpins random motorcycle
                 graphs and use it to motivate a simple but very
                 efficient algorithm for computing motorcycle graphs. An
                 analysis of the mean trace length of $n$ random
                 motorcycles suggests that, on average, a motorcycle
                 crosses only a constant number of cells within a $\sqrt
                 n \times \sqrt n$ rectangular grid, provided that the
                 motorcycles are distributed sufficiently uniformly over
                 the area covered by the grid. This analysis motivates a
                 simple algorithm for computing motorcycle graphs: We
                 use the standard priority-queue--based algorithm and
                 enhance it with geometric hashing by means of a
                 rectangular grid. If the motorcycles are distributed
                 sufficiently uniformly, then our stochastic analysis
                 predicts an $O(n \log n)$ runtime. Indeed, extensive
                 experiments run on 22,000 synthetic and real-world
                 datasets confirm a runtime of less than $10^{-5} n \log
                 n$ seconds for the vast majority of our datasets on a
                 standard PC. Further experiments with our software,
                 Moca, also confirm the mean trace length and average
                 number of cells crossed by a motorcycle, as predicted
                 by our analysis. This makes Moca the first
                 implementation that is efficient enough to be applied
                 in practice for computing motorcycle graphs of large
                  datasets. Moreover, it is easy to extend Moca to
                  compute a generalized version of the original
                 motorcycle graph, thus enabling a significantly larger
                 field of applications.",
  acknowledgement = ack-nhfb,
  articleno =    "1.3",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
}

@Article{Michail:2011:ECS,
  author =       "Dimitrios Michail",
  title =        "An experimental comparison of single-sided preference
                 matching algorithms",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "16",
  number =       "1",
  pages =        "14:1--14:??",
  year =         "2011",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/1963190.2019579",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Sat Feb 25 18:02:18 MST 2012",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "We experimentally study the problem of assigning
                 applicants to posts. Each applicant provides a
                 preference list, which may contain ties, ranking a
                 subset of the posts. Different optimization criteria
                 may be defined, which depend on the desired solution
                 properties. The main focus of this work is to assess
                 the quality of matchings computed by rank-maximal and
                 popular matching algorithms and compare this with the
                 minimum weight matching algorithm, which is a standard
                 matching algorithm that is used in practice. Both
                 rank-maximal and popular matching algorithms use common
                 algorithmic techniques, which makes them excellent
                 candidates for a running time comparison. Since popular
                 matchings do not always exist, we also study the
                 unpopularity of matchings computed by the
                 aforementioned algorithms. Finally, extra criteria like
                 total weight and cardinality are included, due to their
                 importance in practice. All experiments are performed
                 using structured random instances as well as instances
                 created using real-world datasets.",
  acknowledgement = ack-nhfb,
  articleno =    "1.4",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
}

@Article{Kot:2011:ECP,
  author =       "Andriy Kot and Andrey N. Chernikov and Nikos P.
                 Chrisochoides",
  title =        "Effective out-of-core parallel {Delaunay} mesh
                 refinement using off-the-shelf software",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "16",
  number =       "1",
  pages =        "15:1--15:??",
  year =         "2011",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/1963190.2019580",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Sat Feb 25 18:02:18 MST 2012",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "We present three related out-of-core parallel mesh
                 generation algorithms and their implementations for
                  small-size computational clusters. Computing
                  out-of-core permits solving larger problems than
                 otherwise possible on the same hardware setup. Also,
                 when using shared computing resources with high demand,
                 a problem can take longer to compute in terms of
                 wall-clock time when using an in-core algorithm on many
                 nodes instead of using an out-of-core algorithm on few
                 nodes. The difference is due to wait-in-queue delays
                  that can grow exponentially with the number of
                  requested nodes. In one specific case, using our best
                  method on only 16 nodes, generating a
                  2-billion-element mesh can take several times less
                  wall-clock time than generating the same size mesh
                  in-core on 121 nodes.
                 Although our best out-of-core method exhibits
                  unavoidable overheads (as low as 19\% in some
                 cases) over the corresponding in-core method (for mesh
                 sizes that fit completely in-core), this is a modest
                 and expected performance penalty. We evaluated our
                  methods on traditional clusters of workstations and
                  also present a preliminary performance evaluation on
                  the emerging BlueWaters supercomputer.",
  acknowledgement = ack-nhfb,
  articleno =    "1.5",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
}

@Article{Prosser:2011:LDS,
  author =       "Patrick Prosser and Chris Unsworth",
  title =        "Limited discrepancy search revisited",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "16",
  number =       "1",
  pages =        "16:1--16:??",
  year =         "2011",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/1963190.2019581",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Sat Feb 25 18:02:18 MST 2012",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "Harvey and Ginsberg's limited discrepancy search (LDS)
                 is based on the assumption that costly heuristic
                 mistakes are made early in the search process.
                 Consequently, LDS repeatedly probes the state space,
                 going against the heuristic (i.e., taking
                 discrepancies) a specified number of times in all
                 possible ways and attempts to take those discrepancies
                 as early as possible. LDS was improved by Richard Korf,
                 to become improved LDS (ILDS), but in doing so,
                 discrepancies were taken as late as possible, going
                 against the original assumption. Many subsequent
                 algorithms have faithfully inherited Korf's
                 interpretation of LDS, and take discrepancies late.
                 This then raises the question: Should we take our
                 discrepancies late or early? We repeat the original
                 experiments performed by Harvey and Ginsberg and those
                 by Korf in an attempt to answer this question. We also
                 investigate the early stopping condition of the YIELDS
                  algorithm, demonstrating that it is simple, elegant, and
                 efficient.",
  acknowledgement = ack-nhfb,
  articleno =    "1.6",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
}

@Article{Tabourier:2011:GCR,
  author =       "Lionel Tabourier and Camille Roth and Jean-Philippe
                 Cointet",
  title =        "Generating constrained random graphs using multiple
                 edge switches",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "16",
  number =       "1",
  pages =        "17:1--17:??",
  year =         "2011",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/1963190.2063515",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Sat Feb 25 18:02:18 MST 2012",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "The generation of random graphs using edge swaps
                 provides a reliable method to draw uniformly random
                 samples of sets of graphs respecting some simple
                 constraints (e.g., degree distributions). However, in
                 general, it is not necessarily possible to access all
                 graphs obeying some given constraints through a
                  classical switching procedure operating on pairs of
                 edges. Therefore, we propose to get around this issue
                 by generalizing this classical approach through the use
                 of higher-order edge switches. This method, which we
                  denote by ``$k$-edge switching,'' makes it possible to
                 progressively improve the covered portion of a set of
                 constrained graphs, thereby providing an increasing,
                 asymptotically certain confidence on the statistical
                 representativeness of the obtained sample.",
  acknowledgement = ack-nhfb,
  articleno =    "1.7",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
}

@Article{Tsourakakis:2011:AAS,
  author =       "Charalampos E. Tsourakakis and Richard Peng and Maria
                 A. Tsiarli and Gary L. Miller and Russell Schwartz",
  title =        "Approximation algorithms for speeding up dynamic
                 programming and denoising {aCGH} data",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "16",
  number =       "1",
  pages =        "18:1--18:??",
  year =         "2011",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/1963190.2063517",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Sat Feb 25 18:02:18 MST 2012",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "The development of cancer is largely driven by the
                 gain or loss of subsets of the genome, promoting
                 uncontrolled growth or disabling defenses against it.
                 Denoising array-based Comparative Genome Hybridization
                 (aCGH) data is an important computational problem
                 central to understanding cancer evolution. In this
                 article, we propose a new formulation of the denoising
                 problem that we solve with a ``vanilla'' dynamic
                 programming algorithm, which runs in $O(n^2)$ units of
                 time. Then, we propose two approximation techniques.
                 Our first algorithm reduces the problem into a
                 well-studied geometric problem, namely halfspace
                  emptiness queries, and provides an $\epsilon$-additive
                  approximation to the optimal objective value in
                  $\tilde{O}(n^{4/3 + \delta} \log (U / \epsilon))$ time,
                  where $\delta$ is an arbitrarily small positive
                  constant, $U = \max\{\sqrt{C}, \max_{i = 1, \ldots{},
                  n} |P_i|\}$, $P = (P_1, P_2, \ldots{}, P_n)$ with $P_i
                  \in \mathbb{R}$ is the vector of the noisy aCGH
                  measurements, and $C$ is a normalization constant. The
                  second
                 algorithm provides a $(1 \pm \epsilon)$ approximation
                 (multiplicative error) and runs in $O(n \log n /
                 \epsilon)$ time. The algorithm decomposes the initial
                 problem into a small (logarithmic) number of Monge
                 optimization subproblems that we can solve in linear
                 time using existing techniques. Finally, we validate
                 our model on synthetic and real cancer datasets. Our
                 method consistently achieves superior precision and
                 recall to leading competitors on the data with ground
                 truth. In addition, it finds several novel markers not
                 recorded in the benchmarks but supported in the
                 oncology literature.",
  acknowledgement = ack-nhfb,
  articleno =    "1.8",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
}

@Article{Vahrenhold:2011:P,
  author =       "Jan Vahrenhold",
  title =        "Preface",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "16",
  number =       "1",
  pages =        "21:1--21:??",
  year =         "2011",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/1963190.1970374",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Sat Feb 25 18:02:18 MST 2012",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/jea.bib",
  acknowledgement = ack-nhfb,
  articleno =    "2.1",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
}

@Article{Wang:2011:CEM,
  author =       "Bei Wang and Herbert Edelsbrunner and Dmitriy
                 Morozov",
  title =        "Computing elevation maxima by searching the {Gauss}
                 sphere",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "16",
  number =       "1",
  pages =        "22:1--22:??",
  year =         "2011",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/1963190.1970375",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Sat Feb 25 18:02:18 MST 2012",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "The elevation function on a smoothly embedded
                  2-manifold in $\mathbb{R}^3$ reflects the multiscale
                  topography
                 of cavities and protrusions as local maxima. The
                 function has been useful in identifying coarse docking
                 configurations for protein pairs. Transporting the
                 concept from the smooth to the piecewise linear
                 category, this article describes an algorithm for
                 finding all local maxima. While its worst-case running
                  time is the same as that of the algorithm used in
                  prior work, its performance in practice is orders of
                  magnitude superior. We cast light on this improvement
                 by relating the running time to the total absolute
                 Gaussian curvature of the 2-manifold.",
  acknowledgement = ack-nhfb,
  articleno =    "2.2",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
}

@Article{Rotta:2011:MLS,
  author =       "Randolf Rotta and Andreas Noack",
  title =        "Multilevel local search algorithms for modularity
                 clustering",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "16",
  number =       "1",
  pages =        "23:1--23:??",
  year =         "2011",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/1963190.1970376",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Sat Feb 25 18:02:18 MST 2012",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "Modularity is a widely used quality measure for graph
                 clusterings. Its exact maximization is NP-hard and
                 prohibitively expensive for large graphs. Popular
                 heuristics first perform a coarsening phase, where
                 local search starting from singleton clusters is used
                 to compute a preliminary clustering, and then
                 optionally a refinement phase, where this clustering is
                 improved by moving vertices between clusters. As a
                 generalization, multilevel heuristics coarsen in
                 several stages, and refine by moving entire clusters
                 from each of these stages, not only individual
                 vertices. This article organizes existing and new
                 single-level and multilevel heuristics into a coherent
                 design space, and compares them experimentally with
                 respect to their effectiveness (achieved modularity)
                 and runtime. For coarsening by iterated cluster
                 joining, it turns out that the most widely used
                 criterion for joining clusters (modularity increase) is
                 outperformed by other simple criteria, that a recent
                 multistep algorithm [Schuetz and Caflisch 2008] is no
                 improvement over simple single-step coarsening for
                 these criteria, and that the recent multilevel
                 coarsening by iterated vertex moving [Blondel et al.
                 2008] is somewhat faster but slightly less effective
                 (with refinement). The new multilevel refinement is
                 significantly more effective than the conventional
                 single-level refinement or no refinement, in reasonable
                 runtime. A comparison with published benchmark results
                 and algorithm implementations shows that multilevel
                 local search heuristics, despite their relative
                 simplicity, are competitive with the best algorithms in
                 the literature.",
  acknowledgement = ack-nhfb,
  articleno =    "2.3",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
}

@Article{Bertasi:2011:PYA,
  author =       "Paolo Bertasi and Marco Bressan and Enoch Peserico",
  title =        "{{\tt psort}}, yet another fast stable sorting
                 software",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "16",
  number =       "1",
  pages =        "24:1--24:??",
  year =         "2011",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/1963190.1970377",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Sat Feb 25 18:02:18 MST 2012",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "{\tt psort} is the fastest sorting software according
                 to the PennySort benchmark, sorting 181GB of data in
                 2008 and 224GB in 2009 for \$0.01 of computer time.
                 This article details its internals, and the careful
                 fitting of its architecture to the structure of modern
                 PC-class platforms, allowing it to outperform
                 state-of-the-art sorting software such as STXXL sort.",
  acknowledgement = ack-nhfb,
  articleno =    "2.4",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
}

@Article{Finocchi:2011:GEF,
  author =       "Irene Finocchi and John Hershberger",
  title =        "Guest editors' foreword",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "16",
  number =       "1",
  pages =        "31:1--31:??",
  year =         "2011",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/1963190.2025377",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Sat Feb 25 18:02:18 MST 2012",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/jea.bib",
  acknowledgement = ack-nhfb,
  articleno =    "3.1",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
}

@Article{Belazzougui:2011:TPM,
  author =       "Djamal Belazzougui and Paolo Boldi and Rasmus Pagh and
                 Sebastiano Vigna",
  title =        "Theory and practice of monotone minimal perfect
                 hashing",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "16",
  number =       "1",
  pages =        "32:1--32:??",
  year =         "2011",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/1963190.2025378",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Sat Feb 25 18:02:18 MST 2012",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "Minimal perfect hash functions have been shown to be
                 useful to compress data in several data management
                 tasks. In particular, order-preserving minimal perfect
                 hash functions (Fox et al. 1991) have been used to
                 retrieve the position of a key in a given list of keys;
                 however, the ability to preserve any given order leads
                 to an unavoidable $\Omega(n \log n)$ lower bound on the
                 number of bits required to store the function.
                 Recently, it was observed (Belazzougui et al. 2009)
                 that very frequently the keys to be hashed are sorted
                 in their intrinsic (i.e., lexicographical) order. This
                 is typically the case of dictionaries of search
                  engines, lists of URLs of Web graphs, and so on. We
                 refer to this restricted version of the problem as
                 monotone minimal perfect hashing. We analyze
                 experimentally the data structures proposed in
                  Belazzougui et al. [2009], and along the way we propose
                 some new methods that, albeit asymptotically equivalent
                 or worse, perform very well in practice and provide a
                 balance between access speed, ease of construction, and
                 space usage.",
  acknowledgement = ack-nhfb,
  articleno =    "3.2",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
}

@Article{Doerr:2011:QRS,
  author =       "Benjamin Doerr and Tobias Friedrich and Marvin
                 K{\"u}nnemann and Thomas Sauerwald",
  title =        "Quasirandom rumor spreading: an experimental
                 analysis",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "16",
  number =       "1",
  pages =        "33:1--33:??",
  year =         "2011",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/1963190.2025379",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Sat Feb 25 18:02:18 MST 2012",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "We empirically analyze two versions of the well-known
                 ``randomized rumor spreading'' protocol to disseminate
                 a piece of information in networks. In the classical
                 model, in each round, each informed node informs a
                 random neighbor. In the recently proposed quasirandom
                 variant, each node has a (cyclic) list of its
                 neighbors. Once informed, it starts at a random
                 position of the list, but from then on informs its
                 neighbors in the order of the list. While for sparse
                 random graphs a better performance of the quasirandom
                 model could be proven, all other results show that,
                 independent of the structure of the lists, the same
                 asymptotic performance guarantees hold as for the
                 classical model. In this work, we compare the two
                 models experimentally. Not only does this show that the
                 quasirandom model generally is faster, but it also
                 shows that the runtime is more concentrated around the
                  mean. This is surprising given that far fewer random
                 bits are used in the quasirandom process. These
                 advantages are also observed in a lossy communication
                 model, where each transmission does not reach its
                 target with a certain probability, and in an
                 asynchronous model, where nodes send at random times
                 drawn from an exponential distribution. We also show
                 that typically the particular structure of the lists
                 has little influence on the efficiency.",
  acknowledgement = ack-nhfb,
  articleno =    "3.3",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
}

@Article{Haverkort:2011:FDH,
  author =       "Herman Haverkort and Freek V. Walderveen",
  title =        "Four-dimensional {Hilbert} curves for {$R$}-trees",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "16",
  number =       "1",
  pages =        "34:1--34:??",
  year =         "2011",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/1963190.2025380",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Sat Feb 25 18:02:18 MST 2012",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "Two-dimensional R-trees are a class of spatial index
                 structures in which objects are arranged to enable fast
                 window queries: report all objects that intersect a
                 given query window. One of the most successful methods
                 of arranging the objects in the index structure is
                 based on sorting the objects according to the positions
                 of their centers along a two-dimensional Hilbert
                 space-filling curve. Alternatively, one may use the
                 coordinates of the objects' bounding boxes to represent
                 each object by a four-dimensional point, and sort these
                 points along a four-dimensional Hilbert-type curve. In
                 experiments by Kamel and Faloutsos and by Arge et al.,
                 the first solution consistently outperformed the latter
                 when applied to point data, while the latter solution
                 clearly outperformed the first on certain artificial
                 rectangle data. These authors did not specify which
                 four-dimensional Hilbert-type curve was used; many
                 exist. In this article, we show that the results of the
                 previous articles can be explained by the choice of the
                 four-dimensional Hilbert-type curve that was used and
                 by the way it was rotated in four-dimensional space. By
                 selecting a curve that has certain properties and
                 choosing the right rotation, one can combine the
                 strengths of the two-dimensional and the
                 four-dimensional approach into one, while avoiding
                 their apparent weaknesses. The effectiveness of our
                 approach is demonstrated with experiments on various
                 datasets. For real data taken from VLSI design, our new
                 curve yields R-trees with query times that are better
                 than those of R-trees that were obtained with
                 previously used curves.",
  acknowledgement = ack-nhfb,
  articleno =    "3.4",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
}

@Article{Negrucseri:2011:SMF,
  author =       "Cosmin Silvestru Negrucseri and Mircea Bogdan Pacsosi
                 and Barbara Stanley and Clifford Stein and Cristian
                 George Strat",
  title =        "Solving maximum flow problems on real-world bipartite
                 graphs",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "16",
  number =       "1",
  pages =        "35:1--35:??",
  year =         "2011",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/1963190.2025381",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Sat Feb 25 18:02:18 MST 2012",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "In this article, we present an experimental study of
                 several maximum-flow algorithms in the context of
                 unbalanced bipartite networks. Our experiments are
                 motivated by a real-world problem of managing
                 reservation-based inventory in Google content ad
                 systems. We are interested in observing the performance
                 of several push-relabel algorithms on our real-world
                 datasets and also on some generated ones. Previous work
                 suggested an important improvement for push-relabel
                 algorithms on unbalanced bipartite networks: the
                 two-edge push rule. We show how the two-edge push rule
                 improves the running time. While no single algorithm
                 dominates the results, we show there is one that has
                 very robust performance in practice.",
  acknowledgement = ack-nhfb,
  articleno =    "3.5",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
}

@Article{Tazari:2011:DLH,
  author =       "Siamak Tazari and Matthias M{\"u}ller-Hannemann",
  title =        "Dealing with large hidden constants: engineering a
                  planar {Steiner} tree {PTAS}",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "16",
  number =       "1",
  pages =        "36:1--36:??",
  year =         "2011",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/1963190.2025382",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Sat Feb 25 18:02:18 MST 2012",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "We present the first attempt on implementing a highly
                 theoretical polynomial-time approximation scheme (PTAS)
                 with huge hidden constants, namely, the PTAS for
                 Steiner tree in planar graphs by Borradaile, Klein, and
                 Mathieu (2009). Whereas this result, and several other
                  PTAS results of recent years, are of high
                 theoretical importance, no practical applications or
                 even implementation attempts have been known to date,
                 due to the extremely large constants that are involved
                  in them. We describe techniques to circumvent
                 the challenges in implementing such a scheme. With
                 today's limitations on processing power and space, we
                 still have to sacrifice approximation guarantees for
                 improved running times by choosing some parameters
                 empirically. But our experiments show that with our
                 choice of parameters, we do get the desired
                 approximation ratios, suggesting that a much tighter
                 analysis might be possible. Our computational
                 experiments with benchmark instances from SteinLib and
                 large artificial instances well exceeded our own
                 expectations. We demonstrate that we are able to handle
                 instances with up to a million nodes and several
                 hundreds of terminals in 1.5 hours on a standard PC. On
                 the rectilinear preprocessed instances from SteinLib,
                  we observe a monotonic improvement for smaller values
                 of $\epsilon$, with an average gap below 1\% for
                 $\epsilon = 0.1$. We compare our implementation against
                 the well-known batched $1$-Steiner heuristic and
                 observe that on very large instances, we are able to
                 produce comparable solutions much faster. We also
                 present a thorough experimental evaluation of the
                 influence of the various parameters of the PTAS and
                 thus obtain a better understanding of their empirical
                 effects.",
  acknowledgement = ack-nhfb,
  articleno =    "3.6",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
}

@Article{Reams:2012:AFD,
  author =       "Charles Reams",
  title =        "{Anatree}: a Fast Data Structure for Anagrams",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "17",
  number =       "1",
  pages =        "11:1--11:??",
  month =        mar,
  year =         "2012",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/2133803.2133804",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Tue Mar 27 17:30:32 MDT 2012",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "Natural language is a rich source of constraint
                 satisfaction problems (CSPs), with a uniquely
                 structured solution domain. We describe a number of
                 approaches to satisfying the particular case of
                 unordered letter-level constraints, including anagrams,
                 but also relevant to typographical error correction,
                 password security and word puzzles among other fields.
                 We define the anatree, a data structure that can solve
                 many such problems in constant time with respect to the
                 size of the lexicon. The structure represents the
                 lexicon of a language in a format somewhat analogous to
                 a binary decision diagram (BDD) and, as with BDDs,
                 construction heuristics allow the real average-case
                 performance to vastly exceed the theoretical worst
                 case. We compare anatrees and their alternatives
                 empirically, explore the behavior of the construction
                 heuristics, and characterize the tasks for which each
                 is best suited.",
  acknowledgement = ack-nhfb,
  articleno =    "1.1",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
}

@Article{Geisberger:2012:RPF,
  author =       "Robert Geisberger and Michael N. Rice and Peter
                 Sanders and Vassilis J. Tsotras",
  title =        "Route planning with flexible edge restrictions",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "17",
  number =       "1",
  pages =        "12:1--12:??",
  month =        mar,
  year =         "2012",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/2133803.2133805",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Tue Mar 27 17:30:32 MDT 2012",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "In this work, we explore a new type of flexible
                 shortest-path query, in which the query can be
                 dynamically parameterized to constrain the type of
                 edges that may be included in the resulting shortest
                 path (e.g., find the shortest path in a road network
                  that avoids toll roads and overpasses lower than the
                  specified vehicle height). We extend the
                 hierarchical preprocessing technique known as
                 Contraction Hierarchies to efficiently support such
                 flexible queries. We also present several effective
                 algorithmic optimizations for further improving the
                 overall scalability and query times of this approach,
                 including the addition of goal-directed search
                 techniques, search space pruning techniques, and
                 generalizing the constraints of the local search.
                 Experiments are presented for both the North American
                 and the European road networks, showcasing the general
                 effectiveness and scalability of our proposed
                 methodology to large-scale, real-world graphs.",
  acknowledgement = ack-nhfb,
  articleno =    "1.2",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
}

@Article{Abraham:2013:ARR,
  author =       "Ittai Abraham and Daniel Delling and Andrew V.
                 Goldberg and Renato F. Werneck",
  title =        "Alternative routes in road networks",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "18",
  number =       "1",
  pages =        "1.3:1--1.3:??",
  month =        dec,
  year =         "2013",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/2444016.2444019",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon May 6 18:55:51 MDT 2013",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "We study the problem of finding good alternative
                 routes in road networks. We look for routes that are
                 substantially different from the shortest path, have
                 small stretch, and are locally optimal. We formally
                 define the problem of finding alternative routes with a
                 single via vertex, develop efficient algorithms for it,
                 and evaluate them experimentally. Our algorithms are
                 efficient enough for practical use and compare
                 favorably with previous methods in both speed and
                 solution quality.",
  acknowledgement = ack-nhfb,
  articleno =    "1.3",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
}

@Article{Batz:2013:MTD,
  author =       "G. Veit Batz and Robert Geisberger and Peter Sanders
                 and Christian Vetter",
  title =        "Minimum time-dependent travel times with contraction
                 hierarchies",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "18",
  number =       "1",
  pages =        "1.4:1--1.4:??",
  month =        dec,
  year =         "2013",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/2444016.2444020",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon May 6 18:55:51 MDT 2013",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "Time-dependent road networks are represented as
                 weighted graphs, where the weight of an edge depends on
                 the time one passes through that edge. This way, we can
                 model periodic congestions during rush hour and similar
                 effects. In this work we deal with the special case
                 where edge weights are time-dependent travel times.
                 Namely, we consider two problems in this setting:
                 Earliest arrival queries ask for a minimum travel time
                 route for a start and a destination depending on a
                 given departure time. Travel time profile queries ask
                 for the travel time profile for a start, a destination,
                 and an interval of possible departure times. For an
                 instance representing the German road network, for
                 example, we can answer earliest arrival queries in less
                 than 1.5ms. For travel time profile queries, which are
                 much harder to answer, we need less than 40ms if the
                 interval of possible departure times has a width of 24
                 hours. For inexact travel time profiles with an allowed
                  error of about 1\%, this even reduces to 3.2ms. The
                 underlying hierarchical representations of the road
                 network, which are variants of a time-dependent
                 contraction hierarchy (TCH), need less than 1GiB of
                 space and can be generated in about 30 minutes. As far
                  as we know, TCHs are currently the only method able to
                  answer travel time profile queries efficiently.
                 Altogether, with TCHs, web servers with massive request
                 traffic are able to provide fast time-dependent
                 earliest arrival route planning and computation of
                 travel time profiles.",
  acknowledgement = ack-nhfb,
  articleno =    "1.4",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
}

@Article{Bonami:2013:BRC,
  author =       "Pierre Bonami and Jon Lee and Sven Leyffer and Andreas
                 W{\"a}chter",
  title =        "On branching rules for convex mixed-integer nonlinear
                 optimization",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "18",
  number =       "1",
  pages =        "2.6:1--2.6:??",
  month =        dec,
  year =         "2013",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/2532568",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Wed May 21 14:36:03 MDT 2014",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "Branch-and-Bound (B\&B) is perhaps the most
                 fundamental algorithm for the global solution of convex
                 Mixed-Integer Nonlinear Programming (MINLP) problems.
                 It is well-known that carrying out branching in a
                 nonsimplistic manner can greatly enhance the
                 practicality of B\&B in the context of Mixed-Integer
                 Linear Programming (MILP). No detailed study of
                 branching has heretofore been carried out for MINLP. In
                 this article, we study and identify useful
                 sophisticated branching methods for MINLP, including
                 novel approaches based on approximations of the
                 nonlinear relaxations by linear and quadratic
                 programs.",
  acknowledgement = ack-nhfb,
  articleno =    "2.6",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
}

@Article{Canzar:2013:PDA,
  author =       "Stefan Canzar and Khaled Elbassioni and Juli{\'a}n
                 Mestre",
  title =        "A polynomial-delay algorithm for enumerating
                 approximate solutions to the interval constrained
                 coloring problem",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "18",
  number =       "1",
  pages =        "2.2:1--2.2:??",
  month =        dec,
  year =         "2013",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/2444016.2493372",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Wed May 21 14:36:03 MDT 2014",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "We study the interval constrained coloring problem, a
                 combinatorial problem arising in the interpretation of
                 data on protein structure emanating from experiments
                 based on hydrogen/deuterium exchange and mass
                 spectrometry. The problem captures the challenging task
                 of increasing the spatial resolution of experimental
                 data in order to get a better picture of the protein
                 structure. Since solutions proposed by any algorithmic
                 framework have to ultimately be verified by
                 biochemists, it is important to provide not just a
                 single solution, but a valuable set of candidate
                 solutions. Our contribution is a polynomial-delay,
                 polynomial-space algorithm for enumerating all exact
                 solutions plus further approximate solutions, which are
                 guaranteed to be within an absolute error of two of the
                 optimum within fragments of the protein, that is,
                 within sets of consecutive residues. Our experiments
                 indicate that the quality of the approximate solutions
                 is comparable to the optimal ones in terms of deviation
                  from the underlying true solution. In addition, the
                  experiments confirm that the method considerably
                  reduces the delay between two consecutive solutions,
                  compared to the time an integer programming solver
                  needs to produce the next exact solution.
  acknowledgement = ack-nhfb,
  articleno =    "2.2",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
}

@Article{Delort:2013:HDP,
  author =       "Charles Delort and Olivier Spanjaard",
  title =        "A hybrid dynamic programming approach to the
                 biobjective binary knapsack problem",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "18",
  number =       "1",
  pages =        "1.2:1--1.2:??",
  month =        dec,
  year =         "2013",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/2444016.2444018",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Wed May 21 14:36:03 MDT 2014",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "This article is devoted to a study of the impact of
                 using bound sets in biobjective dynamic programming.
                  This notion, introduced by Villarreal and Karwan [1981],
                 has been independently revisited by Ehrgott and
                 Gandibleux [2007], as well as by Sourd and Spanjaard
                 [2008]. The idea behind it is very general and can,
                 therefore, be adapted to a wide range of biobjective
                 combinatorial problems. We focus here on the
                 biobjective binary knapsack problem. We show that using
                 bound sets to perform a hybrid dynamic programming
                 procedure embedded in a two-phase method [Ulungu and
                 Teghem 1995] yields numerical results that outperform
                 previous dynamic programming approaches to the problem,
                 both in execution times and memory requirements.",
  acknowledgement = ack-nhfb,
  articleno =    "1.2",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
}
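
%% A minimal sketch of the plain biobjective knapsack dynamic program
%% that the entry above improves upon: it keeps only
%% Pareto-nondominated (weight, profit1, profit2) states and assumes
%% nonnegative weights.  The paper's actual contribution, pruning such
%% states with bound sets inside a two-phase method, is not shown, and
%% the function name is hypothetical.
%%
%%   def pareto_knapsack(items, capacity):
%%       """items: list of (weight, profit1, profit2) triples."""
%%       states = {(0, 0, 0)}                      # (weight, p1, p2)
%%       for w, p1, p2 in items:
%%           states |= {(sw + w, sp1 + p1, sp2 + p2)
%%                      for sw, sp1, sp2 in states
%%                      if sw + w <= capacity}
%%           # keep a state only if no other one is at least as light
%%           # and at least as profitable in both objectives
%%           states = {s for s in states
%%                     if not any(t != s and t[0] <= s[0] and
%%                                t[1] >= s[1] and t[2] >= s[2]
%%                                for t in states)}
%%       return sorted({(p1, p2) for _, p1, p2 in states})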

@Article{Ferraro-Petrillo:2013:DSR,
  author =       "Umberto Ferraro-Petrillo and Fabrizio Grandoni and
                 Giuseppe F. Italiano",
  title =        "Data structures resilient to memory faults: an
                 experimental study of dictionaries",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "18",
  number =       "1",
  pages =        "1.6:1--1.6:??",
  month =        dec,
  year =         "2013",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/2444016.2444022",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon May 6 18:55:51 MDT 2013",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "We address the problem of implementing data structures
                 resilient to memory faults, which may arbitrarily
                 corrupt memory locations. In this framework, we focus
                 on the implementation of dictionaries and perform a
                 thorough experimental study using a testbed that we
                 designed for this purpose. Our main discovery is that
                 the best-known (asymptotically optimal) resilient data
                 structures have very large space overheads. More
                 precisely, most of the space used by these data
                 structures is not due to key storage. This might not be
                 acceptable in practice, since resilient data structures
                 are meant for applications where a huge amount of data
                 (often of the order of terabytes) has to be stored.
                 Exploiting techniques developed in the context of
                 resilient (static) sorting and searching, in
                 combination with some new ideas, we designed and
                  engineered an alternative implementation, which, while
                  still guaranteeing optimal asymptotic time and space
                  bounds, uses considerably less memory without
                  compromising time efficiency.
  acknowledgement = ack-nhfb,
  articleno =    "1.6",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
}

@Article{Festa:2013:FSI,
  author =       "Paola Festa",
  title =        "Foreword to the special issue {SEA 2010}",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "18",
  number =       "1",
  pages =        "1.1:1--1.1:??",
  month =        dec,
  year =         "2013",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/2444016.2444017",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Wed May 21 14:36:03 MDT 2014",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/jea.bib",
  acknowledgement = ack-nhfb,
  articleno =    "1.1",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
}

@Article{Gog:2013:CST,
  author =       "Simon Gog and Enno Ohlebusch",
  title =        "Compressed suffix trees: Efficient computation and
                 storage of {LCP}-values",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "18",
  number =       "1",
  pages =        "2.1:1--2.1:??",
  month =        dec,
  year =         "2013",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/2444016.2461327",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Wed May 21 14:36:03 MDT 2014",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "The suffix tree is a very important data structure in
                 string processing, but typical implementations suffer
                 from huge space consumption. In large-scale
                 applications, compressed suffix trees (CSTs) are
                 therefore used instead. A CST consists of three
                 (compressed) components: the suffix array, the longest
                 common prefix (LCP)-array and data structures for
                 simulating navigational operations on the suffix tree.
                 The LCP-array stores the lengths of the LCPs of
                 lexicographically adjacent suffixes, and it can be
                 computed in linear time. In this article, we present a
                 new LCP-array construction algorithm that is fast and
                 very space efficient. In practice, our algorithm
                 outperforms alternative algorithms. Moreover, we
                 introduce a new compressed representation of
                 LCP-arrays.",
  acknowledgement = ack-nhfb,
  articleno =    "2.1",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
}
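
%% The LCP-array definition in the abstract above can be made concrete
%% with Kasai's classic linear-time construction (one standard
%% algorithm; the paper's own construction is more space-efficient):
%%
%%   def lcp_array(s, sa):
%%       """lcp[i] = length of the longest common prefix of the
%%       lexicographically adjacent suffixes s[sa[i-1]:] and
%%       s[sa[i]:]; lcp[0] = 0."""
%%       n = len(s)
%%       rank = [0] * n
%%       for i, p in enumerate(sa):
%%           rank[p] = i
%%       lcp, h = [0] * n, 0
%%       for i in range(n):                # i runs in text order
%%           if rank[i] > 0:
%%               j = sa[rank[i] - 1]       # lexicographic predecessor
%%               while i + h < n and j + h < n and s[i + h] == s[j + h]:
%%                   h += 1
%%               lcp[rank[i]] = h
%%               h = max(h - 1, 0)         # Kasai's invariant
%%           else:
%%               h = 0
%%       return lcp
%%
%%   s = "banana"
%%   sa = sorted(range(len(s)), key=lambda i: s[i:])  # toy suffix array
%%   print(lcp_array(s, sa))               # [0, 1, 3, 0, 0, 2]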

@Article{Gorke:2013:DGC,
  author =       "Robert G{\"o}rke and Pascal Maillard and Andrea Schumm
                 and Christian Staudt and Dorothea Wagner",
  title =        "Dynamic graph clustering combining modularity and
                 smoothness",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "18",
  number =       "1",
  pages =        "1.5:1--1.5:??",
  month =        dec,
  year =         "2013",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/2444016.2444021",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon May 6 18:55:51 MDT 2013",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "Maximizing the quality index modularity has become one
                 of the primary methods for identifying the clustering
                 structure within a graph. Since many contemporary
                 networks are not static but evolve over time,
                 traditional static approaches can be inappropriate for
                  specific tasks. In this work, we pioneer the study of
                  the NP-hard problem of online dynamic modularity
                  maximization. We
                 develop scalable dynamizations of the currently fastest
                 and the most widespread static heuristics and engineer
                 a heuristic dynamization of an optimal static
                 algorithm. Our algorithms efficiently maintain a
                  modularity-based clustering of a graph for which
                 dynamic changes arrive as a stream. For our quickest
                 heuristic we prove a tight bound on its number of
                 operations. In an experimental evaluation on both a
                 real-world dynamic network and on dynamic clustered
                 random graphs, we show that the dynamic maintenance of
                 a clustering of a changing graph yields higher
                 modularity than recomputation, guarantees much smoother
                 clustering dynamics, and requires much lower runtimes.
                 We conclude with giving sound recommendations for the
                 choice of an algorithm.",
  acknowledgement = ack-nhfb,
  articleno =    "1.5",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
}

@Article{Hofri:2013:OSS,
  author =       "Micha Hofri",
  title =        "Optimal selection and sorting via dynamic
                 programming",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "18",
  number =       "1",
  pages =        "2.3:1--2.3:??",
  month =        dec,
  year =         "2013",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/2444016.2493373",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Wed May 21 14:36:03 MDT 2014",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "We show how to find optimal algorithms for the
                 selection of one or more order statistics over a small
                 set of numbers, and as an extreme case, complete
                  sorting. The criterion is to use the smallest number of
                 comparisons; separate derivations are performed for
                 minimization on the average (over all permutations) or
                 in the worst case. When the computational process
                 establishes the optimal values, it also generates
                 C-language functions that implement policies which
                 achieve those optimal values. The search for the
                 algorithms is driven by a Markov decision process, and
                 the program provides the optimality proof as well.",
  acknowledgement = ack-nhfb,
  articleno =    "2.3",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
}

@Article{Kouri:2013:FRM,
  author =       "Tina M. Kouri and Dinesh P. Mehta",
  title =        "Faster reaction mapping through improved naming
                 techniques",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "18",
  number =       "1",
  pages =        "2.5:1--2.5:??",
  month =        dec,
  year =         "2013",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/2532569",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Wed May 21 14:36:03 MDT 2014",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "Automated reaction mapping is an important tool in
                 cheminformatics where it may be used to classify
                 reactions or validate reaction mechanisms. The reaction
                  mapping problem is known to be NP-hard and may be
                 formulated as an optimization problem. In this article,
                 we present four algorithms that continue to obtain
                 optimal solutions to this problem, but with
                 significantly improved runtimes over the previous
                 Constructive Count Vector (CCV) algorithm. Our
                 algorithmic improvements include (i) the use of a fast
                 (but not 100\% accurate) canonical labeling algorithm,
                 (ii) name reuse (i.e., storing intermediate results
                 rather than recomputing), and (iii) an incremental
                  approach to canonical name computation. Mapping the
                  reactions from the KEGG/LIGAND database previously
                  took over 2 days using CCV; it now completes in less
                  than 4 hours. Experimental results on chemical
                  reaction databases demonstrate that our 2-CCV FDN MS
                  algorithm usually runs more than fifteen times faster
                  than previous automated reaction mapping algorithms.
  acknowledgement = ack-nhfb,
  articleno =    "2.5",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
}

@Article{Kouzinopoulos:2013:EOT,
  author =       "Charalampos S. Kouzinopoulos and Konstantinos G.
                 Margaritis",
  title =        "Exact online two-dimensional pattern matching using
                 multiple pattern matching algorithms",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "18",
  number =       "1",
  pages =        "2.4:1--2.4:??",
  month =        dec,
  year =         "2013",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/2513148",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Wed May 21 14:36:03 MDT 2014",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "The algorithms of Baker and Bird and of Baeza-Yates
                  and R{\'e}gnier are two of the most efficient and
                  widely used algorithms for exact online
                  two-dimensional pattern matching. Both locate all the
                  occurrences of a two-dimensional pattern in a
                  two-dimensional input string using the automaton of
                  the Aho--Corasick multiple pattern matching
                  algorithm, a data structure that many consider
                  inefficient, especially when used to process long
                  patterns or data with large alphabets. This article
                  presents variants of the Baker and Bird and the
                  Baeza-Yates and R{\'e}gnier algorithms that use the
                  data structures of the Set Horspool, Wu--Manber, Set
                  Backward
                 Oracle Matching, and SOG multiple pattern matching
                 algorithms in place of the automaton of Aho--Corasick
                 and evaluates their performance experimentally in terms
                 of preprocessing and searching time.",
  acknowledgement = ack-nhfb,
  articleno =    "2.4",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
}

@Article{Gonzalez:2014:LCS,
  author =       "Rodrigo Gonz{\'a}lez and Gonzalo Navarro and
                 H{\'e}ctor Ferrada",
  title =        "Locally Compressed Suffix Arrays",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "19",
  number =       "1",
  pages =        "1.1:1--1.1:??",
  month =        may,
  year =         "2014",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/2594408",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Wed May 21 14:36:05 MDT 2014",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/datacompression.bib;
                 http://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "We introduce a compression technique for suffix
                 arrays. It is sensitive to the compressibility of the
                 text and local, meaning that random portions of the
                 suffix array can be decompressed by accessing mostly
                 contiguous memory areas. This makes decompression very
                 fast, especially when various contiguous cells must be
                 accessed. Our main technical contributions are the
                 following. First, we show that runs of consecutive
                 values that are known to appear in function $ \Psi (i)
                 = A^{-1} [A[i] + 1] $ of suffix arrays $A$ of
                 compressible texts also show up as repetitions in the
                 differential suffix array $ A'[i] = A[i] - A [i - 1]$.
                 Second, we use Re-Pair, a grammar-based compressor, to
                 compress the differential suffix array, and upper bound
                 its compression ratio in terms of the number of runs.
                 Third, we show how to compact the space used by the
                 grammar rules by up to 50\%, while still permitting
                 direct access to the rules. Fourth, we develop specific
                 variants of Re-Pair that work using knowledge of $ \Psi
                 $, and use much less space than the general Re-Pair
                 compressor, while achieving almost the same compression
                 ratios. Fifth, we implement the scheme and compare it
                 exhaustively with previous work, including the first
                 implementations of previous theoretical proposals.",
  acknowledgement = ack-nhfb,
  articleno =    "1.1",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
}
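
%% The two arrays named in the abstract above can be made concrete with
%% a tiny sketch (the wrap-around modulo n for the last suffix is an
%% assumption of this illustration, and the function name is
%% hypothetical):
%%
%%   def psi_and_diff(sa):
%%       """sa: suffix array A.  Returns Psi, with
%%       Psi[i] = A^{-1}[A[i] + 1], and the differential suffix
%%       array A'[i] = A[i] - A[i-1]."""
%%       n = len(sa)
%%       inv = [0] * n
%%       for i, p in enumerate(sa):
%%           inv[p] = i                    # inv = A^{-1}
%%       psi = [inv[(sa[i] + 1) % n] for i in range(n)]
%%       diff = [sa[0]] + [sa[i] - sa[i - 1] for i in range(1, n)]
%%       return psi, diff
%%
%% Runs of consecutive values in Psi for compressible texts reappear as
%% repetitions in diff, which is what makes grammar-based (Re-Pair)
%% compression of the differential array effective.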

@Article{Doerr:2014:RRP,
  author =       "Benjamin Doerr and Magnus Wahlstr{\"o}m",
  title =        "Randomized Rounding in the Presence of a Cardinality
                 Constraint",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "19",
  number =       "1",
  pages =        "1.2:1--1.2:??",
  month =        may,
  year =         "2014",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/2594409",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Wed May 21 14:36:05 MDT 2014",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "We consider the problem of generating randomized
                 roundings that satisfy a single cardinality constraint
                 and admit Chernoff-type large deviation bounds for
                 weighted sums of the variables. That this can be done
                  efficiently was proven by Srinivasan [2001]; a
                 different approach was later given by the first author
                 [Doerr 2006]. In this work, we (a) present an improved
                 version of the bitwise derandomization given by Doerr,
                 (b) give the first derandomization of Srinivasan's
                 tree-based randomized approach and prove its
                 correctness, and (c) experimentally compare the
                 resulting algorithms. Our experiments show that adding
                 a single cardinality constraint typically reduces the
                 rounding errors and only moderately increases the
                 running times. In general, our derandomization of the
                 tree-based approach is superior to the derandomized
                 bitwise one, while the two randomized versions produce
                 very similar rounding errors. When implementing the
                 derandomized tree-based approach, however, the choice
                 of the tree is important.",
  acknowledgement = ack-nhfb,
  articleno =    "1.2",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
}
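
%% A minimal sketch of the pairwise rounding step underlying
%% cardinality-respecting randomized rounding: given x in [0,1]^n with
%% integral sum, each step rounds at least one of two fractional
%% entries while preserving the sum exactly and every expectation
%% E[x_i].  This is only the randomized building block, not the paper's
%% derandomized tree-based algorithm; the names are hypothetical.
%%
%%   import random
%%
%%   def round_with_cardinality(x, eps=1e-12):
%%       x = list(x)
%%       frac = [i for i, v in enumerate(x) if eps < v < 1 - eps]
%%       while len(frac) >= 2:
%%           i, j = frac[0], frac[1]
%%           alpha = min(1 - x[i], x[j])   # mass movable from j to i
%%           beta = min(x[i], 1 - x[j])    # mass movable from i to j
%%           if random.random() < beta / (alpha + beta):
%%               x[i], x[j] = x[i] + alpha, x[j] - alpha
%%           else:
%%               x[i], x[j] = x[i] - beta, x[j] + beta
%%           frac = [k for k in frac if eps < x[k] < 1 - eps]
%%       return [int(round(v)) for v in x]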

@Article{Auer:2014:EMC,
  author =       "B. O. Fagginger Auer and R. H. Bisseling",
  title =        "Efficient Matching for Column Intersection Graphs",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "19",
  number =       "??",
  pages =        "1.3:1--1.3:??",
  month =        sep,
  year =         "2014",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/2616587",
  ISSN =         "1084-6654",
  bibdate =      "Wed Sep 10 07:29:23 MDT 2014",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "To improve the quality and efficiency of
                 hypergraph-based matrix partitioners, we investigate
                 high-quality matchings in column intersection graphs of
                 large sparse binary matrices. We show that such
                  algorithms have a natural decomposition into an
                 integer-weighted graph-matching function and a
                 neighbor-finding function and study the performance of
                 16 combinations of these functions. We improve upon the
                 original matching algorithm of the Mondriaan matrix
                 partitioner: by using PGA', we improve the average
                 matching quality from 95.3\% to 97.4\% of the optimum
                 value; by using our new neighbor-finding heuristic, we
                 obtain comparable quality and speedups of up to a
                 factor of 19.6.",
  acknowledgement = ack-nhfb,
  articleno =    "1.3",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
}
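
%% A minimal sketch of the two ingredients named in the abstract above:
%% a neighbor-finding step (here, building the column intersection
%% graph explicitly) and a matching step (here, a simple unweighted
%% greedy matcher, far weaker than the integer-weighted matchers
%% studied in the paper).  All names are hypothetical.
%%
%%   def column_intersection_graph(rows, ncols):
%%       """rows: one set of column indices per nonzero matrix row.
%%       Two columns are adjacent iff some row is nonzero in both."""
%%       adj = {c: set() for c in range(ncols)}
%%       for row in rows:
%%           for c in row:
%%               adj[c] |= row - {c}
%%       return adj
%%
%%   def greedy_matching(adj):
%%       matched, matching = set(), []
%%       for u in adj:
%%           if u in matched:
%%               continue
%%           for v in adj[u]:
%%               if v not in matched:
%%                   matching.append((u, v))
%%                   matched |= {u, v}
%%                   break
%%       return matching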

@Article{Angione:2014:SMB,
  author =       "Claudio Angione and Annalisa Occhipinti and Giuseppe
                 Nicosia",
  title =        "Satisfiability by {Maxwell--Boltzmann} and
                 {Bose--Einstein} Statistical Distributions",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "19",
  number =       "??",
  pages =        "1.4:1--1.4:??",
  month =        sep,
  year =         "2014",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/2629498",
  ISSN =         "1084-6654",
  bibdate =      "Wed Sep 10 07:29:23 MDT 2014",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "Recent studies in theoretical computer science have
                 exploited new algorithms and methodologies based on
                 statistical physics for investigating the structure and
                 the properties of the Satisfiability (SAT) problem. We
                 propose a characterization of the SAT problem as a
                 physical system, using both quantum and classical
                  statistical-physical models. We associate a graph
                  with each SAT instance and prove that a Bose--Einstein
                 condensation occurs in the instance with higher
                 probability if the quantum distribution is adopted in
                 the generation of the graph. Conversely, the
                 fit-get-rich behavior is more likely if we adopt the
                 Maxwell--Boltzmann distribution. Our method allows a
                 comprehensive analysis of the SAT problem based on a
                 new definition of entropy of an instance, without
                 requiring the computation of its truth assignments. The
                 entropy of an SAT instance increases in the
                 satisfiability region as the number of free variables
                 in the instance increases. Finally, we develop six new
                 solvers for the MaxSAT problem based on quantum and
                 classical statistical distributions, and we test them
                 on random SAT instances, with competitive results. We
                  show experimentally that the performance of the
                 solvers based on the two distributions depends on the
                 criterion used to flag clauses as satisfied in the SAT
                 solving process.",
  acknowledgement = ack-nhfb,
  articleno =    "1.4",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
}

@Article{Gorke:2014:EDC,
  author =       "Robert G{\"o}rke and Andrea Kappes and Dorothea
                 Wagner",
  title =        "Experiments on Density-Constrained Graph Clustering",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "19",
  number =       "??",
  pages =        "6:1--6:??",
  month =        sep,
  year =         "2014",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/2638551",
  ISSN =         "1084-6654",
  bibdate =      "Wed Sep 10 07:29:23 MDT 2014",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "Clustering a graph means identifying internally dense
                 subgraphs that are only sparsely interconnected.
                 Formalizations of this notion lead to measures that
                 quantify the quality of a clustering and to algorithms
                  that actually find clusterings. Since the
                  corresponding optimization problems are, in general,
                  hard, heuristic clustering algorithms are used in
                  practice, as are other approaches that are not based
                  on an objective function.
                 In this work, we conduct a comprehensive experimental
                 evaluation of the qualitative behavior of greedy
                 bottom-up heuristics driven by cut-based objectives and
                 constrained by intracluster density, using both
                 real-world data and artificial instances. Our study
                 documents that a greedy strategy based on local
                 movement is superior to one based on merging. We
                 further reveal that the former approach generally
                 outperforms alternative setups and reference algorithms
                 from the literature in terms of its own objective,
                 while a modularity-based algorithm competes
                 surprisingly well. Finally, we exhibit which
                 combinations of cut-based inter- and intracluster
                 measures are suitable for identifying a hidden
                 reference clustering in synthetic random graphs and
                 discuss the skewness of the resulting cluster size
                 distributions. Our results serve as a guideline to the
                 usage of bicriterial, cut-based measures for graph
                 clusterings.",
  acknowledgement = ack-nhfb,
  articleno =    "6",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
}

%% [10-Sep-2014] Check for additional papers from in-progress issue v19.