%%% -*-BibTeX-*-
%%% ====================================================================
%%% BibTeX-file{
%%%     author          = "Nelson H. F. Beebe",
%%%     version         = "1.31",
%%%     date            = "22 April 2014",
%%%     time            = "18:10:17 MDT",
%%%     filename        = "tap.bib",
%%%     address         = "University of Utah
%%%                        Department of Mathematics, 110 LCB
%%%                        155 S 1400 E RM 233
%%%                        Salt Lake City, UT 84112-0090
%%%                        USA",
%%%     telephone       = "+1 801 581 5254",
%%%     FAX             = "+1 801 581 4148",
%%%     URL             = "http://www.math.utah.edu/~beebe",
%%%     checksum        = "37687 9440 48084 457923",
%%%     email           = "beebe at math.utah.edu, beebe at acm.org,
%%%                        beebe at computer.org (Internet)",
%%%     codetable       = "ISO/ASCII",
%%%     keywords        = "ACM Transactions on Applied Perception;
%%%                        bibliography; data processing;
%%%                        human-computer interaction; psychology;
%%%                        TAP; visual perception",
%%%     license         = "public domain",
%%%     supported       = "yes",
%%%     docstring       = "This is a COMPLETE BibTeX bibliography for
%%%                        ACM Transactions on Applied Perception (CODEN
%%%                        ????, ISSN 1544-3558 (print), 1544-3965
%%%                        (electronic)), covering all journal issues
%%%                        from 2004 -- date.
%%%
%%%                        At version 1.31, the COMPLETE journal
%%%                        coverage looked like this:
%%%
%%%                             2004 (   9)    2008 (  22)    2012 (  21)
%%%                             2005 (  51)    2009 (  31)    2013 (  27)
%%%                             2006 (  25)    2010 (  35)    2014 (   4)
%%%                             2007 (  19)    2011 (  19)
%%%
%%%                             Article:        263
%%%
%%%                             Total entries:  263
%%%
%%%                        The journal Web page can be found at:
%%%
%%%                            http://www.acm.org/pubs/tap.html
%%%
%%%                        The journal table of contents page is at:
%%%
%%%                            http://www.acm.org/tap/
%%%                            http://www.acm.org/tap/PastIssues.html
%%%                            http://www.acm.org/tap/TitlesToAppear.html
%%%                            http://portal.acm.org/browse_dl.cfm?idx=J932
%%%
%%%                        Qualified subscribers can retrieve the full
%%%                        text of recent articles in PDF form.
%%%
%%%                        The initial draft was extracted from the ACM
%%%                        Web pages.
%%%
%%%                        ACM copyrights explicitly permit abstracting
%%%                        with credit, so article abstracts, keywords,
%%%                        and subject classifications have been
%%%                        included in this bibliography wherever
%%%                        available.  Article reviews have been
%%%                        omitted until their copyright status has
%%%                        been clarified.
%%%
%%%                        bibsource keys in the bibliography entries
%%%                        below indicate that the entry originally
%%%                        came from the computer science bibliography
%%%                        archive, even though it has likely since
%%%                        been corrected and updated.
%%%
%%%                        URL keys in the bibliography point to
%%%                        World Wide Web locations of additional
%%%                        information about the entry.
%%%
%%%                        BibTeX citation tags are uniformly chosen
%%%                        as name:year:abbrev, where name is the
%%%                        family name of the first author or editor,
%%%                        year is a 4-digit number, and abbrev is a
%%%                        3-letter condensation of important title
%%%                        words. Citation tags were automatically
%%%                        generated by software developed for the
%%%                        BibNet Project.
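%%%
%%%                        For illustration only, a minimal Python
%%%                        sketch of that naming rule follows.  It is
%%%                        an assumption about the scheme, not the
%%%                        BibNet Project software itself, and the
%%%                        stopword list is hypothetical:
%%%
%%%                            def citation_tag(family, year, title):
%%%                                # Build a name:year:abbrev tag,
%%%                                # e.g. "Ware:2004:MSR".
%%%                                stop = {"a", "an", "and", "for",
%%%                                        "in", "of", "on", "the",
%%%                                        "to", "with"}
%%%                                words = [w for w in title.split()
%%%                                         if w.lower() not in stop]
%%%                                abbrev = "".join(w[0].upper()
%%%                                                 for w in words[:3])
%%%                                return ":".join([family, year,
%%%                                                 abbrev])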
%%%
%%%                        In this bibliography, entries are sorted in
%%%                        publication order, using ``bibsort -byvolume''.
%%%
%%%                        The checksum field above contains a CRC-16
%%%                        checksum as the first value, followed by the
%%%                        equivalent of the standard UNIX wc (word
%%%                        count) utility output of lines, words, and
%%%                        characters.  This is produced by Robert
%%%                        Solovay's checksum utility."
%%%     }
%%% ====================================================================
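%%%
%%% For reference, a small Python sketch of how such a checksum line can
%%% be reproduced.  The line/word/character counts mirror the standard
%%% UNIX wc utility; the CRC-16 variant shown (CRC-16/ARC, polynomial
%%% 0x8005, reflected) is an assumption, since the exact algorithm of
%%% Solovay's checksum program is not documented here.
%%%
%%%     def crc16_arc(data):
%%%         # Bitwise CRC-16/ARC over a byte string; an assumed
%%%         # variant, not necessarily Solovay's.
%%%         crc = 0
%%%         for byte in data:
%%%             crc ^= byte
%%%             for _ in range(8):
%%%                 crc = (crc >> 1) ^ 0xA001 if crc & 1 else crc >> 1
%%%         return crc
%%%
%%%     def checksum_line(path):
%%%         # Emit "CRC lines words chars", the layout of the
%%%         # checksum field in the header above.
%%%         data = open(path, "rb").read()
%%%         text = data.decode("ascii")
%%%         return "%d %d %d %d" % (crc16_arc(data), text.count("\n"),
%%%                                 len(text.split()), len(text))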

@Preamble{"\input bibnames.sty"}

%%% ====================================================================
%%% Acknowledgement abbreviations:

@String{ack-nhfb = "Nelson H. F. Beebe,
                    University of Utah,
                    Department of Mathematics, 110 LCB,
                    155 S 1400 E RM 233,
                    Salt Lake City, UT 84112-0090, USA,
                    Tel: +1 801 581 5254,
                    FAX: +1 801 581 4148,
                    e-mail: \path|beebe@math.utah.edu|,
                            \path|beebe@acm.org|,
                            \path|beebe@computer.org| (Internet),
                    URL: \path|http://www.math.utah.edu/~beebe/|"}

%%% ====================================================================
%%% Journal abbreviations:

@String{j-TAP                   = "ACM Transactions on Applied Perception"}

%%% ====================================================================
%%% Bibliography entries:

@Article{Reinhard:2004:E,
  author =       "Erik Reinhard and Heinrich B{\"u}lthoff",
  title =        "Editorial",
  journal =      j-TAP,
  volume =       "1",
  number =       "1",
  pages =        "1--2",
  month =        jul,
  year =         "2004",
  CODEN =        "????",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Sat Dec 17 08:21:58 MST 2005",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tap.bib",
  acknowledgement = ack-nhfb,
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Ware:2004:MSR,
  author =       "Colin Ware and Robert Bobrow",
  title =        "Motion to support rapid interactive queries on
                 node--link diagrams",
  journal =      j-TAP,
  volume =       "1",
  number =       "1",
  pages =        "3--18",
  month =        jul,
  year =         "2004",
  CODEN =        "????",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Sat Dec 17 08:21:58 MST 2005",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tap.bib",
  acknowledgement = ack-nhfb,
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Frowd:2004:EHE,
  author =       "Charlie D. Frowd and Peter J. B. Hancock and Derek
                 Carson",
  title =        "{EvoFIT}: {A} holistic, evolutionary facial imaging
                 technique for creating composites",
  journal =      j-TAP,
  volume =       "1",
  number =       "1",
  pages =        "19--39",
  month =        jul,
  year =         "2004",
  CODEN =        "????",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Sat Dec 17 08:21:58 MST 2005",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tap.bib",
  acknowledgement = ack-nhfb,
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Surakka:2004:GFN,
  author =       "Veikko Surakka and Marko Illi and Poika Isokoski",
  title =        "Gazing and frowning as a new human--computer
                 interaction technique",
  journal =      j-TAP,
  volume =       "1",
  number =       "1",
  pages =        "40--56",
  month =        jul,
  year =         "2004",
  CODEN =        "????",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Sat Dec 17 08:21:58 MST 2005",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tap.bib",
  acknowledgement = ack-nhfb,
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Yu:2004:MLI,
  author =       "Chen Yu and Dana H. Ballard",
  title =        "A multimodal learning interface for grounding spoken
                 language in sensory perceptions",
  journal =      j-TAP,
  volume =       "1",
  number =       "1",
  pages =        "57--80",
  month =        jul,
  year =         "2004",
  CODEN =        "????",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Sat Dec 17 08:21:58 MST 2005",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tap.bib",
  acknowledgement = ack-nhfb,
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Gugerty:2004:ESA,
  author =       "Leo Gugerty and Richard A. Tyrrell and Thomas R. Aten
                 and K. Andy Edmonds",
  title =        "The effects of subpixel addressing on users'
                 performance and preferences during reading-related
                 tasks",
  journal =      j-TAP,
  volume =       "1",
  number =       "2",
  pages =        "81--101",
  month =        oct,
  year =         "2004",
  CODEN =        "????",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Sat Dec 17 08:21:58 MST 2005",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tap.bib",
  acknowledgement = ack-nhfb,
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Robles-De-La-Torre:2004:NEI,
  author =       "G. Robles-De-La-Torre and R. Sekuler",
  title =        "Numerically estimating internal models of dynamic
                 virtual objects",
  journal =      j-TAP,
  volume =       "1",
  number =       "2",
  pages =        "102--117",
  month =        oct,
  year =         "2004",
  CODEN =        "????",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Sat Dec 17 08:21:58 MST 2005",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tap.bib",
  acknowledgement = ack-nhfb,
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Ottaviani:2004:APS,
  author =       "Laura Ottaviani and Davide Rocchesso",
  title =        "Auditory perception of {$3$D} size: Experiments with
                 synthetic resonators",
  journal =      j-TAP,
  volume =       "1",
  number =       "2",
  pages =        "118--129",
  month =        oct,
  year =         "2004",
  CODEN =        "????",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Sat Dec 17 08:21:58 MST 2005",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tap.bib",
  acknowledgement = ack-nhfb,
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{McGookin:2004:UCE,
  author =       "David K. McGookin and Stephen A. Brewster",
  title =        "Understanding concurrent earcons: Applying auditory
                 scene analysis principles to concurrent earcon
                 recognition",
  journal =      j-TAP,
  volume =       "1",
  number =       "2",
  pages =        "130--155",
  month =        oct,
  year =         "2004",
  CODEN =        "????",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Sat Dec 17 08:21:58 MST 2005",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tap.bib",
  acknowledgement = ack-nhfb,
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Shin:2005:VCA,
  author =       "Do Hyoung Shin and Phillip S. Dunston and Xiangyu
                 Wang",
  title =        "View changes in augmented reality
                 computer-aided-drawing",
  journal =      j-TAP,
  volume =       "2",
  number =       "1",
  pages =        "1--14",
  month =        jan,
  year =         "2005",
  CODEN =        "????",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Sat Dec 17 08:21:58 MST 2005",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tap.bib",
  acknowledgement = ack-nhfb,
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Payandeh:2005:SLD,
  author =       "Shahram Payandeh and John Dill and Jian Zhang",
  title =        "A study of level-of-detail in haptic rendering",
  journal =      j-TAP,
  volume =       "2",
  number =       "1",
  pages =        "15--34",
  month =        jan,
  year =         "2005",
  CODEN =        "????",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Sat Dec 17 08:21:58 MST 2005",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tap.bib",
  acknowledgement = ack-nhfb,
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Sahm:2005:TVW,
  author =       "Cynthia S. Sahm and Sarah H. Creem-Regehr and William
                 B. Thompson and Peter Willemsen",
  title =        "Throwing versus walking as indicators of distance
                 perception in similar real and virtual environments",
  journal =      j-TAP,
  volume =       "2",
  number =       "1",
  pages =        "35--45",
  month =        jan,
  year =         "2005",
  CODEN =        "????",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Sat Dec 17 08:21:58 MST 2005",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tap.bib",
  acknowledgement = ack-nhfb,
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Kikuuwe:2005:EHD,
  author =       "Ryo Kikuuwe and Akihito Sano and Hiromi Mochiyama and
                 Naoyuki Takesue and Hideo Fujimoto",
  title =        "Enhancing haptic detection of surface undulation",
  journal =      j-TAP,
  volume =       "2",
  number =       "1",
  pages =        "46--67",
  month =        jan,
  year =         "2005",
  CODEN =        "????",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Sat Dec 17 08:21:58 MST 2005",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tap.bib",
  acknowledgement = ack-nhfb,
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Harders:2005:ESI,
  author =       "Matthias Harders and Marc Ernst",
  title =        "{EuroHaptics} special issue editorial",
  journal =      j-TAP,
  volume =       "2",
  number =       "2",
  pages =        "69--70",
  month =        apr,
  year =         "2005",
  CODEN =        "????",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Sat Dec 17 08:21:59 MST 2005",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tap.bib",
  acknowledgement = ack-nhfb,
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Dinse:2005:IHH,
  author =       "Hubert R. Dinse and Tobias Kalisch and Patrick Ragert
                 and Burkhard Pleger and Peter Schwenkreis and Martin
                 Tegenthoff",
  title =        "Improving human haptic performance in normal and
                 impaired human populations through unattended
                 activation-based learning",
  journal =      j-TAP,
  volume =       "2",
  number =       "2",
  pages =        "71--88",
  month =        apr,
  year =         "2005",
  CODEN =        "????",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Sat Dec 17 08:21:59 MST 2005",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tap.bib",
  acknowledgement = ack-nhfb,
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Choi:2005:FCE,
  author =       "Seungmoon Choi and Laron Walker and Hong Z. Tan and
                 Scott Crittenden and Ron Reifenberger",
  title =        "Force constancy and its effect on haptic perception of
                 virtual surfaces",
  journal =      j-TAP,
  volume =       "2",
  number =       "2",
  pages =        "89--105",
  month =        apr,
  year =         "2005",
  CODEN =        "????",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Sat Dec 17 08:21:59 MST 2005",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tap.bib",
  acknowledgement = ack-nhfb,
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{VanErp:2005:WNV,
  author =       "Jan B. F. {Van Erp} and Hendrik A. H. C. {Van Veen}
                 and Chris Jansen and Trevor Dobbins",
  title =        "Waypoint navigation with a vibrotactile waist belt",
  journal =      j-TAP,
  volume =       "2",
  number =       "2",
  pages =        "106--117",
  month =        apr,
  year =         "2005",
  CODEN =        "????",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Sat Dec 17 08:21:59 MST 2005",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tap.bib",
  acknowledgement = ack-nhfb,
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Drewing:2005:FEN,
  author =       "Knut Drewing and Michael Fritschi and Regine Zopf and
                 Marc O. Ernst and Martin Buss",
  title =        "First evaluation of a novel tactile display exerting
                 shear force via lateral displacement",
  journal =      j-TAP,
  volume =       "2",
  number =       "2",
  pages =        "118--131",
  month =        apr,
  year =         "2005",
  CODEN =        "????",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Sat Dec 17 08:21:59 MST 2005",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tap.bib",
  acknowledgement = ack-nhfb,
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Levesque:2005:DVB,
  author =       "Vincent L{\'e}vesque and J{\'e}r{\^o}me Pasquero and
                 Vincent Hayward and Maryse Legault",
  title =        "Display of virtual {Braille} dots by lateral skin
                 deformation: feasibility study",
  journal =      j-TAP,
  volume =       "2",
  number =       "2",
  pages =        "132--149",
  month =        apr,
  year =         "2005",
  CODEN =        "????",
  DOI =          "http://doi.acm.org/10.1145/1060581.1060587",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Sat Dec 17 08:21:59 MST 2005",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tap.bib",
  acknowledgement = ack-nhfb,
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Webster:2005:NTD,
  author =       "Robert J. {Webster III} and Todd E. Murphy and Lawton
                 N. Verner and Allison M. Okamura",
  title =        "A novel two-dimensional tactile slip display: design,
                 kinematics and perceptual experiments",
  journal =      j-TAP,
  volume =       "2",
  number =       "2",
  pages =        "150--165",
  month =        apr,
  year =         "2005",
  CODEN =        "????",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Sat Dec 17 08:21:59 MST 2005",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tap.bib",
  acknowledgement = ack-nhfb,
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Schmidt:2005:HNH,
  author =       "Henning Schmidt and Stefan Hesse and Rolf Bernhardt
                 and J{\"o}rg Kr{\"u}ger",
  title =        "{HapticWalker}---a novel haptic foot device",
  journal =      j-TAP,
  volume =       "2",
  number =       "2",
  pages =        "166--180",
  month =        apr,
  year =         "2005",
  CODEN =        "????",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Sat Dec 17 08:21:59 MST 2005",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tap.bib",
  acknowledgement = ack-nhfb,
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Rushmeier:2005:GE,
  author =       "Holly Rushmeier",
  title =        "Guest Editorial",
  journal =      j-TAP,
  volume =       "2",
  number =       "3",
  pages =        "181--182",
  month =        jul,
  year =         "2005",
  CODEN =        "????",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Sat Dec 17 08:21:59 MST 2005",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tap.bib",
  acknowledgement = ack-nhfb,
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Riecke:2005:VCC,
  author =       "Bernhard E. Riecke and Markus {Von Der Heyde} and
                 Heinrich H. B{\"u}lthoff",
  title =        "Visual cues can be sufficient for triggering
                 automatic, reflexlike spatial updating",
  journal =      j-TAP,
  volume =       "2",
  number =       "3",
  pages =        "183--215",
  month =        jul,
  year =         "2005",
  CODEN =        "????",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Sat Dec 17 08:21:59 MST 2005",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tap.bib",
  acknowledgement = ack-nhfb,
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Plumert:2005:DPR,
  author =       "Jodie M. Plumert and Joseph K. Kearney and James F.
                 Cremer and Kara Recker",
  title =        "Distance perception in real and virtual environments",
  journal =      j-TAP,
  volume =       "2",
  number =       "3",
  pages =        "216--233",
  month =        jul,
  year =         "2005",
  CODEN =        "????",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Sat Dec 17 08:21:59 MST 2005",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tap.bib",
  acknowledgement = ack-nhfb,
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Messing:2005:DPV,
  author =       "Ross Messing and Frank H. Durgin",
  title =        "Distance Perception and the Visual Horizon in
                 Head-Mounted Displays",
  journal =      j-TAP,
  volume =       "2",
  number =       "3",
  pages =        "234--250",
  month =        jul,
  year =         "2005",
  CODEN =        "????",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Sat Dec 17 08:21:59 MST 2005",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tap.bib",
  acknowledgement = ack-nhfb,
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Cunningham:2005:MVS,
  author =       "Douglas W. Cunningham and Mario Kleiner and Christian
                 Wallraven and Heinrich H. B{\"u}lthoff",
  title =        "Manipulating Video Sequences to Determine the
                 Components of Conversational Facial Expressions",
  journal =      j-TAP,
  volume =       "2",
  number =       "3",
  pages =        "251--269",
  month =        jul,
  year =         "2005",
  CODEN =        "????",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Sat Dec 17 08:21:59 MST 2005",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tap.bib",
  acknowledgement = ack-nhfb,
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Cosker:2005:TPR,
  author =       "Darren Cosker and David Marshall and Paul L. Rosin and
                 Susan Paddock and Simon Rushton",
  title =        "Toward Perceptually Realistic Talking Heads: Models,
                 Methods, and {McGurk}",
  journal =      j-TAP,
  volume =       "2",
  number =       "3",
  pages =        "270--285",
  month =        jul,
  year =         "2005",
  CODEN =        "????",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Sat Dec 17 08:21:59 MST 2005",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tap.bib",
  acknowledgement = ack-nhfb,
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Howlett:2005:PES,
  author =       "Sarah Howlett and John Hamill and Carol O'Sullivan",
  title =        "Predicting and Evaluating Saliency for Simplified
                 Polygonal Models",
  journal =      j-TAP,
  volume =       "2",
  number =       "3",
  pages =        "286--308",
  month =        jul,
  year =         "2005",
  CODEN =        "????",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Sat Dec 17 08:21:59 MST 2005",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tap.bib",
  acknowledgement = ack-nhfb,
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{OSullivan:2005:CA,
  author =       "Carol O'Sullivan",
  title =        "Collisions and Attention",
  journal =      j-TAP,
  volume =       "2",
  number =       "3",
  pages =        "309--321",
  month =        jul,
  year =         "2005",
  CODEN =        "????",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Sat Dec 17 08:21:59 MST 2005",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tap.bib",
  acknowledgement = ack-nhfb,
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Chang:2005:EBC,
  author =       "Youngha Chang and Suguru Saito and Keiji Uchikawa and
                 Masayuki Nakajima",
  title =        "Example-Based Color Stylization of Images",
  journal =      j-TAP,
  volume =       "2",
  number =       "3",
  pages =        "322--345",
  month =        jul,
  year =         "2005",
  CODEN =        "????",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Sat Dec 17 08:21:59 MST 2005",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tap.bib",
  acknowledgement = ack-nhfb,
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Fleming:2005:LLI,
  author =       "Roland W. Fleming and Heinrich H. B{\"u}lthoff",
  title =        "Low-Level Image Cues in the Perception of Translucent
                 Materials",
  journal =      j-TAP,
  volume =       "2",
  number =       "3",
  pages =        "346--382",
  month =        jul,
  year =         "2005",
  CODEN =        "????",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Sat Dec 17 08:21:59 MST 2005",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tap.bib",
  acknowledgement = ack-nhfb,
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Kramer:2005:SSM,
  author =       "Gregory Kramer and Bruce N. Walker",
  title =        "Sound science: Marking ten international conferences
                 on auditory display",
  journal =      j-TAP,
  volume =       "2",
  number =       "4",
  pages =        "383--388",
  month =        oct,
  year =         "2005",
  CODEN =        "????",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Sat Dec 17 08:21:59 MST 2005",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tap.bib",
  acknowledgement = ack-nhfb,
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Barrass:2005:PFA,
  author =       "Stephen Barrass",
  title =        "A perceptual framework for the auditory display of
                 scientific data",
  journal =      j-TAP,
  volume =       "2",
  number =       "4",
  pages =        "389--402",
  month =        oct,
  year =         "2005",
  CODEN =        "????",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Sat Dec 17 08:21:59 MST 2005",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tap.bib",
  note =         "See comments \cite{Barrass:2005:CFA}.",
  acknowledgement = ack-nhfb,
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Barrass:2005:CFA,
  author =       "Stephen Barrass",
  title =        "A comprehensive framework for auditory display:
                 Comments on {Barrass}, {ICAD 1994}",
  journal =      j-TAP,
  volume =       "2",
  number =       "4",
  pages =        "403--406",
  month =        oct,
  year =         "2005",
  CODEN =        "????",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Sat Dec 17 08:21:59 MST 2005",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tap.bib",
  note =         "See \cite{Barrass:2005:PFA}.",
  acknowledgement = ack-nhfb,
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Walker:2005:MMA,
  author =       "Bruce N. Walker and Gregory Kramer",
  title =        "Mappings and metaphors in auditory displays: An
                 experimental assessment",
  journal =      j-TAP,
  volume =       "2",
  number =       "4",
  pages =        "407--412",
  month =        oct,
  year =         "2005",
  CODEN =        "????",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Sat Dec 17 08:21:59 MST 2005",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tap.bib",
  note =         "See comments \cite{Walker:2005:SDM}.",
  acknowledgement = ack-nhfb,
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Walker:2005:SDM,
  author =       "Bruce N. Walker and Gregory Kramer",
  title =        "Sonification design and metaphors: Comments on
                 {Walker} and {Kramer}, {ICAD 1996}",
  journal =      j-TAP,
  volume =       "2",
  number =       "4",
  pages =        "413--417",
  month =        oct,
  year =         "2005",
  CODEN =        "????",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Sat Dec 17 08:21:59 MST 2005",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tap.bib",
  note =         "See \cite{Walker:2005:MMA}.",
  acknowledgement = ack-nhfb,
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Shinn-Cunningham:2005:PPS,
  author =       "Barbara G. Shinn-Cunningham and Timothy Streeter and
                 Jean-Fran{\c{c}}ois Gyss",
  title =        "Perceptual plasticity in spatial auditory displays",
  journal =      j-TAP,
  volume =       "2",
  number =       "4",
  pages =        "418--425",
  month =        oct,
  year =         "2005",
  CODEN =        "????",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Sat Dec 17 08:21:59 MST 2005",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tap.bib",
  note =         "See comments \cite{Shinn-Cunningham:2005:SAD}.",
  acknowledgement = ack-nhfb,
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Shinn-Cunningham:2005:SAD,
  author =       "Barbara G. Shinn-Cunningham and Timothy Streeter",
  title =        "Spatial auditory display: Comments on
                 {Shinn-Cunningham} et al., {ICAD 2001}",
  journal =      j-TAP,
  volume =       "2",
  number =       "4",
  pages =        "426--429",
  month =        oct,
  year =         "2005",
  CODEN =        "????",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Sat Dec 17 08:21:59 MST 2005",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tap.bib",
  note =         "See \cite{Shinn-Cunningham:2005:PPS}.",
  acknowledgement = ack-nhfb,
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Brungart:2005:OSC,
  author =       "Douglas S. Brungart and Brian D. Simpson",
  title =        "Optimizing the spatial configuration of a seven-talker
                 speech display",
  journal =      j-TAP,
  volume =       "2",
  number =       "4",
  pages =        "430--436",
  month =        oct,
  year =         "2005",
  CODEN =        "????",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Sat Dec 17 08:21:59 MST 2005",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tap.bib",
  note =         "See comments \cite{Brungart:2005:OVS}.",
  acknowledgement = ack-nhfb,
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Brungart:2005:OVS,
  author =       "Douglas S. Brungart and Brian D. Simpson",
  title =        "Optimizing a virtual speech display: Comments on
                 {Brungart} and {Simpson}, {ICAD 2003}",
  journal =      j-TAP,
  volume =       "2",
  number =       "4",
  pages =        "437--441",
  month =        oct,
  year =         "2005",
  CODEN =        "????",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Sat Dec 17 08:21:59 MST 2005",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tap.bib",
  note =         "See \cite{Brungart:2005:OSC}.",
  acknowledgement = ack-nhfb,
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Edwards:2005:PMS,
  author =       "Alistair D. N. Edwards and Evangelos Mitsopoulos",
  title =        "A principled methodology for the specification and
                 design of nonvisual widgets",
  journal =      j-TAP,
  volume =       "2",
  number =       "4",
  pages =        "442--449",
  month =        oct,
  year =         "2005",
  CODEN =        "????",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Sat Dec 17 08:21:59 MST 2005",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tap.bib",
  note =         "See comments \cite{Edwards:2005:PAD}.",
  acknowledgement = ack-nhfb,
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Edwards:2005:PAD,
  author =       "Alistair D. N. Edwards and Evangelos Mitsopoulos",
  title =        "Perceptual auditory design: Comments on {Edwards} and
                 {Mitsopoulos}, {ICAD 1998}",
  journal =      j-TAP,
  volume =       "2",
  number =       "4",
  pages =        "450--454",
  month =        oct,
  year =         "2005",
  CODEN =        "????",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Sat Dec 17 08:21:59 MST 2005",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tap.bib",
  note =         "See \cite{Edwards:2005:PMS}.",
  acknowledgement = ack-nhfb,
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Brewster:2005:DES,
  author =       "Stephen A. Brewster and Catherine V. Clarke",
  title =        "The design and evaluation of a sonically enhanced tool
                 palette",
  journal =      j-TAP,
  volume =       "2",
  number =       "4",
  pages =        "455--461",
  month =        oct,
  year =         "2005",
  CODEN =        "????",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Sat Dec 17 08:21:59 MST 2005",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tap.bib",
  note =         "See comments \cite{Brewster:2005:SEW}.",
  acknowledgement = ack-nhfb,
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Brewster:2005:SEW,
  author =       "Stephen A. Brewster",
  title =        "Sonically-enhanced widgets: Comments on {Brewster} and
                 {Clarke}, {ICAD 1997}",
  journal =      j-TAP,
  volume =       "2",
  number =       "4",
  pages =        "462--466",
  month =        oct,
  year =         "2005",
  CODEN =        "????",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Sat Dec 17 08:21:59 MST 2005",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tap.bib",
  note =         "See \cite{Brewster:2005:DES}.",
  acknowledgement = ack-nhfb,
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Flowers:2005:DSD,
  author =       "John H. Flowers and Dion C. Buhman and Kimberly D.
                 Turnage",
  title =        "Data sonification from the desktop: Should sound be
                 part of standard data analysis software?",
  journal =      j-TAP,
  volume =       "2",
  number =       "4",
  pages =        "467--472",
  month =        oct,
  year =         "2005",
  CODEN =        "????",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Sat Dec 17 08:21:59 MST 2005",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tap.bib",
  note =         "See comments \cite{Flowers:2005:DDS}.",
  acknowledgement = ack-nhfb,
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Flowers:2005:DDS,
  author =       "John H. Flowers and Kimberly D. Turnage and Dion C.
                 Buhman",
  title =        "Desktop data sonification: Comments on {Flowers} et
                 al., {ICAD 1996}",
  journal =      j-TAP,
  volume =       "2",
  number =       "4",
  pages =        "473--476",
  month =        oct,
  year =         "2005",
  CODEN =        "????",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Sat Dec 17 08:21:59 MST 2005",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tap.bib",
  note =         "See \cite{Flowers:2005:DSD}.",
  acknowledgement = ack-nhfb,
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Vickers:2005:MPA,
  author =       "Paul Vickers and James L. Alty",
  title =        "Musical program auralization: Empirical studies",
  journal =      j-TAP,
  volume =       "2",
  number =       "4",
  pages =        "477--489",
  month =        oct,
  year =         "2005",
  CODEN =        "????",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Sat Dec 17 08:21:59 MST 2005",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tap.bib",
  note =         "See comments \cite{Vickers:2005:PAA}.",
  acknowledgement = ack-nhfb,
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Vickers:2005:PAA,
  author =       "Paul Vickers",
  title =        "Program auralization: {Author}'s comments on {Vickers}
                 and {Alty}, {ICAD 2000}",
  journal =      j-TAP,
  volume =       "2",
  number =       "4",
  pages =        "490--494",
  month =        oct,
  year =         "2005",
  CODEN =        "????",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Sat Dec 17 08:21:59 MST 2005",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tap.bib",
  note =         "See \cite{Vickers:2005:MPA}.",
  acknowledgement = ack-nhfb,
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Fernstrom:2005:ADM,
  author =       "Mikael Fernstr{\"o}m and Caolan McNamara",
  title =        "After direct manipulation---direct sonification",
  journal =      j-TAP,
  volume =       "2",
  number =       "4",
  pages =        "495--499",
  month =        oct,
  year =         "2005",
  CODEN =        "????",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Sat Dec 17 08:21:59 MST 2005",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tap.bib",
  note =         "See comments \cite{Fernstrom:2005:RSB}.",
  acknowledgement = ack-nhfb,
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Fernstrom:2005:RSB,
  author =       "Mikael Fernstr{\"o}m",
  title =        "Reflections on sonic browsing: Comments on
                 {Fernstr{\"o}m} and {McNamara}, {ICAD 1998}",
  journal =      j-TAP,
  volume =       "2",
  number =       "4",
  pages =        "500--504",
  month =        oct,
  year =         "2005",
  CODEN =        "????",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Sat Dec 17 08:21:59 MST 2005",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tap.bib",
  note =         "See \cite{Fernstrom:2005:ADM}.",
  acknowledgement = ack-nhfb,
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Bonebright:2005:DCA,
  author =       "Terri L. Bonebright and Nadine E. Miner and Timothy E.
                 Goldsmith and Thomas P. Caudell",
  title =        "Data collection and analysis techniques for evaluating
                 the perceptual qualities of auditory stimuli",
  journal =      j-TAP,
  volume =       "2",
  number =       "4",
  pages =        "505--516",
  month =        oct,
  year =         "2005",
  CODEN =        "????",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Sat Dec 17 08:21:59 MST 2005",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tap.bib",
  note =         "See comments \cite{Bonebright:2005:EAD}.",
  acknowledgement = ack-nhfb,
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Bonebright:2005:EAD,
  author =       "Terri L. Bonebright and Nadine E. Miner",
  title =        "Evaluation of auditory displays: Comments on
                 {Bonebright} et al., {ICAD 1998}",
  journal =      j-TAP,
  volume =       "2",
  number =       "4",
  pages =        "517--520",
  month =        oct,
  year =         "2005",
  CODEN =        "????",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Sat Dec 17 08:21:59 MST 2005",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tap.bib",
  note =         "See \cite{Bonebright:2005:DCA}.",
  acknowledgement = ack-nhfb,
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Miner:2005:UWS,
  author =       "Nadine E. Miner and Thomas P. Caudell",
  title =        "Using wavelets to synthesize stochastic-based sounds
                 for immersive virtual environments",
  journal =      j-TAP,
  volume =       "2",
  number =       "4",
  pages =        "521--528",
  month =        oct,
  year =         "2005",
  CODEN =        "????",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Sat Dec 17 08:21:59 MST 2005",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tap.bib",
  note =         "See comments \cite{Miner:2005:ACM}.",
  acknowledgement = ack-nhfb,
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Miner:2005:ACM,
  author =       "Nadine E. Miner and Victor E. Vergara Panaiotis and
                 Thomas Preston Caudell",
  title =        "Authors' comments on {Miner} and {Caudell}, {ICAD
                 1997}",
  journal =      j-TAP,
  volume =       "2",
  number =       "4",
  pages =        "529--533",
  month =        oct,
  year =         "2005",
  CODEN =        "????",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Sat Dec 17 08:21:59 MST 2005",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tap.bib",
  note =         "See \cite{Miner:2005:UWS}.",
  acknowledgement = ack-nhfb,
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{vandenDoel:2005:PBM,
  author =       "Kees van den Doel",
  title =        "Physically based models for liquid sounds",
  journal =      j-TAP,
  volume =       "2",
  number =       "4",
  pages =        "534--546",
  month =        oct,
  year =         "2005",
  CODEN =        "????",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Sat Dec 17 08:21:59 MST 2005",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tap.bib",
  note =         "See comments \cite{vandenDoel:2005:PSC}.",
  acknowledgement = ack-nhfb,
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{vandenDoel:2005:PSC,
  author =       "Kees van den Doel",
  title =        "From physics to sound: Comments on {van den Doel},
                 {ICAD 2004}",
  journal =      j-TAP,
  volume =       "2",
  number =       "4",
  pages =        "547--549",
  month =        oct,
  year =         "2005",
  CODEN =        "????",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Sat Dec 17 08:21:59 MST 2005",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tap.bib",
  note =         "See \cite{vandenDoel:2005:PBM}.",
  acknowledgement = ack-nhfb,
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Hermann:2005:CSH,
  author =       "Thomas Hermann and Helge Ritter",
  title =        "Crystallization sonification of high-dimensional
                 datasets",
  journal =      j-TAP,
  volume =       "2",
  number =       "4",
  pages =        "550--558",
  month =        oct,
  year =         "2005",
  CODEN =        "????",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Sat Dec 17 08:21:59 MST 2005",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tap.bib",
  note =         "See comments \cite{Hermann:2005:MBS}.",
  acknowledgement = ack-nhfb,
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Hermann:2005:MBS,
  author =       "Thomas Hermann and Helge Ritter",
  title =        "Model-based sonification revisited---authors' comments
                 on {Hermann} and {Ritter}, {ICAD 2002}",
  journal =      j-TAP,
  volume =       "2",
  number =       "4",
  pages =        "559--563",
  month =        oct,
  year =         "2005",
  CODEN =        "????",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Sat Dec 17 08:21:59 MST 2005",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tap.bib",
  note =         "See \cite{Hermann:2005:CSH}.",
  acknowledgement = ack-nhfb,
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Grohn:2005:CAV,
  author =       "Matti Gr{\"o}hn and Tapio Lokki and Tapio Takala",
  title =        "Comparison of auditory, visual, and audiovisual
                 navigation in a {$3$D} space",
  journal =      j-TAP,
  volume =       "2",
  number =       "4",
  pages =        "564--570",
  month =        oct,
  year =         "2005",
  CODEN =        "????",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Sat Dec 17 08:21:59 MST 2005",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tap.bib",
  note =         "See comments \cite{Grohn:2005:ACG}.",
  acknowledgement = ack-nhfb,
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Grohn:2005:ACG,
  author =       "Matti Gr{\"o}hn and Tapio Lokki and Tapio Takala",
  title =        "Author's comments on {Gr{\"o}hn}, {Lokki}, and
                 {Takala}, {ICAD 2003}",
  journal =      j-TAP,
  volume =       "2",
  number =       "4",
  pages =        "571--573",
  month =        oct,
  year =         "2005",
  CODEN =        "????",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Sat Dec 17 08:21:59 MST 2005",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tap.bib",
  note =         "See \cite{Grohn:2005:CAV}.",
  acknowledgement = ack-nhfb,
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Klatzky:2006:PRR,
  author =       "Roberta L. Klatzky and Susan J. Lederman",
  title =        "The perceived roughness of resistive virtual textures:
                 {I}. {Rendering} by a force-feedback mouse",
  journal =      j-TAP,
  volume =       "3",
  number =       "1",
  pages =        "1--14",
  month =        jan,
  year =         "2006",
  CODEN =        "????",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Tue Mar 14 07:36:58 MST 2006",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tap.bib",
  acknowledgement = ack-nhfb,
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Lederman:2006:PRR,
  author =       "Susan J. Lederman and Roberta L. Klatzky and Christine
                 Tong and Cheryl Hamilton",
  title =        "The perceived roughness of resistive virtual textures:
                 {II}. {Effects} of varying viscosity with a
                 force-feedback device",
  journal =      j-TAP,
  volume =       "3",
  number =       "1",
  pages =        "15--30",
  month =        jan,
  year =         "2006",
  CODEN =        "????",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Tue Mar 14 07:36:58 MST 2006",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tap.bib",
  acknowledgement = ack-nhfb,
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Neumann:2006:IRP,
  author =       "Dirk Neumann and Karl R. Gegenfurtner",
  title =        "Image retrieval and perceptual similarity",
  journal =      j-TAP,
  volume =       "3",
  number =       "1",
  pages =        "31--47",
  month =        jan,
  year =         "2006",
  CODEN =        "????",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Tue Mar 14 07:36:58 MST 2006",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tap.bib",
  acknowledgement = ack-nhfb,
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Niemenlehto:2006:DES,
  author =       "Pekka-Henrik Niemenlehto and Martti Juhola and Veikko
                 Surakka",
  title =        "Detection of electromyographic signals from facial
                 muscles with neural networks",
  journal =      j-TAP,
  volume =       "3",
  number =       "1",
  pages =        "48--61",
  month =        jan,
  year =         "2006",
  CODEN =        "????",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Tue Mar 14 07:36:58 MST 2006",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tap.bib",
  acknowledgement = ack-nhfb,
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Zana:2006:FRB,
  author =       "Yossi Zana and Roberto M. {Cesar, Jr.}",
  title =        "Face recognition based on polar frequency features",
  journal =      j-TAP,
  volume =       "3",
  number =       "1",
  pages =        "62--82",
  month =        jan,
  year =         "2006",
  CODEN =        "????",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Tue Mar 14 07:36:58 MST 2006",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tap.bib",
  acknowledgement = ack-nhfb,
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Kelly:2006:SMS,
  author =       "Jonathan W. Kelly and Andrew C. Beall and Jack M.
                 Loomis and Roy S. Smith and Kristen L. Macuga",
  title =        "Simultaneous measurement of steering performance and
                 perceived heading on a curving path",
  journal =      j-TAP,
  volume =       "3",
  number =       "2",
  pages =        "83--94",
  month =        apr,
  year =         "2006",
  CODEN =        "????",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Wed Aug 23 14:16:39 MDT 2006",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tap.bib",
  acknowledgement = ack-nhfb,
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Seuntiens:2006:PQC,
  author =       "Pieter Seuntiens and Lydia Meesters and Wijnand
                 Ijsselsteijn",
  title =        "Perceived quality of compressed stereoscopic images:
                 Effects of symmetric and asymmetric {JPEG} coding and
                 camera separation",
  journal =      j-TAP,
  volume =       "3",
  number =       "2",
  pages =        "95--109",
  month =        apr,
  year =         "2006",
  CODEN =        "????",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Wed Aug 23 14:16:39 MDT 2006",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tap.bib",
  acknowledgement = ack-nhfb,
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Marston:2006:ESD,
  author =       "James R. Marston and Jack M. Loomis and Roberta L.
                 Klatzky and Reginald G. Golledge and Ethan L. Smith",
  title =        "Evaluation of spatial displays for navigation without
                 sight",
  journal =      j-TAP,
  volume =       "3",
  number =       "2",
  pages =        "110--124",
  month =        apr,
  year =         "2006",
  CODEN =        "????",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Wed Aug 23 14:16:39 MDT 2006",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tap.bib",
  acknowledgement = ack-nhfb,
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Barbagli:2006:HDF,
  author =       "Federico Barbagli and Ken Salisbury and Cristy Ho and
                 Charles Spence and Hong Z. Tan",
  title =        "Haptic discrimination of force direction and the
                 influence of visual information",
  journal =      j-TAP,
  volume =       "3",
  number =       "2",
  pages =        "125--135",
  month =        apr,
  year =         "2006",
  CODEN =        "????",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Wed Aug 23 14:16:39 MDT 2006",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tap.bib",
  acknowledgement = ack-nhfb,
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Atkins:2006:AET,
  author =       "M. Stella Atkins and Adrian Moise and Robert
                 Rohling",
  title =        "An application of eyegaze tracking for designing
                 radiologists' workstations: Insights for comparative
                 visual search tasks",
  journal =      j-TAP,
  volume =       "3",
  number =       "2",
  pages =        "136--151",
  month =        apr,
  year =         "2006",
  CODEN =        "????",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Wed Aug 23 14:16:39 MDT 2006",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tap.bib",
  acknowledgement = ack-nhfb,
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Interrante:2006:GE,
  author =       "Victoria Interrante",
  title =        "Guest Editorial",
  journal =      j-TAP,
  volume =       "3",
  number =       "3",
  pages =        "153--154",
  month =        jul,
  year =         "2006",
  CODEN =        "????",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Tue Oct 17 05:25:39 MDT 2006",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tap.bib",
  acknowledgement = ack-nhfb,
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Lovell:2006:EMC,
  author =       "P. George Lovell and C. Alejandro P{\'a}rraga and Tom
                 Troscianko and Caterina Ripamonti and David J. Tolhurst",
  title =        "Evaluation of a multiscale color model for visual
                 difference prediction",
  journal =      j-TAP,
  volume =       "3",
  number =       "3",
  pages =        "155--178",
  month =        jul,
  year =         "2006",
  CODEN =        "????",
  DOI =          "http://doi.acm.org/10.1145/1166087.1166089",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Tue Oct 17 05:25:39 MDT 2006",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tap.bib",
  acknowledgement = ack-nhfb,
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Langer:2006:PLM,
  author =       "Michael S. Langer and Javeen Pereira and Dipinder
                 Rekhi",
  title =        "Perceptual limits on {$2$D} motion-field
                 visualization",
  journal =      j-TAP,
  volume =       "3",
  number =       "3",
  pages =        "179--193",
  month =        jul,
  year =         "2006",
  CODEN =        "????",
  DOI =          "http://doi.acm.org/10.1145/1166087.1166090",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Tue Oct 17 05:25:39 MDT 2006",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tap.bib",
  acknowledgement = ack-nhfb,
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Riecke:2006:CFC,
  author =       "Bernhard E. Riecke and J{\"o}rg Schulte-Pelkum and
                 Marios N. Avraamides and Markus von der Heyde and
                 Heinrich H. B{\"u}lthoff",
  title =        "Cognitive factors can influence self-motion perception
                 (vection) in virtual reality",
  journal =      j-TAP,
  volume =       "3",
  number =       "3",
  pages =        "194--216",
  month =        jul,
  year =         "2006",
  CODEN =        "????",
  DOI =          "http://doi.acm.org/10.1145/1166087.1166091",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Tue Oct 17 05:25:39 MDT 2006",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tap.bib",
  acknowledgement = ack-nhfb,
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{McNamara:2006:EVA,
  author =       "Ann McNamara",
  title =        "Exploring visual and automatic measures of perceptual
                 fidelity in real and simulated imagery",
  journal =      j-TAP,
  volume =       "3",
  number =       "3",
  pages =        "217--238",
  month =        jul,
  year =         "2006",
  CODEN =        "????",
  DOI =          "http://doi.acm.org/10.1145/1166087.1166092",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Tue Oct 17 05:25:39 MDT 2006",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tap.bib",
  acknowledgement = ack-nhfb,
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Cooke:2006:OFV,
  author =       "Theresa Cooke and Sebastian Kannengiesser and
                 Christian Wallraven and Heinrich H. B{\"u}lthoff",
  title =        "Object feature validation using visual and haptic
                 similarity ratings",
  journal =      j-TAP,
  volume =       "3",
  number =       "3",
  pages =        "239--261",
  month =        jul,
  year =         "2006",
  CODEN =        "????",
  DOI =          "http://doi.acm.org/10.1145/1166087.1166093",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Tue Oct 17 05:25:39 MDT 2006",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tap.bib",
  acknowledgement = ack-nhfb,
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Weidenbacher:2006:SSS,
  author =       "Ulrich Weidenbacher and Pierre Bayerl and Heiko
                 Neumann and Roland Fleming",
  title =        "Sketching shiny surfaces: {$3$D} shape extraction and
                 depiction of specular surfaces",
  journal =      j-TAP,
  volume =       "3",
  number =       "3",
  pages =        "262--285",
  month =        jul,
  year =         "2006",
  CODEN =        "????",
  DOI =          "http://doi.acm.org/10.1145/1166087.1166094",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Tue Oct 17 05:25:39 MDT 2006",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tap.bib",
  acknowledgement = ack-nhfb,
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Mantiuk:2006:PFC,
  author =       "Rafal Mantiuk and Karol Myszkowski and Hans-Peter
                 Seidel",
  title =        "A perceptual framework for contrast processing of high
                 dynamic range images",
  journal =      j-TAP,
  volume =       "3",
  number =       "3",
  pages =        "286--308",
  month =        jul,
  year =         "2006",
  CODEN =        "????",
  DOI =          "http://doi.acm.org/10.1145/1166087.1166095",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Tue Oct 17 05:25:39 MDT 2006",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tap.bib",
  acknowledgement = ack-nhfb,
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Dixon:2006:MAF,
  author =       "T. D. Dixon and E. F. Canga and J. M. Noyes and T.
                 Troscianko and S. G. Nikolov and D. R. Bull and
                 C. N. Canagarajah",
  title =        "Methods for the assessment of fused images",
  journal =      j-TAP,
  volume =       "3",
  number =       "3",
  pages =        "309--332",
  month =        jul,
  year =         "2006",
  CODEN =        "????",
  DOI =          "http://doi.acm.org/10.1145/1166087.1166096",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Tue Oct 17 05:25:39 MDT 2006",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tap.bib",
  acknowledgement = ack-nhfb,
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Schwaninger:2006:PPM,
  author =       "Adrian Schwaninger and Julia Vogel and Franziska Hofer
                 and Bernt Schiele",
  title =        "A psychophysically plausible model for typicality
                 ranking of natural scenes",
  journal =      j-TAP,
  volume =       "3",
  number =       "4",
  pages =        "333--353",
  month =        oct,
  year =         "2006",
  CODEN =        "????",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Sat Apr 14 10:50:15 MDT 2007",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tap.bib",
  acknowledgement = ack-nhfb,
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Balas:2006:RBR,
  author =       "Benjamin J. Balas and Pawan Sinha",
  title =        "Region-based representations for face recognition",
  journal =      j-TAP,
  volume =       "3",
  number =       "4",
  pages =        "354--375",
  month =        oct,
  year =         "2006",
  CODEN =        "????",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Sat Apr 14 10:50:15 MDT 2007",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tap.bib",
  acknowledgement = ack-nhfb,
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Holten:2006:PBS,
  author =       "Danny Holten and Jarke J. {Van Wijk} and Jean-Bernard
                 Martens",
  title =        "A perceptually based spectral model for isotropic
                 textures",
  journal =      j-TAP,
  volume =       "3",
  number =       "4",
  pages =        "376--398",
  month =        oct,
  year =         "2006",
  CODEN =        "????",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Sat Apr 14 10:50:15 MDT 2007",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tap.bib",
  acknowledgement = ack-nhfb,
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Ashikhmin:2006:RCT,
  author =       "Michael Ashikhmin and Jay Goyal",
  title =        "A reality check for tone-mapping operators",
  journal =      j-TAP,
  volume =       "3",
  number =       "4",
  pages =        "399--411",
  month =        oct,
  year =         "2006",
  CODEN =        "????",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Sat Apr 14 10:50:15 MDT 2007",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tap.bib",
  acknowledgement = ack-nhfb,
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Wilcox:2006:PSV,
  author =       "Laurie M. Wilcox and Robert S. Allison and Samuel
                 Elfassy and Cynthia Grelik",
  title =        "Personal space in virtual reality",
  journal =      j-TAP,
  volume =       "3",
  number =       "4",
  pages =        "412--428",
  month =        oct,
  year =         "2006",
  CODEN =        "????",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Sat Apr 14 10:50:15 MDT 2007",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tap.bib",
  acknowledgement = ack-nhfb,
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Devlin:2006:VCC,
  author =       "Kate Devlin and Alan Chalmers and Erik Reinhard",
  title =        "Visual calibration and correction for ambient
                 illumination",
  journal =      j-TAP,
  volume =       "3",
  number =       "4",
  pages =        "429--452",
  month =        oct,
  year =         "2006",
  CODEN =        "????",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Sat Apr 14 10:50:15 MDT 2007",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tap.bib",
  acknowledgement = ack-nhfb,
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Apfelbaum:2007:HAT,
  author =       "Henry Apfelbaum and Adar Pelah and Eli Peli",
  title =        "Heading assessment by ``tunnel vision'' patients and
                 control subjects standing or walking in a virtual
                 reality environment",
  journal =      j-TAP,
  volume =       "4",
  number =       "1",
  pages =        "??--??",
  month =        jan,
  year =         "2007",
  CODEN =        "????",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Sat Apr 14 10:50:16 MDT 2007",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tap.bib",
  acknowledgement = ack-nhfb,
  articleno =    "8",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Durgin:2007:SFP,
  author =       "Frank H. Durgin and Catherine Reed and Cara Tigue",
  title =        "Step frequency and perceived self-motion",
  journal =      j-TAP,
  volume =       "4",
  number =       "1",
  pages =        "??--??",
  month =        jan,
  year =         "2007",
  CODEN =        "????",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Sat Apr 14 10:50:16 MDT 2007",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tap.bib",
  acknowledgement = ack-nhfb,
  articleno =    "5",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Fink:2007:OAD,
  author =       "Philip W. Fink and Patrick S. Foo and William H.
                 Warren",
  title =        "Obstacle avoidance during walking in real and virtual
                 environments",
  journal =      j-TAP,
  volume =       "4",
  number =       "1",
  pages =        "??--??",
  month =        jan,
  year =         "2007",
  CODEN =        "????",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Sat Apr 14 10:50:16 MDT 2007",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tap.bib",
  acknowledgement = ack-nhfb,
  articleno =    "2",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Fortenbaugh:2007:GDC,
  author =       "Francesca C. Fortenbaugh and Sidhartha Chaudhury and
                 John C. Hicks and Lei Hao and Kathleen A. Turano",
  title =        "Gender differences in cue preference during path
                 integration in virtual environments",
  journal =      j-TAP,
  volume =       "4",
  number =       "1",
  pages =        "??--??",
  month =        jan,
  year =         "2007",
  CODEN =        "????",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Sat Apr 14 10:50:16 MDT 2007",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tap.bib",
  acknowledgement = ack-nhfb,
  articleno =    "6",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Frenz:2007:ETD,
  author =       "Harald Frenz and Markus Lappe and Marina Kolesnik and
                 Thomas B{\"u}hrmann",
  title =        "Estimation of travel distance from visual motion in
                 virtual environments",
  journal =      j-TAP,
  volume =       "4",
  number =       "1",
  pages =        "??--??",
  month =        jan,
  year =         "2007",
  CODEN =        "????",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Sat Apr 14 10:50:16 MDT 2007",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tap.bib",
  acknowledgement = ack-nhfb,
  articleno =    "3",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Lichtenstein:2007:FCI,
  author =       "Lee Lichtenstein and James Barabas and Russell L.
                 Woods and Eli Peli",
  title =        "A feedback-controlled interface for treadmill
                 locomotion in virtual environments",
  journal =      j-TAP,
  volume =       "4",
  number =       "1",
  pages =        "??--??",
  month =        jan,
  year =         "2007",
  CODEN =        "????",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Sat Apr 14 10:50:16 MDT 2007",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tap.bib",
  acknowledgement = ack-nhfb,
  articleno =    "7",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Mohler:2007:CLR,
  author =       "Betty J. Mohler and William B. Thompson and Sarah H.
                 Creem-Regehr and Peter Willemsen and Herbert L. {Pick,
                 Jr.} and John J. Rieser",
  title =        "Calibration of locomotion resulting from visual motion
                 in a treadmill-based virtual environment",
  journal =      j-TAP,
  volume =       "4",
  number =       "1",
  pages =        "??--??",
  month =        jan,
  year =         "2007",
  CODEN =        "????",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Sat Apr 14 10:50:16 MDT 2007",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tap.bib",
  acknowledgement = ack-nhfb,
  articleno =    "4",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Pelah:2007:EWR,
  author =       "Adar Pelah and Jan J. Koenderink",
  title =        "Editorial: Walking in real and virtual environments",
  journal =      j-TAP,
  volume =       "4",
  number =       "1",
  pages =        "??--??",
  month =        jan,
  year =         "2007",
  CODEN =        "????",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Sat Apr 14 10:50:16 MDT 2007",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tap.bib",
  acknowledgement = ack-nhfb,
  articleno =    "1",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Kuang:2007:EHR,
  author =       "Jiangtao Kuang and Hiroshi Yamaguchi and Changmeng Liu
                 and Garrett M. Johnson and Mark D. Fairchild",
  title =        "Evaluating {HDR} rendering algorithms",
  journal =      j-TAP,
  volume =       "4",
  number =       "2",
  pages =        "9:1--9:??",
  month =        jul,
  year =         "2007",
  CODEN =        "????",
  DOI =          "http://doi.acm.org/10.1145/1265957.1265958",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Mon Jun 16 14:16:46 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "A series of three experiments has been performed to
                 test both the preference and accuracy of high
                 dynamic-range (HDR) rendering algorithms in digital
                 photography applications. The goal was to develop a
                 methodology for testing a wide variety of previously
                 published tone-mapping algorithms for overall
                 preference and rendering accuracy. A number of
                 algorithms were chosen and evaluated first in a
                 paired-comparison experiment for overall image
                 preference. A rating-scale experiment was then designed
                 for further investigation of individual image
                 attributes that make up overall image preference. This
                 served to identify the correlations between image
                 attributes and the overall preference results obtained
                 from the first experiments. In a third experiment,
                 three real-world scenes with a diversity of dynamic
                 range and spatial configuration were designed and
                 captured to evaluate seven HDR rendering algorithms for
                 both their preference and accuracy performance by
                 comparing the appearance of the physical scenes and the
                 corresponding tone-mapped images directly. In this
                 series of experiments, a modified version of Durand
                 and Dorsey's bilateral-filter technique consistently performed well
                 for both preference and accuracy, suggesting that it is
                 a good candidate for a common algorithm that could be
                 included in future HDR algorithm testing evaluations.
                 The results of these experiments provide insight into
                 perceptual HDR image rendering and should inform
                 design strategies for spatial processing
                 and tone mapping. The results indicate ways to improve
                 and design more robust rendering algorithms for general
                 HDR scenes in the future. Moreover, the purpose of this
                 research was not simply to single out the ``best''
                 algorithms, but rather to establish a general,
                 psychophysics-based methodology to evaluate
                 HDR image-rendering algorithms. This paper provides an
                 overview of the many issues involved in an experimental
                 framework that can be used for these evaluations.",
  acknowledgement = ack-nhfb,
  articleno =    "9",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
  keywords =     "High dynamic-range imaging; psychophysical
                 experiments; tone-mapping algorithms evaluation",
}

@Article{Tan:2007:DIF,
  author =       "Hong Z. Tan and Mandayam A. Srinivasan and Charlotte
                 M. Reed and Nathaniel I. Durlach",
  title =        "Discrimination and identification of finger
                 joint-angle position using active motion",
  journal =      j-TAP,
  volume =       "4",
  number =       "2",
  pages =        "10:1--10:??",
  month =        jul,
  year =         "2007",
  CODEN =        "????",
  DOI =          "http://doi.acm.org/10.1145/1265957.1265959",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Mon Jun 16 14:16:46 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "The authors report six experiments on the human
                 ability to discriminate and identify finger joint-angle
                 positions using active motion. The PIP (proximal
                 interphalangeal) joint of the index finger was examined
                 in Exps. 1--3 and the MCP (metacarpophalangeal) joint
                 in Exps. 4--6. In Exp. 1, the just noticeable
                 difference (JND) of PIP joint-angle position was
                 measured when the MCP joint was either fully extended
                 or halfway bent. In Exp. 2, the JND of PIP joint-angle
                 position as a function of PIP joint-angle reference
                 position was measured when the PIP joint was almost
                 fully extended, halfway bent, or almost fully flexed.
                 In Exp. 3, the information transfer of PIP joint-angle
                 position was estimated with the MCP joint in a fully
                 extended position. In Exps. 4--6, the JND and the
                 information transfer of MCP joint-angle position were
                 studied with a similar experimental design. The results
                 show that the JNDs of the PIP joint-angle position were
                 roughly constant ($2.5^\circ$--$2.7^\circ$) independent
                 of the PIP joint-angle reference position or the MCP
                 joint-angle position used (Exps. 1 and 2). The JNDs of
                 the MCP joint-angle position, however, increased with
                 the flexion of both the PIP and MCP joints and ranged
                 from $1.7^\circ$ to $2.7^\circ$ (Exps. 4 and 5). The
                 information transfer of the PIP and MCP joint-angle
                 position were similar, indicating 3--4 perfectly
                 identifiable joint-angle positions for both joints
                 (Exps. 3 and 6). The results provide the basic data
                 needed for estimating, for example, the resolution of
                 fingertip position during active free motion. They are
                 compared to the results from previous studies on joint
                 position, length, and thickness perception.",
  acknowledgement = ack-nhfb,
  articleno =    "10",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
  keywords =     "discrimination; haptic perception; identification;
                 JND; Joint position; kinesthesis",
}
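
%%% Note (editorial aside, not quoted from the article): the last
%%% result above follows the standard absolute-identification reading
%%% of information transfer $IT$, measured in bits, under which
%%%
%%%     $n = 2^{IT}$
%%%
%%% stimulus categories can be perfectly identified; the reported
%%% $n = 3$--$4$ joint-angle positions thus correspond to
%%% $IT \approx \log_2 3$--$\log_2 4 \approx 1.6$--$2.0$ bits.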

@Article{Sprague:2007:MEV,
  author =       "Nathan Sprague and Dana Ballard and Al Robinson",
  title =        "Modeling embodied visual behaviors",
  journal =      j-TAP,
  volume =       "4",
  number =       "2",
  pages =        "11:1--11:??",
  month =        jul,
  year =         "2007",
  CODEN =        "????",
  DOI =          "http://doi.acm.org/10.1145/1265957.1265960",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Mon Jun 16 14:16:46 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "To make progess in understanding human visuomotor
                 behavior, we will need to understand its basic
                 components at an abstract level. One way to achieve
                 such an understanding would be to create a model of a
                 human that is sufficiently complex to generate such
                 behaviors. Recent technological advances allow
                 progress in this direction. Graphics models
                 that simulate extensive human capabilities can be used
                 as platforms from which to develop synthetic models of
                 visuomotor behavior. Currently, such models can capture
                 only a small portion of a full behavioral repertoire,
                 but for the behaviors that they do model, they can
                 describe complete visuomotor subsystems at a useful
                 level of detail. The value in doing so is that the
                 body's elaborate visuomotor structures greatly simplify
                 the specification of the abstract behaviors that guide
                 them. The net result is that, essentially, one is faced
                 with proposing an embodied ``operating system'' model
                 for picking the right set of abstract behaviors at each
                 instant. This paper outlines one such model. A
                 centerpiece of the model uses vision to aid the
                 behavior that has the most to gain from taking
                 environmental measurements. Preliminary tests of the
                 model against human performance in realistic VR
                 environments show that the model's main features are
                 reflected in human behavior.",
  acknowledgement = ack-nhfb,
  articleno =    "11",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
  keywords =     "reinforcement learning; visual attention; Visual
                 routines",
}

@Article{Williams:2007:FSS,
  author =       "Betsy Williams and Gayathri Narasimham and Claire
                 Westerman and John Rieser and Bobby Bodenheimer",
  title =        "Functional similarities in spatial representations
                 between real and virtual environments",
  journal =      j-TAP,
  volume =       "4",
  number =       "2",
  pages =        "12:1--12:??",
  month =        jul,
  year =         "2007",
  CODEN =        "????",
  DOI =          "http://doi.acm.org/10.1145/1265957.1265961",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Mon Jun 16 14:16:46 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "This paper presents results that demonstrate
                 functional similarities in subjects' access to spatial
                 knowledge (or spatial representation) between real and
                 virtual environments. Such representations are
                 important components of the transfer of reasoning
                 ability and knowledge between these two environments.
                 In particular, we present two experiments aimed at
                 investigating similarities in spatial knowledge derived
                 from exploring on foot both physical environments and
                 virtual environments presented through a head-mounted
                 display. In the first experiment, subjects were asked
                 to learn the locations of target objects in the real or
                 virtual environment and then rotate the perspective by
                 either physically locomoting to a new facing direction
                 or imagining moving. The latencies and errors were
                 generally worse after imagining locomoting and for
                 greater degrees of rotation in perspective; they did
                 not differ significantly across knowledge derived from
                 exploring the physical versus virtual environments. In
                 the second experiment, subjects were asked to imagine
                 simple rotations versus simple translations in
                 perspective. The errors and latencies indicated that
                 the to-be-imagined disparity was linearly related after
                 learning the physical and virtual environment. These
                 results demonstrate functional similarities in access
                 to knowledge of new perspective when it is learned by
                 exploring physical environments and virtual renderings
                 of the same environment.",
  acknowledgement = ack-nhfb,
  articleno =    "12",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
  keywords =     "space perception; Virtual reality (VR)",
}

@Article{Ho:2007:DET,
  author =       "Hsin-Ni Ho and Lynette A. Jones",
  title =        "Development and evaluation of a thermal display for
                 material identification and discrimination",
  journal =      j-TAP,
  volume =       "4",
  number =       "2",
  pages =        "13:1--13:??",
  month =        jul,
  year =         "2007",
  CODEN =        "????",
  DOI =          "http://doi.acm.org/10.1145/1265957.1265962",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Mon Jun 16 14:16:46 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "The objective of this study was to develop and
                 evaluate a thermal display that assists in object
                 identification in virtual environments by simulating
                 the thermal cues associated with making contact with
                 materials of different thermal properties. The
                 thermal display was developed based on a semi-infinite
                 body model. Three experiments were conducted to
                 evaluate the performance of the display. The first
                 experiment compared the ability of subjects to
                 identify various materials, which were presented
                 physically or simulated with the thermal display. The
                 second experiment examined the capacity of subjects to
                 discriminate between a real and simulated material
                 based on thermal cues. In the third experiment, the
                 changes in skin temperature that occurred when making
                 contact with real and simulated materials were measured
                 to evaluate how these compare to theoretical
                 predictions. The results indicated that there was no
                 significant difference in material identification and
                 discrimination when subjects were presented with real
                 or simulated materials. The changes in skin temperature
                 were comparable for real and simulated materials and
                 were related to the contact coefficient of the material
                 palpated, consistent with the semi-infinite body model.
                 These findings suggest that a thermal display is
                 capable of facilitating object recognition when visual
                 cues are limited.",
  acknowledgement = ack-nhfb,
  articleno =    "13",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
  keywords =     "hand--object interaction; Haptic interface; material
                 identification; semi-infinite body model; thermal
                 display; thermal feedback; thermal perception; virtual
                 environment",
}
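
%%% Note (editorial aside, not quoted from the article): in its
%%% standard form, the semi-infinite body model named above predicts
%%% that when skin (1) touches a material (2), the interface settles at
%%%
%%%     $T_c = (e_1 T_1 + e_2 T_2) / (e_1 + e_2)$,
%%%     where $e_i = \sqrt{k_i \rho_i c_i}$,
%%%
%%% with $k$ the thermal conductivity, $\rho$ the density, and $c$ the
%%% specific heat; $e$ is the ``contact coefficient'' the abstract
%%% refers to, so materials with larger $e$ pull the contact
%%% temperature further toward their own temperature.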

@Article{Thompson:2007:GE,
  author =       "William B. Thompson",
  title =        "Guest Editorial",
  journal =      j-TAP,
  volume =       "4",
  number =       "3",
  pages =        "14:1--14:??",
  month =        nov,
  year =         "2007",
  CODEN =        "????",
  DOI =          "http://doi.acm.org/10.1145/1278387.1278388",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Mon Jun 16 14:17:01 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tap.bib",
  acknowledgement = ack-nhfb,
  articleno =    "14",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Sundstedt:2007:PRP,
  author =       "Veronica Sundstedt and Diego Gutierrez and Oscar Anson
                 and Francesco Banterle and Alan Chalmers",
  title =        "Perceptual rendering of participating media",
  journal =      j-TAP,
  volume =       "4",
  number =       "3",
  pages =        "15:1--15:??",
  month =        nov,
  year =         "2007",
  CODEN =        "????",
  DOI =          "http://doi.acm.org/10.1145/1278387.1278389",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Mon Jun 16 14:17:01 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "High-fidelity image synthesis is the process of
                 computing images that are perceptually
                 indistinguishable from the real world they are
                 attempting to portray. Such a level of fidelity
                 requires that the physical processes of materials and
                 the behavior of light are accurately simulated. Most
                 computer graphics algorithms assume that light passes
                 freely between surfaces within an environment. However,
                 in many applications, we also need to take into account
                 how the light interacts with media, such as dust,
                 smoke, fog, etc., between the surfaces. The
                 computational requirements for calculating the
                 interaction of light with such participating media are
                 substantial. This process can take many hours, and
                 rendering effort is often spent on computing parts of
                 the scene that may not be perceived by the viewer. In
                 this paper, we present a novel perceptual strategy for
                 physically based rendering of participating media. By
                 using a combination of a saliency map with our new
                 extinction map (X map), we can significantly reduce
                 rendering times for inhomogeneous media. The visual
                 quality of the resulting images is validated using two
                 objective difference metrics and a subjective
                 psychophysical experiment. Although the average pixel
                 errors of these metrics are all less than 1\%, the
                 subjective validation indicates that the degradation in
                 quality is still noticeable for certain scenes. We thus
                 introduce and validate a novel light map (L map) that
                 accounts for salient features caused by multiple light
                 scattering around light sources.",
  acknowledgement = ack-nhfb,
  articleno =    "15",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
  keywords =     "attention; extinction map; light map; Participating
                 media; perception; saliency map; selective rendering",
}

@Article{Wallraven:2007:ERW,
  author =       "Christian Wallraven and Heinrich H. B{\"u}lthoff and
                 Douglas W. Cunningham and Jan Fischer and Dirk Bartz",
  title =        "Evaluation of real-world and computer-generated
                 stylized facial expressions",
  journal =      j-TAP,
  volume =       "4",
  number =       "3",
  pages =        "16:1--16:??",
  month =        nov,
  year =         "2007",
  CODEN =        "????",
  DOI =          "http://doi.acm.org/10.1145/1278387.1278390",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Mon Jun 16 14:17:01 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "The goal of stylization is to provide an abstracted
                 representation of an image that highlights specific
                 types of visual information. Recent advances in
                 computer graphics techniques have made it possible to
                 render many varieties of stylized imagery efficiently
                 making stylization into a useful technique, not only
                 for artistic, but also for visualization applications.
                 In this paper, we report results from two sets of
                 experiments that aim at characterizing the perceptual
                 impact and effectiveness of three different stylization
                 techniques in the context of dynamic facial
                 expressions. In the first set of experiments, animated
                 facial expressions are stylized using three common
                 techniques (brush, cartoon, and illustrative
                 stylization) and investigated using different
                 experimental measures. Going beyond the usual
                 questionnaire approach, these experiments compare the
                 techniques according to several criteria ranging from
                 subjective preference to task-dependent measures (such
                 as recognizability, intensity) allowing us to compare
                 behavioral and introspective approaches. The second set
                 of experiments uses the same stylization techniques on
                 real-world video sequences in order to compare the
                 effect of stylization on natural and artificial
                 stimuli. Our results shed light on how stylization of
                 image contents affects the perception and subjective
                 evaluation of both real and computer-generated facial
                 expressions.",
  acknowledgement = ack-nhfb,
  articleno =    "16",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
  keywords =     "avatar; Evaluation of facial animations; facial
                 expressions; perceptually adaptive graphics;
                 psychophysics; stylization",
}

@Article{Majumder:2007:PBC,
  author =       "Aditi Majumder and Sandy Irani",
  title =        "Perception-based contrast enhancement of images",
  journal =      j-TAP,
  volume =       "4",
  number =       "3",
  pages =        "17:1--17:??",
  month =        nov,
  year =         "2007",
  CODEN =        "????",
  DOI =          "http://doi.acm.org/10.1145/1278387.1278391",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Mon Jun 16 14:17:01 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "Study of contrast sensitivity of the human eye shows
                 that our suprathreshold contrast sensitivity follows
                 the Weber Law and, hence, increases in proportion to
                 the mean local luminance. In this
                 paper, we effectively apply this fact to design a
                 contrast-enhancement method for images that improves
                 the local image contrast by controlling the local image
                 gradient with a single parameter. Unlike previous
                 methods, we achieve this without explicit segmentation
                 of the image, either in the spatial (multiscale) or
                 frequency (multiresolution) domain. We pose the
                 contrast enhancement as an optimization problem that
                 maximizes the average local contrast of an image
                 strictly constrained by a perceptual constraint derived
                 directly from the Weber Law. We then propose a greedy
                 heuristic, controlled by a single parameter, to
                 approximate this optimization problem.",
  acknowledgement = ack-nhfb,
  articleno =    "17",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
  keywords =     "contrast enhancement; contrast sensitivity; Human
                 perception",
}
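
%%% Note (editorial aside, not quoted from the article): a sketch of
%%% the Weber-Law constraint the abstract describes, with symbols
%%% assumed here rather than taken from the paper. Weber's Law states
%%% that the just-noticeable luminance difference grows in proportion
%%% to the local mean luminance $L$,
%%%
%%%     $\Delta L / L = k$  (a constant),
%%%
%%% so an enhancement step that rescales a local gradient must keep the
%%% added difference within $k \cdot L$ to stay perceptually uniform;
%%% maximizing average local contrast subject to such a bound is the
%%% optimization posed above.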

@Article{Seward:2007:UVE,
  author =       "A. Elizabeth Seward and Daniel H. Ashmead and Bobby
                 Bodenheimer",
  title =        "Using virtual environments to assess time-to-contact
                 judgments from pedestrian viewpoints",
  journal =      j-TAP,
  volume =       "4",
  number =       "3",
  pages =        "18:1--18:??",
  month =        nov,
  year =         "2007",
  CODEN =        "????",
  DOI =          "http://doi.acm.org/10.1145/1278387.1278392",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Mon Jun 16 14:17:01 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "This paper describes the use of desktop and immersive
                 virtual environments to study judgments that
                 pedestrians make when deciding to cross a street. In
                 particular, we assess the ability of people to
                 discriminate and estimate time-to-contact (TTC) for
                 approaching vehicles under a variety of conditions.
                 Four experiments on TTC judgments are described. We
                 examine the effect of type
                 of vehicle, viewpoint, presentation mode, and TTC value
                 on TTC judgments. We find no significant effect of type
                 of vehicle or of viewpoint, extending prior work to
                 cover all views typically encountered by pedestrians.
                 Discrimination of short values for TTC judgments is
                 generally consistent with the literature, but
                 performance degrades significantly for long TTC values.
                 Finally, we find no significant difference between
                 judgments made in a desktop environment versus a
                 head-mounted display, indicating that tracking the
                 approaching vehicle with one's head does not aid
                 discrimination. In general, people appear to use
                 strategies similar to those that pedestrians use to
                 make real-world, street-crossing decisions.",
  acknowledgement = ack-nhfb,
  articleno =    "18",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
  keywords =     "time-to-contact (TTC); Virtual reality (VR)",
}

@Article{Vogel:2007:CNS,
  author =       "Julia Vogel and Adrian Schwaninger and Christian
                 Wallraven and Heinrich H. B{\"u}lthoff",
  title =        "Categorization of natural scenes: Local versus global
                 information and the role of color",
  journal =      j-TAP,
  volume =       "4",
  number =       "3",
  pages =        "19:1--19:??",
  month =        nov,
  year =         "2007",
  CODEN =        "????",
  DOI =          "http://doi.acm.org/10.1145/1278387.1278393",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Mon Jun 16 14:17:01 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "Categorization of scenes is a fundamental process of
                 human vision that allows us to efficiently and rapidly
                 analyze our surroundings. Several studies have explored
                 the processes underlying human scene categorization,
                 but they have focused on processing global image
                 information. In this study, we present both
                 psychophysical and computational experiments that
                 investigate the role of local versus global image
                 information in scene categorization. In a first set of
                 human experiments, categorization performance is tested
                 when only local or only global image information is
                 present. Our results suggest that humans rely on local,
                 region-based information as much as on global,
                 configural information. In addition, humans seem to
                 integrate both types of information for intact scene
                 categorization. In a set of computational experiments,
                 human performance is compared to two state-of-the-art
                 computer vision approaches that have been shown to be
                 psychophysically plausible and that model either local
                 or global information. In addition to the influence of
                 local versus global information, in a second series of
                 experiments, we investigated the effect of color on the
                 categorization performance of both the human observers
                 and the computational model. Analysis of the human data
                 suggests that color is an additional channel of
                 perceptual information that leads to better
                 categorization performance at the expense of increased
                 reaction times in the intact condition. However, it
                 does not affect reaction times when only local
                 information is present. When color is removed, the
                 employed computational model follows the relative
                 performance decrease of human observers for each scene
                 category and can thus be seen as a perceptually
                 plausible model for human scene categorization based on
                 local image information.",
  acknowledgement = ack-nhfb,
  articleno =    "19",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
  keywords =     "computational gist; computational modeling; global
                 configural information; local region-based information;
                 scene classification; Scene perception; semantic
                 modeling",
}

@Article{Akyuz:2008:PET,
  author =       "Ahmet O{\u{g}}uz Aky{\"u}z and Erik Reinhard",
  title =        "Perceptual evaluation of tone-reproduction operators
                 using the Cornsweet--Craik--{O}'Brien illusion",
  journal =      j-TAP,
  volume =       "4",
  number =       "4",
  pages =        "1:1--1:??",
  month =        jan,
  year =         "2008",
  CODEN =        "????",
  DOI =          "http://doi.acm.org/10.1145/1278760.1278761",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Mon Jun 16 14:17:12 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "High dynamic-range images cannot be directly displayed
                 on conventional display devices, but have to be
                 tone-mapped first. For this purpose, a large set of
                 tone-reproduction operators is currently available.
                 However, it is unclear which operator is most suitable
                 for any given task. In addition, different tasks may
                 place different requirements upon each operator. In
                 this paper we evaluate several tone-reproduction
                 operators using a paradigm that does not require the
                 construction of a real high dynamic-range scene, nor
                 does it require the availability of a high
                 dynamic-range display device. The user study involves a
                 task that relates to the evaluation of contrast, which
                 is an important attribute that needs to be preserved
                 under tone reproduction.",
  acknowledgement = ack-nhfb,
  articleno =    "1",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
  keywords =     "dynamic-range compression; high dynamic-range imaging;
                 Tone-mapping operators; visual psychophysics",
}

@Article{Radun:2008:CQI,
  author =       "Jenni Radun and Tuomas Leisti and Jukka H{\"a}kkinen
                 and Harri Ojanen and Jean-Luc Olives and Tero Vuori and
                 G{\"o}te Nyman",
  title =        "Content and quality: Interpretation-based estimation
                 of image quality",
  journal =      j-TAP,
  volume =       "4",
  number =       "4",
  pages =        "2:1--2:??",
  month =        jan,
  year =         "2008",
  CODEN =        "????",
  DOI =          "http://doi.acm.org/10.1145/1278760.1278762",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Mon Jun 16 14:17:12 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "Test image contents affect subjective image-quality
                 evaluations. Psychometric methods might show that
                 contents have an influence on image quality, but they
                  do not tell us what this influence is like, i.e., how the
                 contents influence image quality. To obtain a holistic
                 description of subjective image quality, we have used
                 an interpretation-based quality (IBQ) estimation
                 approach, which combines qualitative and quantitative
                 methodology. The method enables simultaneous
                 examination of psychometric results and the subjective
                 meanings related to the perceived image-quality
                 changes. In this way, the relationship between
                 subjective feature detection, subjective preferences,
                 and interpretations are revealed. We report a study
                 that shows that different impressions are conveyed in
                 five test image contents after similar sharpness
                 variations. Thirty na{\"\i}ve observers classified and
                  freely described the images, after which magnitude
                 estimation was used to verify that they distinguished
                 the changes in the images. The data suggest that in the
                 case of high image quality, the test image selection is
                 crucial. If subjective evaluation is limited only to
                 technical defects in test images, important subjective
                  information about the image-quality experience is lost. The
                 approach described here can be used to examine image
                 quality and it will help image scientists to evaluate
                 their test images.",
  acknowledgement = ack-nhfb,
  articleno =    "2",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
  keywords =     "image contents; Image quality; qualitative
                 methodology; subjective measurement",
}

@Article{VandenBerg:2008:PDI,
  author =       "Ronald {Van den Berg} and Frans W. Cornelissen and Jos
                 B. T. M. Roerdink",
  title =        "Perceptual dependencies in information visualization
                 assessed by complex visual search",
  journal =      j-TAP,
  volume =       "4",
  number =       "4",
  pages =        "3:1--3:??",
  month =        jan,
  year =         "2008",
  CODEN =        "????",
  DOI =          "http://doi.acm.org/10.1145/1278760.1278763",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Mon Jun 16 14:17:12 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "A common approach for visualizing data sets is to map
                 them to images in which distinct data dimensions are
                 mapped to distinct visual features, such as color, size
                 and orientation. Here, we consider visualizations in
                 which different data dimensions should receive equal
                 weight and attention. Many of the end-user tasks
                 performed on these images involve a form of visual
                 search. Often, it is simply assumed that features can
                 be judged independently of each other in such tasks.
                 However, there is evidence for perceptual dependencies
                 when simultaneously presenting multiple features. Such
                 dependencies could potentially affect information
                 visualizations that contain combinations of features
                 for encoding information and, thereby, bias subjects
                 into unequally weighting the relevance of different
                 data dimensions. We experimentally assess (1) the
                 presence of judgment dependencies in a visualization
                 task (searching for a target node in a node-link
                 diagram) and (2) how feature contrast relates to
                 salience. From a visualization point of view, our most
                 relevant findings are that (a) to equalize saliency
                 (and thus bottom-up weighting) of size and color, color
                 contrasts have to become very low. Moreover,
                 orientation is less suitable for representing
                 information that consists of a large range of data
                 values, because it does not show a clear relationship
                 between contrast and salience; (b) color and size are
                 features that can be used independently to represent
                 information, at least as far as the range of colors
                  that were used in our study is concerned; (c) the
                 concept of (static) feature salience hierarchies is
                 wrong; how salient a feature is compared to another is
                 not fixed, but a function of feature contrasts; (d)
                 final decisions appear to be as good an indicator of
                 perceptual performance as indicators based on measures
                 obtained from individual fixations. Eye tracking,
                 therefore, does not necessarily present a benefit for
                 user studies that aim at evaluating performance in
                 search tasks.",
  acknowledgement = ack-nhfb,
  articleno =    "3",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
  keywords =     "Color; feature hierarchy; feature interaction; human
                 vision; information visualization; node-link diagrams;
                 orientation; perceptual dependencies; psychophysics;
                 visual features; visual search",
}

@Article{Wallraven:2008:EPR,
  author =       "Christian Wallraven and Martin Breidt and Douglas W.
                 Cunningham and Heinrich H. B{\"u}lthoff",
  title =        "Evaluating the perceptual realism of animated facial
                 expressions",
  journal =      j-TAP,
  volume =       "4",
  number =       "4",
  pages =        "4:1--4:??",
  month =        jan,
  year =         "2008",
  CODEN =        "????",
  DOI =          "http://doi.acm.org/10.1145/1278760.1278764",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Mon Jun 16 14:17:12 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "The human face is capable of producing an astonishing
                 variety of expressions---expressions for which
                 sometimes the smallest difference changes the perceived
                 meaning considerably. Producing realistic-looking
                 facial animations that are able to transmit this degree
                 of complexity continues to be a challenging research
                 topic in computer graphics. One important question that
                 remains to be answered is: When are facial animations
                 good enough? Here we present an integrated framework in
                 which psychophysical experiments are used in a first
                 step to systematically evaluate the perceptual quality
                 of several different computer-generated animations with
                 respect to real-world video sequences. The first
                 experiment provides an evaluation of several animation
                 techniques, exposing specific animation parameters that
                 are important to achieve perceptual fidelity. In a
                 second experiment, we then use these benchmarked
                 animation techniques in the context of perceptual
                 research in order to systematically investigate the
                 spatiotemporal characteristics of expressions. A third
                 and final experiment uses the quality measures that
                 were developed in the first two experiments to examine
                 the perceptual impact of changing facial features to
                 improve the animation techniques. Using such an
                 integrated approach, we are able to provide important
                 insights into facial expressions for both the
                 perceptual and computer graphics community.",
  acknowledgement = ack-nhfb,
  articleno =    "4",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
  keywords =     "3D-scanning; avatar; evaluation of facial animations;
                 perceptually adaptive graphics; psychophysics;
                 recognition",
}

@Article{Jagnow:2008:EMA,
  author =       "Robert Jagnow and Julie Dorsey and Holly Rushmeier",
  title =        "Evaluation of methods for approximating shapes used to
                 synthesize {$3$D} solid textures",
  journal =      j-TAP,
  volume =       "4",
  number =       "4",
  pages =        "5:1--5:??",
  month =        jan,
  year =         "2008",
  CODEN =        "????",
  DOI =          "http://doi.acm.org/10.1145/1278760.1278765",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Mon Jun 16 14:17:12 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "In modern computer graphics applications, textures
                 play an important role in conveying the appearance of
                 real-world materials. But while surface appearance can
                 often be effectively captured with a photograph, it is
                 difficult to use example imagery to synthesize fully
                 three-dimensional (3D) solid textures that are
                 perceptually similar to their inputs. Specifically,
                 this research focuses on human perception of 3D solid
                 textures composed of aggregate particles in a binding
                 matrix. Holding constant an established algorithm for
                 approximating particle distributions, we examine the
                 problem of estimating particle shape. We consider four
                 methods for approximating plausible particle
                 shapes---including two methods of our own contribution.
                 We compare the performance of these methods under a
                 variety of input conditions using automated,
                 perceptually motivated metrics, as well as a
                 psychophysical experiment. In the course of assessing
                 the relative performance of the four algorithms, we
                 also evaluate the reliability of the automated metrics
                 in predicting the results of the experiment.",
  acknowledgement = ack-nhfb,
  articleno =    "5",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
  keywords =     "Shape estimation; shape perception; solid textures;
                 texture synthesis; volumetric textures",
}

@Article{Klatzky:2008:EAR,
  author =       "Roberta L. Klatzky and Bing Wu and Damion Shelton and
                 George Stetten",
  title =        "Effectiveness of augmented-reality visualization
                 versus cognitive mediation for learning actions in near
                 space",
  journal =      j-TAP,
  volume =       "5",
  number =       "1",
  pages =        "1:1--1:??",
  month =        jan,
  year =         "2008",
  CODEN =        "????",
  DOI =          "http://doi.acm.org/10.1145/1279640.1279641",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Mon Jun 16 14:17:20 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "The present study examined the impact of
                 augmented-reality visualization, in comparison to
                 conventional ultrasound (CUS), on the learning of
                 ultrasound-guided needle insertion. Whereas CUS
                 requires cognitive processes for localizing targets,
                 our augmented-reality device, called the ``sonic
                  flashlight'' (SF), enables direct perceptual guidance.
                 Participants guided a needle to an ultrasound-localized
                 target within opaque fluid. In three experiments, the
                 SF showed higher accuracy and lower variability in
                 aiming and endpoint placements than did CUS. The SF,
                 but not CUS, readily transferred to new targets and
                 starting points for action. These effects were evident
                 in visually guided action (needle and target
                 continuously visible) and visually directed action
                 (target alone visible). The results have application to
                 learning to visualize surgical targets through
                 ultrasound.",
  acknowledgement = ack-nhfb,
  articleno =    "1",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
  keywords =     "augmented reality; learning; motor control;
                 Perception; spatial cognition",
}

@Article{Ware:2008:VGT,
  author =       "Colin Ware and Peter Mitchell",
  title =        "Visualizing graphs in three dimensions",
  journal =      j-TAP,
  volume =       "5",
  number =       "1",
  pages =        "2:1--2:??",
  month =        jan,
  year =         "2008",
  CODEN =        "????",
  DOI =          "http://doi.acm.org/10.1145/1279640.1279642",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Mon Jun 16 14:17:20 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "It has been known for some time that larger graphs can
                 be interpreted if laid out in 3D and displayed with
                 stereo and/or motion depth cues to support spatial
                 perception. However, prior studies were carried out
                 using displays that provided a level of detail far
                 short of what the human visual system is capable of
                 resolving. Therefore, we undertook a graph
                 comprehension study using a very high resolution
                 stereoscopic display. In our first experiment, we
                 examined the effect of stereoscopic display, kinetic
                 depth, and using 3D tubes versus lines to display the
                 links. The results showed a much greater benefit for 3D
                 viewing than previous studies. For example, with both
                 motion and stereoscopic depth cues, unskilled observers
                  could see paths between nodes in 333-node graphs with
                 less than a 10\% error rate. Skilled observers could
                 see up to a 1000-node graph with less than a 10\% error
                 rate. This represented an order of magnitude increase
                 over 2D display. In our second experiment, we varied
                 both nodes and links to understand the constraints on
                 the number of links and the size of graph that can be
                 reliably traced. We found the difference between number
                 of links and number of nodes to best account for error
                 rates and suggest that this is evidence for a
                 ``perceptual phase transition.'' These findings are
                 discussed in terms of their implications for
                 information display.",
  acknowledgement = ack-nhfb,
  articleno =    "2",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
  keywords =     "graph visualization; network visualization;
                 stereoscopic displays; Visualization",
}

@Article{Elhelw:2008:GBS,
  author =       "Mohamed Elhelw and Marios Nicolaou and Adrian Chung
                 and Guang-Zhong Yang and M. Stella Atkins",
  title =        "A gaze-based study for investigating the perception of
                 visual realism in simulated scenes",
  journal =      j-TAP,
  volume =       "5",
  number =       "1",
  pages =        "3:1--3:??",
  month =        jan,
  year =         "2008",
  CODEN =        "????",
  DOI =          "http://doi.acm.org/10.1145/1279640.1279643",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Mon Jun 16 14:17:20 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "Visual realism has been a major objective of computer
                 graphics since the inception of the field. However, the
                 perception of visual realism is not a well-understood
                 process and is usually attributed to a combination of
                 visual cues and image features that are difficult to
                 define or measure. For highly complex images, the
                 problem is even more involved. The purpose of this
                 paper is to present a study based on eye tracking for
                 investigating the perception of visual realism of
                 static images with different visual qualities. The
                 eye-fixation clusters helped to define salient image
                 features corresponding to 3D surface details and light
                 transfer properties that attract observers' attention.
                 This enabled the definition and categorization of image
                 attributes affecting the perception of photorealism.
                 The dynamics of the visual behavior of different
                 observer groups were examined by analyzing saccadic eye
                 movements. We also demonstrated how the different image
                 categories used in the experiments were perceived with
                 varying degrees of visual realism. The results
                 presented can be used as a basis for investigating the
                 impact of individual image features on the perception
                 of visual realism. This study suggests that post-recall
                 or simple abstraction of visual experience is not
                  accurate, and that the use of eye tracking provides an
                 effective way of determining relevant features that
                 affect visual realism, thus allowing for improved
                 rendering techniques that target these features.",
  acknowledgement = ack-nhfb,
  articleno =    "3",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
  keywords =     "eye tracking; human--computer interaction;
                 photorealistic rendering; simulation environment;
                 Visual perception; visual realism",
}

@Article{Palmer:2008:EAT,
  author =       "Evan M. Palmer and Timothy C. Clausner and Philip J.
                 Kellman",
  title =        "Enhancing air traffic displays via perceptual cues",
  journal =      j-TAP,
  volume =       "5",
  number =       "1",
  pages =        "4:1--4:??",
  month =        jan,
  year =         "2008",
  CODEN =        "????",
  DOI =          "http://doi.acm.org/10.1145/1279640.1279644",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Mon Jun 16 14:17:20 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "We examined graphical representations of aircraft
                 altitude in simulated air traffic control (ATC)
                 displays. In two experiments, size and contrast cues
                 correlated with altitude improved participants' ability
                 to detect future aircraft collisions (conflicts).
                 Experiment 1 demonstrated that, across several set
                 sizes, contrast and size cues to altitude improved
                 accuracy at identifying conflicts. Experiment 2
                 demonstrated that graphical cues for representing
                 altitude both improved accuracy and reduced search time
                 for finding conflicts in large set size displays. The
                 addition of size and contrast cues to ATC displays may
                 offer specific benefits in aircraft conflict
                 detection.",
  acknowledgement = ack-nhfb,
  articleno =    "4",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
  keywords =     "air traffic control; applied cognitive science;
                 Human--computer interaction; visualization",
}

@Article{Watters:2008:VDL,
  author =       "Paul Watters and Frances Martin and H. Steffen
                 Stripf",
  title =        "Visual detection of {LSB}-encoded natural image
                 steganography",
  journal =      j-TAP,
  volume =       "5",
  number =       "1",
  pages =        "5:1--5:??",
  month =        jan,
  year =         "2008",
  CODEN =        "????",
  DOI =          "http://doi.acm.org/10.1145/1279640.1328775",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Mon Jun 16 14:17:20 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "Many steganographic systems embed hidden messages
                 inside the least significant bit layers of colour
                 natural images. The presence of these messages can be
                 difficult to detect by using statistical steganalysis.
                 However, visual steganalysis by humans may be more
                 successful in natural image discrimination. This study
                 examined whether humans could detect least-significant
                 bit steganography in 15 color natural images from the
                 VisTex database using a controlled same/different task
                  ($N = 58$) and a yes/no task ($N = 61$). While
                  $d' > 1$ was observed for color layers 4--8, layers
                  1--3 had $d' < 1$ in both experiments. Thus,
                 layers 1--3 appear to be highly resistant to visual
                 steganalysis.",
  acknowledgement = ack-nhfb,
  articleno =    "5",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
  keywords =     "counterterrorism; Steganography",
}
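
%%% [Bibliographer's sketch, not part of the ACM record:] the $d'$
%%% values quoted in the abstract above are the standard
%%% signal-detection sensitivity index. A minimal Python
%%% illustration, assuming scipy is available; the names are
%%% illustrative, not the authors' code:
%%%
%%%     from scipy.stats import norm
%%%
%%%     def d_prime(hit_rate, false_alarm_rate):
%%%         # d' = Z(hits) - Z(false alarms), where Z is the inverse
%%%         # standard normal CDF (scipy's norm.ppf)
%%%         return norm.ppf(hit_rate) - norm.ppf(false_alarm_rate)
%%%
%%%     # d_prime(0.8, 0.3) ~= 1.37, i.e., d' > 1: detectable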

@Article{Reinhard:2008:E,
  author =       "Erik Reinhard and Heinrich B{\"u}lthoff",
  title =        "Editorial",
  journal =      j-TAP,
  volume =       "5",
  number =       "2",
  pages =        "6:1--6:??",
  month =        may,
  year =         "2008",
  CODEN =        "????",
  DOI =          "http://doi.acm.org/10.1145/1279920.1361703",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Mon Jun 16 14:17:31 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tap.bib",
  acknowledgement = ack-nhfb,
  articleno =    "6",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Wang:2008:TSP,
  author =       "Qi Wang and Vincent Hayward",
  title =        "Tactile synthesis and perceptual inverse problems seen
                 from the viewpoint of contact mechanics",
  journal =      j-TAP,
  volume =       "5",
  number =       "2",
  pages =        "7:1--7:??",
  month =        may,
  year =         "2008",
  CODEN =        "????",
  DOI =          "http://doi.acm.org/10.1145/1279920.1279921",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Mon Jun 16 14:17:31 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "A contact-mechanics analysis was used to explain a
                 tactile illusion engendered by straining the fingertip
                 skin tangentially in a progressive wave pattern
                 resulting in the perception of a moving undulating
                 surface. We derived the strain tensor field induced by
                 a sinusoidal surface sliding on a finger as well as the
                 field created by a tactile transducer array deforming
                 the fingerpad skin by lateral traction. We found that
                 the first field could be well approximated by the
                 second. Our results have several implications. First,
                 tactile displays using lateral skin deformation can
                 generate tactile sensations similar to those using
                 normal skin deformation. Second, a synthesis approach
                 can achieve this result if some constraints on the
                 design of tactile stimulators are met. Third, the
                 mechanoreceptors embedded in the skin must respond to
                 the deviatoric part of the strain tensor field and not
                 to its volumetric part. Finally, many tactile stimuli
                 might represent, for the brain, an inverse problem to
                  be solved; specific examples of such ``tactile
                  metameres'' are given.",
  acknowledgement = ack-nhfb,
  articleno =    "7",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
  keywords =     "computational tactile perception; contact mechanics;
                 Haptics; Lateral skin deformation; Tactile sensing;
                 Tactile synthesis; tactile transducers arrays",
}
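
%%% [A note on the deviatoric/volumetric split invoked in the
%%% abstract above:] any strain tensor decomposes into a volume-change
%%% part and a shape-change (deviatoric) part. A minimal numpy sketch
%%% of that standard decomposition, for illustration only:
%%%
%%%     import numpy as np
%%%
%%%     def split_strain(eps):
%%%         # eps: 3x3 symmetric strain tensor
%%%         vol = (np.trace(eps) / 3.0) * np.eye(3)  # volumetric part
%%%         dev = eps - vol                          # deviatoric part
%%%         return vol, dev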

@Article{Jay:2008:UHC,
  author =       "Caroline Jay and Robert Stevens and Roger Hubbold and
                 Mashhuda Glencross",
  title =        "Using haptic cues to aid nonvisual structure
                 recognition",
  journal =      j-TAP,
  volume =       "5",
  number =       "2",
  pages =        "8:1--8:??",
  month =        may,
  year =         "2008",
  CODEN =        "????",
  DOI =          "http://doi.acm.org/10.1145/1279920.1279922",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Mon Jun 16 14:17:31 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "Retrieving information presented visually is difficult
                 for visually disabled users. Current accessibility
                 technologies, such as screen readers, fail to convey
                 presentational layout or structure. Information
                 presented in graphs or images is almost impossible to
                 convey through speech alone. In this paper, we present
                 the results of an experimental study investigating the
                 role of touch (haptic) and auditory cues in aiding
                 structure recognition when visual presentation is
                 missing. We hypothesize that by guiding users toward
                 nodes in a graph structure using force fields, users
                 will find it easier to recognize overall structure.
                 Nine participants were asked to explore simple 3D
                 structures containing nodes (spheres or cubes) laid out
                 in various spatial configurations and asked to identify
                 the nodes and draw their overall structure. Various
                 combinations of haptic and auditory feedback were
                 explored. Our results demonstrate that haptic cues
                 significantly helped participants to quickly recognize
                 nodes and structure. Surprisingly, auditory cues alone
                 did not speed up node recognition; however, when they
                 were combined with haptics both node identification and
                 structure recognition significantly improved. This
                 result demonstrates that haptic feedback plays an
                 important role in enabling people to recall spatial
                 layout.",
  acknowledgement = ack-nhfb,
  articleno =    "8",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
  keywords =     "accessibility; haptic perception; Multimodal cues;
                 visual disability",
}

@Article{Peters:2008:ACT,
  author =       "Robert J. Peters and Laurent Itti",
  title =        "Applying computational tools to predict gaze direction
                 in interactive visual environments",
  journal =      j-TAP,
  volume =       "5",
  number =       "2",
  pages =        "9:1--9:??",
  month =        may,
  year =         "2008",
  CODEN =        "????",
  DOI =          "http://doi.acm.org/10.1145/1279920.1279923",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Mon Jun 16 14:17:31 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "Future interactive virtual environments will be
                 ``attention-aware,'' capable of predicting, reacting
                 to, and ultimately influencing the visual attention of
                 their human operators. Before such environments can be
                 realized, it is necessary to operationalize our
                 understanding of the relevant aspects of visual
                 perception, in the form of fully automated
                 computational heuristics that can efficiently identify
                 locations that would attract human gaze in complex
                 dynamic environments. One promising approach to
                 designing such heuristics draws on ideas from
                 computational neuroscience. We compared several
                 neurobiologically inspired heuristics with eye-movement
                 recordings from five observers playing video games, and
                 found that human gaze was better predicted by
                 heuristics that detect outliers from the global
                 distribution of visual features than by purely local
                 heuristics. Heuristics sensitive to dynamic events
                 performed best overall. Further, heuristic prediction
                 power differed more between games than between
                 different human observers. While other factors clearly
                 also influence eye position, our findings suggest that
                 simple neurally inspired algorithmic methods can
                 account for a significant portion of human gaze
                 behavior in a naturalistic, interactive setting. These
                 algorithms may be useful in the implementation of
                 interactive virtual environments, both to predict the
                 cognitive state of human operators, as well as to
                 effectively endow virtual agents in the system with
                 humanlike visual behavior.",
  acknowledgement = ack-nhfb,
  articleno =    "9",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
  keywords =     "Active vision; computational modeling; eye-movements;
                 immersive environments; video games; visual attention",
}
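
%%% [One simple reading of the ``outliers from the global feature
%%% distribution'' heuristic favored in the abstract above, sketched
%%% with numpy. The paper's actual heuristics are richer (multiscale,
%%% multifeature, dynamic), so treat this as illustrative:]
%%%
%%%     import numpy as np
%%%
%%%     def outlier_salience(feature_map):
%%%         # Score each location by how far its feature value sits
%%%         # from the map-wide distribution: globally rare values
%%%         # are salient; locally busy but common ones are not.
%%%         mu, sigma = feature_map.mean(), feature_map.std()
%%%         return np.abs(feature_map - mu) / (sigma + 1e-9)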

@Article{Tarr:2008:IFA,
  author =       "Michael J. Tarr and Athinodoros S. Georghiades and
                 Cullen D. Jackson",
  title =        "Identifying faces across variations in lighting:
                 Psychophysics and computation",
  journal =      j-TAP,
  volume =       "5",
  number =       "2",
  pages =        "10:1--10:??",
  month =        may,
  year =         "2008",
  CODEN =        "????",
  DOI =          "http://doi.acm.org/10.1145/1279920.1279924",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Mon Jun 16 14:17:31 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "Humans have the ability to identify objects under
                 varying lighting conditions with extraordinary
                 accuracy. We investigated the behavioral aspects of
                 this ability and compared it to the performance of the
                 illumination cones (IC) model of Belhumeur and Kriegman
                 [1998]. In five experiments, observers learned 10 faces
                 under a small subset of illumination directions. We
                 then tested observers' recognition ability under
                 different illuminations. Across all experiments,
                 recognition performance was found to be dependent on
                 the distance between the trained and tested
                 illumination directions. This effect was modulated by
                 the nature of the trained illumination directions.
                 Generalizations from frontal illuminations were
                 different than generalizations from extreme
                 illuminations. Similarly, the IC model was also
                 sensitive to whether the trained images were
                 near-frontal or extreme. Thus, we find that the nature
                 of the images in the training set affects the accuracy
                 of an object's representation under variable lighting
                 for both humans and the model. Beyond this general
                 correspondence, the microstructure of the
                 generalization patterns for both humans and the IC
                 model were remarkably similar, suggesting that the two
                 systems may employ related algorithms.",
  acknowledgement = ack-nhfb,
  articleno =    "10",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
  keywords =     "face recognition; human psychophysics; Illumination
                 invariance; image-based models; object recognition",
}

@Article{Bicego:2008:DFC,
  author =       "Manuele Bicego and Enrico Grosso and Andrea Lagorio
                 and Gavin Brelstaff and Linda Brodo and Massimo
                 Tistarelli",
  title =        "Distinctiveness of faces: {A} computational approach",
  journal =      j-TAP,
  volume =       "5",
  number =       "2",
  pages =        "11:1--11:??",
  month =        may,
  year =         "2008",
  CODEN =        "????",
  DOI =          "http://doi.acm.org/10.1145/1279920.1279925",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Mon Jun 16 14:17:31 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "This paper develops and demonstrates an original
                 approach to face-image analysis based on identifying
                 distinctive areas of each individual's face by its
                 comparison to others in the population. The method
                  differs from most others---which we refer to as unary
                 ---where salient regions are defined by analyzing only
                 images of the same individual. We extract a set of
                 multiscale patches from each face image before
                 projecting them into a common feature space. The degree
                 of ``distinctiveness'' of any patch depends on its
                 distance in feature space from patches mapped from
                 other individuals. First a pairwise analysis is
                 developed and then a simple generalization to the
                 multiple-face case is proposed. A perceptual
                  experiment involving 45 observers indicates that
                  the method is fairly compatible with how humans mark
                 faces as distinct. A quantitative example of face
                 authentication is also performed in order to show the
                 essential role played by the distinctive information. A
                 comparative analysis shows that performance of our
                 n-ary approach is as good as several contemporary
                 unary, or binary, methods, while tapping a
                 complementary source of information. Furthermore, we
                 show it can also provide a useful degree of
                 illumination invariance.",
  acknowledgement = ack-nhfb,
  articleno =    "11",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
  keywords =     "face authentication; illumination changes; log-polar
                 representation",
}

@Article{Grave:2008:TMO,
  author =       "Justine Grave and Roland Bremond",
  title =        "A tone-mapping operator for road visibility
                 experiments",
  journal =      j-TAP,
  volume =       "5",
  number =       "2",
  pages =        "12:1--12:??",
  month =        may,
  year =         "2008",
  CODEN =        "????",
  DOI =          "http://doi.acm.org/10.1145/1279920.1361704",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Mon Jun 16 14:17:31 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "One may wish to use computer graphic images to carry
                 out road visibility studies. Unfortunately, most
                 display devices still have a limited luminance dynamic
                 range, especially in driving simulators. In this paper,
                 we propose a tone-mapping operator (TMO) to compress
                 the luminance dynamic range while preserving the
                 driver's performance for a visual task relevant for a
                 driving situation. We address three display issues of
                 some consequences for road image display: luminance
                 dynamics, image quantization, and high minimum
                 displayable luminance. Our TMO characterizes the
                 effects of local adaptation with a bandpass
                 decomposition of the image using a Laplacian pyramid,
                 and processes the levels separately in order to mimic
                 the human visual system. The contrast perception model
                 uses the visibility level, a usual index in road
                 visibility engineering applications. To assess our
                 algorithm, a psychophysical experiment devoted to a
                 target detection task was designed. Using a Landolt
                 ring, the visual performances of 30 observers were
                 measured: they stared first at a high-dynamic range
                 image and then at the same image processed by a TMO and
                 displayed on a low-dynamic range monitor, for
                 comparison. The evaluation was completed with a visual
                 appearance evaluation. Our operator gives good
                 performances for three typical road situations (one in
                 daylight and two at night), after comparison with four
                 standard TMOs from the literature. The psychovisual
                 assessment of our TMO is limited to these driving
                 situations.",
  acknowledgement = ack-nhfb,
  articleno =    "12",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
  keywords =     "HDR images; psychophysics; road visibility; visual
                 performance",
}
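
%%% [The bandpass decomposition mentioned in the abstract above can
%%% be approximated with differences of Gaussian blurs. A minimal
%%% Python/scipy sketch (undecimated, so a pyramid only in spirit;
%%% not the authors' implementation):]
%%%
%%%     import numpy as np
%%%     from scipy.ndimage import gaussian_filter
%%%
%%%     def bandpass_levels(luminance, levels=4, sigma=2.0):
%%%         # Each level keeps what one extra blur removes; the
%%%         # residual low-pass image comes last.
%%%         bands, current = [], luminance.astype(float)
%%%         for _ in range(levels):
%%%             low = gaussian_filter(current, sigma)
%%%             bands.append(current - low)
%%%             current = low
%%%         bands.append(current)
%%%         return bands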

@Article{Nees:2008:DDT,
  author =       "Michael A. Nees and Bruce N. Walker",
  title =        "Data density and trend reversals in auditory graphs:
                 Effects on point-estimation and trend-identification
                 tasks",
  journal =      j-TAP,
  volume =       "5",
  number =       "3",
  pages =        "13:1--13:??",
  month =        aug,
  year =         "2008",
  CODEN =        "????",
  DOI =          "http://doi.acm.org/10.1145/1402236.1402237",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Mon Sep 15 19:02:24 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "Auditory graphs --- displays that represent
                 quantitative information with sound --- have the
                 potential to make data (and therefore science) more
                 accessible for diverse user populations. No research to
                 date, however, has systematically addressed the
                 attributes of data that contribute to the complexity
                 (the ease or difficulty of comprehension) of auditory
                 graphs. A pair of studies examined the role of data
                 density (i.e., the number of discrete data points
                 presented per second) and the number of trend reversals
                 for both point-estimation and trend-identification
                 tasks with auditory graphs. For the point-estimation
                 task, more trend reversals led to performance
                 decrements. For the trend-identification task, a large
                 main effect was again observed for trend reversals, but
                 an interaction suggested that the effect of the number
                 of trend reversals was different across lower data
                 densities (i.e., as density increased from 1 to 2 data
                 points per second). Results are discussed in terms of
                 data sonification applications and rhythmic theories of
                 auditory pattern perception.",
  acknowledgement = ack-nhfb,
  articleno =    "13",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
  keywords =     "auditory display; Auditory graphs; sonification",
}
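
%%% [For readers unfamiliar with auditory graphs: the basic rendering
%%% maps data values to pitch over time, and the ``data density''
%%% above is points per second. A toy Python sketch under those
%%% assumptions; parameters are illustrative, not from the study:]
%%%
%%%     import numpy as np
%%%
%%%     def sonify(values, points_per_second=1.0, rate=44100,
%%%                f_lo=220.0, f_hi=880.0):
%%%         # One sine tone per data point, pitch rising with value.
%%%         v = np.asarray(values, dtype=float)
%%%         span = v.max() - v.min()
%%%         freqs = (f_lo + (v - v.min()) / (span if span else 1.0)
%%%                  * (f_hi - f_lo))
%%%         t = np.arange(int(rate / points_per_second)) / rate
%%%         return np.concatenate(
%%%             [np.sin(2 * np.pi * f * t) for f in freqs])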

@Article{Lecuyer:2008:SMS,
  author =       "Anatole L{\'e}cuyer and Jean-Marie Burkhardt and
                 Chee-Hian Tan",
  title =        "A study of the modification of the speed and size of
                 the cursor for simulating pseudo-haptic bumps and
                 holes",
  journal =      j-TAP,
  volume =       "5",
  number =       "3",
  pages =        "14:1--14:??",
  month =        aug,
  year =         "2008",
  CODEN =        "????",
  DOI =          "http://doi.acm.org/10.1145/1402236.1402238",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Mon Sep 15 19:02:24 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "In previous work on so-called pseudo-haptic textures,
                 we investigated the possibility of simulating
                 sensations of texture without haptic devices by using
                 the sole manipulation of the speed of a mouse cursor (a
                  technique called the speed technique). In this
                  paper, we describe another technique (called the
                  size technique) to enhance the speed technique and
                  simulate texture
                 sensations by varying the size of the cursor according
                 to the local height of the texture displayed on the
                 computer screen. With the size technique, the user
                 would see an increase (decrease) in cursor size
                 corresponding to a positive (negative) slope of the
                 texture. We have conducted a series of experiments to
                 study and compare the use of both the size and speed
                 technique for simulating simple shapes like bumps and
                 holes. In Experiment 1, our results showed that
                 participants could successfully identify bumps and
                 holes using the size technique alone. Performances
                 obtained with the size technique reached a similar
                 level of accuracy as found previously with the speed
                 technique alone. In Experiment 2, we determined a point
                 of subjective equality between bumps simulated by each
                 technique separately, which suggests that the two
                 techniques provide information that can be perceptually
                 equivalent. In Experiment 3, using paradoxical
                 situations of conflict between the two techniques, we
                 have found that participants' answers were more
                 influenced by the size technique, suggesting a
                 dominance of the size over the speed technique.
                 Furthermore, we have found a mutual reinforcement of
                 the techniques, i.e., when the two techniques were
                 consistently combined, the participants were more
                 efficient in identifying the simulated shapes. In
                 Experiment 4, we further observed the complex
                 interactions between the information associated with
                 the two techniques in the perception and in the
                 decision process related to the accurate identification
                 of bumps and holes. Taken together, our results promote
                 the use of both techniques for the low-cost simulation
                 of texture sensations in applications, such as
                  video games, the Internet, and graphical user interfaces.",
  acknowledgement = ack-nhfb,
  articleno =    "14",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
  keywords =     "bump; control/display ratio; cursor; hole;
                 Pseudo-haptic; size; speed; texture",
}
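
%%% [The speed technique above works by modulating the
%%% control-display (C/D) gain with the local texture slope under the
%%% cursor; the size technique scales the cursor sprite with local
%%% height instead. A hypothetical Python fragment for the speed side
%%% only, with made-up constants:]
%%%
%%%     def displayed_step(mouse_step, local_slope, k=0.5):
%%%         # Uphill (positive slope): lower gain, the cursor drags;
%%%         # downhill: higher gain, it accelerates. Clamp the gain
%%%         # so the cursor never stalls or teleports.
%%%         gain = min(2.0, max(0.2, 1.0 - k * local_slope))
%%%         return mouse_step * gain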

@Article{Amemiya:2008:LMI,
  author =       "Tomohiro Amemiya and Hideyuki Ando and Taro Maeda",
  title =        "Lead-me interface for a pulling sensation from
                 hand-held devices",
  journal =      j-TAP,
  volume =       "5",
  number =       "3",
  pages =        "15:1--15:??",
  month =        aug,
  year =         "2008",
  CODEN =        "????",
  DOI =          "http://doi.acm.org/10.1145/1402236.1402239",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Mon Sep 15 19:02:24 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "When a small mass in a hand-held device oscillates
                 along a single axis with asymmetric acceleration
                 (strongly peaked in one direction and diffuse in the
                 other), the holder typically experiences a kinesthetic
                 illusion characterized by the sensation of being
                 continuously pushed or pulled by the device. This
                 effect was investigated because of its potential
                 application to a hand-held, nongrounded, haptic device
                 that can convey a sense of a continuous translational
                 force in one direction, which is a key missing piece in
                 haptic research. A 1 degree-of-freedom (DOF) haptic
                 device based on a crank-slider mechanism was
                 constructed. The device converts the constant rotation
                 of an electric motor into the constrained movement of a
                 small mass with asymmetric acceleration. The frequency
                 that maximizes the perceived movement offered by the
                 haptic device was investigated. Tests using three
                 subjects showed that for the prototype, the best
                 frequencies were 5 and 10 cycles per second.",
  acknowledgement = ack-nhfb,
  articleno =    "15",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
  keywords =     "Haptic perception; interface using illusionary
                 sensation; mobile device",
}

@Article{Fontana:2008:ADP,
  author =       "Federico Fontana and Davide Rocchesso",
  title =        "Auditory distance perception in an acoustic pipe",
  journal =      j-TAP,
  volume =       "5",
  number =       "3",
  pages =        "16:1--16:??",
  month =        aug,
  year =         "2008",
  CODEN =        "????",
  DOI =          "http://doi.acm.org/10.1145/1402236.1402240",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Mon Sep 15 19:02:24 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "In a study of auditory distance perception, we
                  investigated the effects of exaggerating the
                  acoustic cue of reverberation while the intensity
                  of sound did
                 not vary noticeably. The set of stimuli was obtained by
                 moving a sound source inside a 10.2-m long pipe having
                 a 0.3-m diameter. Twelve subjects were asked to listen
                 to a speech sound while keeping their head inside the
                 pipe and then to estimate the egocentric distance from
                 the sound source using a magnitude production
                 procedure. The procedure was repeated eighteen times
                 using six different positions of the sound source.
                 Results show that the point at which perceived distance
                 equals physical distance is located approximately 3.5 m
                 away from the listening point, with an average range of
                 distance estimates of approximately 3.3 m, i.e., 1.65
                 to 4.9 m. The absence of intensity cues makes the
                 acoustic pipe a potentially interesting modeling
                 paradigm for the design of auditory interfaces in which
                 distance is rendered independently of loudness. The
                 proposed acoustic environment also confirms the known
                 unreliability of certain distance cues.",
  acknowledgement = ack-nhfb,
  articleno =    "16",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
  keywords =     "Acoustic pipe; auditory display; distance perception",
}

@Article{Kuhl:2008:RRL,
  author =       "Scott A. Kuhl and Sarah H. Creem-Regehr and William B.
                 Thompson",
  title =        "Recalibration of rotational locomotion in immersive
                 virtual environments",
  journal =      j-TAP,
  volume =       "5",
  number =       "3",
  pages =        "17:1--17:??",
  month =        aug,
  year =         "2008",
  CODEN =        "????",
  DOI =          "http://doi.acm.org/10.1145/1402236.1402241",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Mon Sep 15 19:02:24 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "This work uses an immersive virtual environment (IVE)
                 to examine how people maintain a calibration between
                 biomechanical and visual information for rotational
                 self-motion. First, we show that no rotational
                 recalibration occurs when visual and biomechanical
                 rates of rotation are matched. Next, we demonstrate
                 that mismatched physical and visual rotation rates
                 cause rotational recalibration. Although previous work
                 has shown that rotational locomotion can be
                 recalibrated in real environments, this work extends
                 the finding to virtual environments. We further show
                 that people do not completely recalibrate left and
                 right rotations independently when different
                 visual--biomechanical discrepancies are used for left
                 and right rotations during a recalibration phase.
                 Finally, since the majority of participants did not
                 notice mismatched physical and visual rotation rates,
                 we discuss the implications of using such mismatches to
                 enable IVE users to explore a virtual space larger than
                 the physical space they are in.",
  acknowledgement = ack-nhfb,
  articleno =    "17",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
  keywords =     "Perception; recalibration; rotation; virtual
                 environments",
}

@Article{Fleming:2009:GES,
  author =       "Roland Fleming and Michael Langer",
  title =        "Guest editorial: Special issue on {Applied Perception
                 in Graphics and Visualization (APGV07)}",
  journal =      j-TAP,
  volume =       "5",
  number =       "4",
  pages =        "18:1--18:??",
  month =        jan,
  year =         "2009",
  CODEN =        "????",
  DOI =          "http://doi.acm.org/10.1145/1462048.1462049",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Mon Feb 2 14:38:02 MST 2009",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tap.bib",
  acknowledgement = ack-nhfb,
  articleno =    "18",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Morvan:2009:PAT,
  author =       "Yann Morvan and Carol O'Sullivan",
  title =        "A perceptual approach to trimming and tuning
                 unstructured lumigraphs",
  journal =      j-TAP,
  volume =       "5",
  number =       "4",
  pages =        "19:1--19:??",
  month =        jan,
  year =         "2009",
  CODEN =        "????",
  DOI =          "http://doi.acm.org/10.1145/1462048.1462050",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Mon Feb 2 14:38:02 MST 2009",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "We present a novel perceptual method to reduce the
                 visual redundancy of unstructured lumigraphs, an
                 image-based representation designed for interactive
                 rendering. We combine features of the unstructured
                 lumigraph algorithm and image fidelity metrics to
                 efficiently rank the perceptual impact of the removal
                 of subregions of input views ({\em subviews\/}). We use
                 a greedy approach to estimate the order in which
                 subviews should be pruned to minimize perceptual
                 degradation at each step. Renderings using varying
                 numbers of subviews can then be easily visualized with
                 confidence that the retained subviews are well chosen,
                 thus facilitating the choice of how many to retain. The
                 regions of the input views that are left are repacked
                 into a texture atlas. Our method takes advantage of any
                 scene geometry information available but only requires
                 a very coarse approximation. We perform a user study to
                 validate its behaviour and to investigate the impact of
                 the choice of image fidelity metric, as well as that of
                 user parameters. The three metrics considered fall into
                 the physical, statistical, and perceptual categories.
                 The overall benefit of our method is the semiautomation
                 of the view selection process, resulting in
                 unstructured lumigraphs that are thriftier in texture
                 memory use and faster to render. Using the same
                 framework, we adjust the parameters of the unstructured
                 lumigraph algorithm to optimise it on a scene-by-scene
                 basis.",
  acknowledgement = ack-nhfb,
  articleno =    "19",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
  keywords =     "Image-based rendering; perceptual metrics",
}

@Article{McDonnell:2009:EEM,
  author =       "Rachel McDonnell and Sophie J{\"o}rg and Jessica K.
                 Hodgins and Fiona Newell and Carol O'Sullivan",
  title =        "Evaluating the effect of motion and body shape on the
                 perceived sex of virtual characters",
  journal =      j-TAP,
  volume =       "5",
  number =       "4",
  pages =        "20:1--20:??",
  month =        jan,
  year =         "2009",
  CODEN =        "????",
  DOI =          "http://doi.acm.org/10.1145/1462048.1462051",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Mon Feb 2 14:38:02 MST 2009",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "In this paper, our aim is to determine factors that
                 influence the perceived sex of virtual characters. In
                 Experiment 1, four different model types were used:
                 highly realistic male and female models, an androgynous
                 character, and a point light walker. Three different
                 types of motion were applied to all models:
                 motion-captured male and female walks, and neutral synthetic
                 walks. We found that both form and motion influence sex
                 perception for these characters: for neutral synthetic
                 motions, form determines perceived sex, whereas natural
                 motion affects the perceived sex of both androgynous
                 and realistic forms. These results indicate that the
                 use of neutral walks is better than creating ambiguity
                 by assigning an incongruent motion. In Experiment 2, we
                 further investigated the influence of body shape and
                 motion on realistic male and female models and found
                 that adding stereotypical indicators of sex to the body
                 shapes influenced sex perception, and that exaggerated
                 female body shapes influence sex judgements more than
                 exaggerated male shapes. These
                 results have implications for variety and realism when
                 simulating large crowds of virtual characters.",
  acknowledgement = ack-nhfb,
  articleno =    "20",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
  keywords =     "graphics; motion capture; Perception",
}

@Article{Lavoue:2009:LRM,
  author =       "Guillaume Lavou{\'e}",
  title =        "A local roughness measure for {$3$D} meshes and its
                 application to visual masking",
  journal =      j-TAP,
  volume =       "5",
  number =       "4",
  pages =        "21:1--21:??",
  month =        jan,
  year =         "2009",
  CODEN =        "????",
  DOI =          "http://doi.acm.org/10.1145/1462048.1462052",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Mon Feb 2 14:38:02 MST 2009",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "3D models are subject to a wide variety of processing
                 operations such as compression, simplification or
                 watermarking, which may introduce some geometric
                 artifacts on the shape. The main issue is to maximize
                 the compression/simplification ratio or the watermark
                 strength while minimizing these visual degradations.
                 However, few algorithms exploit the human visual system
                 to {\em hide\/} these degradations, although perceptual
                 attributes could be quite relevant for this task. In
                 particular, the {\em masking effect\/} refers to the
                 fact that one visual pattern can hide the visibility of
                 another. In this context, we introduce an algorithm for
                 estimating the {\em roughness\/} of a 3D mesh, as a
                 local measure of geometric noise on the surface.
                 Indeed, a textured (or {\em rough\/}) region is able to
                 hide geometric distortions much better than a smooth
                 one. Our measure is based on curvature analysis on
                 local windows of the mesh and is independent of the
                 resolution/connectivity of the object. The accuracy and
                 robustness of our measure, together with its relevance
                 to visual masking, have been demonstrated through
                 extensive comparisons with the state of the art and
                 through a subjective experiment. Two applications are
                 also presented, in which the roughness measure is used
                 to guide (and improve) compression and watermarking
                 algorithms, respectively.",
  acknowledgement = ack-nhfb,
  articleno =    "21",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
  keywords =     "3D mesh; Curvature; Masking; Roughness; subjective
                 evaluation",
}

@Article{Murphy:2009:HIM,
  author =       "Hunter A. Murphy and Andrew T. Duchowski and Richard
                 A. Tyrrell",
  title =        "Hybrid image\slash model-based gaze-contingent
                 rendering",
  journal =      j-TAP,
  volume =       "5",
  number =       "4",
  pages =        "22:1--22:??",
  month =        jan,
  year =         "2009",
  CODEN =        "????",
  DOI =          "http://doi.acm.org/10.1145/1462048.1462053",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Mon Feb 2 14:38:02 MST 2009",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "A nonisotropic hybrid image/model-based
                 gaze-contingent rendering technique utilizing ray
                 casting on a GPU is discussed. Empirical evidence
                 derived from human subject experiments indicates an
                 inverse relationship between a peripherally degraded
                 scene's high-resolution inset size and mean search
                 time, a trend consistent with existing image-based and
                 model-based techniques. In addition, the data suggest
                 that maintaining a target's silhouette edges decreases
                 search times when compared to targets with degraded
                 edges. However, analysis suggests a point of
                 diminishing returns with an inset larger than
                 $15^\circ$ when target discrimination is a component of
                 visual search. Benefits of the hybrid technique include
                 simplicity of design and parallelizability, both
                 conducive to GPU implementation.",
  acknowledgement = ack-nhfb,
  articleno =    "22",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
  keywords =     "Eye tracking; Level of Detail",
}

@Article{Boucheny:2009:PEV,
  author =       "Christian Boucheny and Georges-Pierre Bonneau and
                 Jacques Droulez and Guillaume Thibault and Stephane
                 Ploix",
  title =        "A perceptive evaluation of volume rendering
                 techniques",
  journal =      j-TAP,
  volume =       "5",
  number =       "4",
  pages =        "23:1--23:??",
  month =        jan,
  year =         "2009",
  CODEN =        "????",
  DOI =          "http://doi.acm.org/10.1145/1462048.1462054",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Mon Feb 2 14:38:02 MST 2009",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "The display of space-filling data is still a challenge
                 for the visualization community. Direct volume
                 rendering (DVR) is one of the most important techniques
                 developed to achieve direct perception of such
                 volumetric data. It is based on semitransparent
                 representations, where the data are accumulated in a
                 depth-dependent order. However, it produces images that
                 may be difficult to understand, and thus several
                 techniques have been proposed so as to improve its
                 effectiveness, using, for instance, lighting models or
                 simpler representations (e.g., maximum intensity
                 projection). In this article, we present three
                 perceptual studies that examine how DVR meets its
                 goals, in either a static or a dynamic context. We show
                 that a static representation is highly ambiguous, even
                 in simple cases, but this can be counterbalanced by use
                 of dynamic cues (i.e., motion parallax) provided that
                 the rendering parameters are correctly tuned. In
                 addition, perspective projections are demonstrated to
                 provide relevant information to disambiguate depth
                 perception in dynamic displays.",
  acknowledgement = ack-nhfb,
  articleno =    "23",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
  keywords =     "Direct volume rendering; perception of transparency;
                 perspective projection; structure from motion",
}

@Article{Feixas:2009:UIT,
  author =       "Miquel Feixas and Mateu Sbert and Francisco
                 Gonz{\'a}lez",
  title =        "A unified information-theoretic framework for
                 viewpoint selection and mesh saliency",
  journal =      j-TAP,
  volume =       "6",
  number =       "1",
  pages =        "1:1--1:??",
  month =        feb,
  year =         "2009",
  CODEN =        "????",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Mon Feb 23 08:25:26 MST 2009",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tap.bib",
  acknowledgement = ack-nhfb,
  articleno =    "1",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Hattenberger:2009:PIG,
  author =       "Timothy J. Hattenberger and Mark D. Fairchild and
                 Garrett M. Johnson and Carl Salvaggio",
  title =        "A psychophysical investigation of global illumination
                 algorithms used in augmented reality",
  journal =      j-TAP,
  volume =       "6",
  number =       "1",
  pages =        "2:1--2:??",
  month =        feb,
  year =         "2009",
  CODEN =        "????",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Mon Feb 23 08:25:26 MST 2009",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tap.bib",
  acknowledgement = ack-nhfb,
  articleno =    "2",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Li:2009:NEF,
  author =       "Yanfang Li and Volkan Patoglu and Marcia K.
                 O'Malley",
  title =        "Negative efficacy of fixed gain error reducing shared
                 control for training in virtual environments",
  journal =      j-TAP,
  volume =       "6",
  number =       "1",
  pages =        "3:1--3:??",
  month =        feb,
  year =         "2009",
  CODEN =        "????",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Mon Feb 23 08:25:26 MST 2009",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tap.bib",
  acknowledgement = ack-nhfb,
  articleno =    "3",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Gray:2009:SRC,
  author =       "Rob Gray and Rayka Mohebbi and Hong Z. Tan",
  title =        "The spatial resolution of crossmodal attention:
                 Implications for the design of multimodal interfaces",
  journal =      j-TAP,
  volume =       "6",
  number =       "1",
  pages =        "4:1--4:??",
  month =        feb,
  year =         "2009",
  CODEN =        "????",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Mon Feb 23 08:25:26 MST 2009",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tap.bib",
  acknowledgement = ack-nhfb,
  articleno =    "4",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Li:2009:PIM,
  author =       "Li Li and Bernard D. Adelstein and Stephen R. Ellis",
  title =        "Perception of image motion during head movement",
  journal =      j-TAP,
  volume =       "6",
  number =       "1",
  pages =        "5:1--5:??",
  month =        feb,
  year =         "2009",
  CODEN =        "????",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Mon Feb 23 08:25:26 MST 2009",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tap.bib",
  acknowledgement = ack-nhfb,
  articleno =    "5",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Khan:2009:CPE,
  author =       "Masood Mehmood Khan and Robert D. Ward and Michael
                 Ingleby",
  title =        "Classifying pretended and evoked facial expressions of
                 positive and negative affective states using infrared
                 measurement of skin temperature",
  journal =      j-TAP,
  volume =       "6",
  number =       "1",
  pages =        "6:1--6:??",
  month =        feb,
  year =         "2009",
  CODEN =        "????",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Mon Feb 23 08:25:26 MST 2009",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tap.bib",
  acknowledgement = ack-nhfb,
  articleno =    "6",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Riecke:2009:MSE,
  author =       "Bernhard E. Riecke and Aleksander V{\"a}ljam{\"a}e and
                 J{\"o}rg Schulte-Pelkum",
  title =        "Moving sounds enhance the visually-induced self-motion
                 illusion (circular vection) in virtual reality",
  journal =      j-TAP,
  volume =       "6",
  number =       "2",
  pages =        "7:1--7:??",
  month =        feb,
  year =         "2009",
  CODEN =        "????",
  DOI =          "http://doi.acm.org/10.1145/1498700.1498701",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Mon Apr 13 08:51:27 MDT 2009",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "While rotating visual and auditory stimuli have long
                 been known to elicit self-motion illusions (``circular
                 vection''), audiovisual interactions have hardly been
                 investigated. Here, two experiments investigated
                 whether visually induced circular vection can be
                 enhanced by concurrently rotating auditory cues that
                 match visual landmarks (e.g., a fountain sound).
                 Participants sat behind a curved projection screen
                 displaying rotating panoramic renderings of a market
                 place. Apart from a no-sound condition, headphone-based
                 auditory stimuli consisted of mono sound, ambient
                 sound, or low-/high-spatial resolution auralizations
                 using generic head-related transfer functions (HRTFs).
                 While merely adding nonrotating (mono or ambient) sound
                 showed no effects, moving sound stimuli facilitated
                 both vection and presence in the virtual environment.
                 This spatialization benefit was maximal for a medium
                 ($20^\circ \times 15^\circ$) FOV, reduced for a larger
                 ($54^\circ \times 45^\circ$) FOV and unexpectedly
                 absent for the smallest ($10^\circ \times 7.5^\circ$)
                 FOV. Increasing auralization spatial fidelity (from
                 low, comparable to five-channel home theatre systems,
                 to high, $5^\circ$ resolution) provided no further
                 benefit, suggesting a ceiling effect. In conclusion,
                 both self-motion perception and presence can benefit
                 from adding moving auditory stimuli. This has important
                 implications both for multimodal cue integration
                 theories and the applied challenge of building
                 affordable yet effective motion simulators.",
  acknowledgement = ack-nhfb,
  articleno =    "7",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
  keywords =     "Audiovisual interactions; presence; psychophysics;
                 self-motion simulation; spatial sound; vection; virtual
                 reality",
}

@Article{Willemsen:2009:EHM,
  author =       "Peter Willemsen and Mark B. Colton and Sarah H.
                 Creem-Regehr and William B. Thompson",
  title =        "The effects of head-mounted display mechanical
                 properties and field of view on distance judgments in
                 virtual environments",
  journal =      j-TAP,
  volume =       "6",
  number =       "2",
  pages =        "8:1--8:??",
  month =        feb,
  year =         "2009",
  CODEN =        "????",
  DOI =          "http://doi.acm.org/10.1145/1498700.1498702",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Mon Apr 13 08:51:27 MDT 2009",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "Research has shown that people are able to judge
                 distances accurately in full-cue, real-world
                 environments using visually directed actions. However,
                 in virtual environments viewed with head-mounted
                 display (HMD) systems, there is evidence that people
                 act as though the virtual space is smaller than
                 intended. This is a surprising result given how well
                 people act in real environments. The behavior in the
                 virtual setting may be linked to distortions in the
                 available visual cues or to a person's ability to
                 locomote without vision. Either could result from
                 issues related to added mass, moments of inertia, and
                 restricted field of view in HMDs. This article
                 describes an experiment in which distance judgments
                 based on normal real-world and HMD viewing are compared
                 with judgments based on real-world viewing while
                 wearing two specialized devices. One was a mock HMD,
                 which replicated the mass, moments of inertia, and
                 field of view of the HMD; the other was an inertial
                 headband designed to replicate the mass and moments of
                 inertia of the HMD, but constructed so as not to
                 restrict the field of view of the observer or otherwise
                 feel like
                 wearing a helmet. Distance judgments using the mock HMD
                 showed a statistically significant underestimation
                 relative to the no-restriction condition, but not of a
                 magnitude sufficient to account for all the distance
                 compression seen in the HMD. Indicated distances with
                 the inertial headband were not significantly smaller
                 than those made with no restrictions.",
  acknowledgement = ack-nhfb,
  articleno =    "8",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
  keywords =     "distance judgments; head-mounted displays;
                 Perception",
}

@Article{Duchowski:2009:SVS,
  author =       "Andrew T. Duchowski and David Bate and Paris
                 Stringfellow and Kaveri Thakur and Brian J. Melloy and
                 Anand K. Gramopadhye",
  title =        "On spatiochromatic visual sensitivity and peripheral
                 color {LOD} management",
  journal =      j-TAP,
  volume =       "6",
  number =       "2",
  pages =        "9:1--9:??",
  month =        feb,
  year =         "2009",
  CODEN =        "????",
  DOI =          "http://doi.acm.org/10.1145/1498700.1498703",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Mon Apr 13 08:51:27 MDT 2009",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "Empirical findings from a gaze-contingent color
                 degradation study report the effects of artificial
                 reduction of the human visual system's sensitivity to
                 peripheral chromaticity on visual search performance.
                 To our knowledge, this is the first such investigation
                 of peripheral color reduction. For unimpeded
                 performance, results suggest that, unlike
                 spatiotemporal content, peripheral chromaticity cannot
                 be reduced within the central $20^\circ$ visual angle.
                 Somewhat analogous to dark adaptation, reduction of
                 peripheral color tends to simulate scotopic viewing
                 conditions. This holds significant implications for
                 chromatic Level Of Detail management. Specifically,
                 while peripheral spatiotemporal detail can be
                 attenuated without affecting visual search, often
                 dramatically (e.g., spatial detail can be reduced by up
                 to 50\% at about $5^\circ$), peripheral chromatic
                 reduction is likely to be noticed much sooner.
                 Therefore, color LOD reduction (e.g., via compression)
                 should be maintained isotropically across the central
                 $20^\circ$ visual field.",
  acknowledgement = ack-nhfb,
  articleno =    "9",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
  keywords =     "Gaze-contingent displays",
}

@Article{Harper:2009:TDV,
  author =       "Simon Harper and Eleni Michailidou and Robert
                 Stevens",
  title =        "Toward a definition of visual complexity as an
                 implicit measure of cognitive load",
  journal =      j-TAP,
  volume =       "6",
  number =       "2",
  pages =        "10:1--10:??",
  month =        feb,
  year =         "2009",
  CODEN =        "????",
  DOI =          "http://doi.acm.org/10.1145/1498700.1498704",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Mon Apr 13 08:51:27 MDT 2009",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "The visual complexity of Web pages is much talked
                 about (``complex Web pages are difficult to use''),
                 but it is often regarded as a subjective decision by
                 the user. This subjective decision is of limited use
                 if we wish
                 to understand the importance of visual complexity, what
                 it means, and how it can be used. We theorize that by
                 understanding a user's visual perception of Web page
                 complexity, we can understand the cognitive effort
                 required for interaction with that page. This is
                 important because by using an easily identifiable
                 measure, such as visual complexity, as an implicit
                 marker of cognitive load, we can design Web pages which
                 are easier to interact with. We have devised an initial
                 empirical experiment, using card sorting and triadic
                 elicitation, to test our theories and assumptions, and
                 have built an initial baseline sequence of 20 Web pages
                 along with a library of qualitative and anecdotal
                 feedback. Using this library, we define visual
                 complexity and, hence, perceived interaction
                 complexity; by taking these pages as ``prototypes''
                 and ranking them in a sequence of complexity, we are
                 able to group them as simple, neutral, or complex. This
                 means we can now work toward a definition of visual
                 complexity as an implicit measure of cognitive load.",
  acknowledgement = ack-nhfb,
  articleno =    "10",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
  keywords =     "knowledge elicitation; semantic Web; visual
                 complexity; visual impairment; Web accessibility",
}

@Article{Canosa:2009:RWV,
  author =       "Roxanne L. Canosa",
  title =        "Real-world vision: Selective perception and task",
  journal =      j-TAP,
  volume =       "6",
  number =       "2",
  pages =        "11:1--11:??",
  month =        feb,
  year =         "2009",
  CODEN =        "????",
  DOI =          "http://doi.acm.org/10.1145/1498700.1498705",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Mon Apr 13 08:51:27 MDT 2009",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "Visual perception is an inherently selective process.
                 To understand when and why a particular region of a
                 scene is selected, it is imperative to observe and
                 describe the eye movements of individuals as they go
                 about performing specific tasks. In this sense, vision
                 is an active process that integrates scene properties
                 with specific, goal-oriented oculomotor behavior. This
                 study is an investigation of how task influences the
                 visual selection of stimuli from a scene. Four
                 eye-tracking experiments were designed and conducted to
                 determine how everyday tasks affect oculomotor
                 behavior. A portable eye tracker was created for the
                 specific purpose of bringing the experiments out of the
                 laboratory and into the real world, where natural
                 behavior is most likely to occur. The experiments
                 provide evidence that the human visual system is not a
                 passive collector of salient environmental stimuli, nor
                 is vision general-purpose. Rather, vision is active and
                 specific, tightly coupled to the requirements of a task
                 and a plan of action. The experiments support the
                 hypothesis that the purpose of selective attention is
                 to maximize task efficiency by fixating relevant
                 objects in the scene. A computational model of visual
                 attention is presented that imposes a high-level
                 constraint on the bottom-up salient properties of a
                 scene for the purpose of locating regions that are
                 likely to correspond to foreground objects rather than
                 background or other salient nonobject stimuli. In
                 addition to improving the correlation to human subject
                 fixation densities over a strictly bottom-up model
                 [Itti et al. 1998; Parkhurst et al. 2002], this model
                 predicts a central fixation tendency when that tendency
                 is warranted, and not as an artificially primed
                 location bias.",
  acknowledgement = ack-nhfb,
  articleno =    "11",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
  keywords =     "Active vision; eye-tracking; saliency modeling",
}

@Article{Creem-Regehr:2009:GE,
  author =       "Sarah Creem-Regehr and Karol Myszkowski",
  title =        "Guest editorial",
  journal =      j-TAP,
  volume =       "6",
  number =       "3",
  pages =        "13:1--13:??",
  month =        aug,
  year =         "2009",
  CODEN =        "????",
  DOI =          "http://doi.acm.org/10.1145/1577755.1577756",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Mon Aug 31 16:34:11 MDT 2009",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tap.bib",
  acknowledgement = ack-nhfb,
  articleno =    "13",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{McDonnell:2009:IRB,
  author =       "Rachel McDonnell and Sophie J{\"o}rg and Joanna McHugh
                 and Fiona N. Newell and Carol O'Sullivan",
  title =        "Investigating the role of body shape on the perception
                 of emotion",
  journal =      j-TAP,
  volume =       "6",
  number =       "3",
  pages =        "14:1--14:??",
  month =        aug,
  year =         "2009",
  CODEN =        "????",
  DOI =          "http://doi.acm.org/10.1145/1577755.1577757",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Mon Aug 31 16:34:11 MDT 2009",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "In order to analyze the emotional content of motions
                 portrayed by different characters, we created real and
                 virtual replicas of an actor exhibiting six basic
                 emotions: sadness, happiness, surprise, fear, anger,
                 and disgust. In addition to the video of the real
                 actor, his actions were applied to five virtual body
                 shapes: a low- and high-resolution virtual counterpart,
                 a cartoon-like character, a wooden mannequin, and a
                 zombie-like character (Figures 1 and 2). In a point
                 light condition, we also tested whether the absence of
                 a body affected the perceived emotion of the movements.
                 Participants were asked to rate the actions based on a
                 list of 41 more complex emotions. We found that the
                 perception of emotional actions is highly robust and,
                 for the most part, independent of the character's body, so
                 long as form is present. When motion alone is present,
                 emotions were generally perceived as less intense than
                 in the cases where form was present.",
  acknowledgement = ack-nhfb,
  articleno =    "14",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
  keywords =     "graphics; motion capture; Perception",
}

@Article{Reitsma:2009:ESP,
  author =       "Paul S. A. Reitsma and Carol O'Sullivan",
  title =        "Effect of scenario on perceptual sensitivity to errors
                 in animation",
  journal =      j-TAP,
  volume =       "6",
  number =       "3",
  pages =        "15:1--15:??",
  month =        aug,
  year =         "2009",
  CODEN =        "????",
  DOI =          "http://doi.acm.org/10.1145/1577755.1577758",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Mon Aug 31 16:34:11 MDT 2009",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "A deeper understanding of what makes animation
                 perceptually plausible would benefit a number of
                 applications, such as approximate collision detection
                 and goal-directed animation. In a series of
                 psychophysical experiments, we examine how measurements
                 of perceptual sensitivity in realistic physical
                 simulations compare to similar measurements done in
                 more abstract settings. We find that participant
                 tolerance for certain types of errors is significantly
                 higher in a realistic snooker scenario than in the
                 abstract test settings previously used to examine those
                 errors. By contrast, we find tolerance for errors
                 displayed in realistic but more neutral environments
                 was not different from tolerance for those errors in
                 abstract settings. Additionally, we examine the
                 interaction of auditory and visual cues in determining
                 participant sensitivity to spatiotemporal errors in
                 rigid body collisions. We find that participants are
                 predominantly affected by visual cues. Finally, we find
                 that tolerance for spatial gaps during collision events
                 is constant for a wide range of viewing angles if the
                 effect of foreshortening and occlusion caused by the
                 viewing angle is taken into account.",
  acknowledgement = ack-nhfb,
  articleno =    "15",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
  keywords =     "Animation; graphics; perception; psychophysics",
}

@Article{Munn:2009:FAI,
  author =       "Susan M. Munn and Jeff B. Pelz",
  title =        "{FixTag}: An algorithm for identifying and tagging
                 fixations to simplify the analysis of data collected by
                 portable eye trackers",
  journal =      j-TAP,
  volume =       "6",
  number =       "3",
  pages =        "16:1--16:??",
  month =        aug,
  year =         "2009",
  CODEN =        "????",
  DOI =          "http://doi.acm.org/10.1145/1577755.1577759",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Mon Aug 31 16:34:11 MDT 2009",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "Video-based eye trackers produce an output video
                 showing where a subject is looking, the subject's
                 Point-of-Regard (POR), for each frame of a video of the
                 scene. This information can be extremely valuable, but
                 its analysis can be overwhelming. Analysis of
                 eye-tracked data from portable (wearable) eye trackers
                 is especially daunting, as the scene video may be
                 constantly changing, rendering automatic analysis more
                 difficult. A common way to begin analysis of POR data
                 is to group these data into fixations. In a previous
                 article, we compared the fixations identified (i.e.,
                 start and end marked) automatically by an algorithm to
                 those identified manually by users (i.e., manual
                 coders). Here, we extend this automatic identification
                 of fixations to tagging each fixation to a
                 Region-of-Interest (ROI). Our fixation tagging
                 algorithm, FixTag, requires the relative 3D positions
                 of the vertices of ROIs and calibration of the scene
                 camera. Fixation tagging is performed by first
                 calculating the camera projection matrices for
                 keyframes of the scene video (captured by the eye
                 tracker) via an iterative structure and motion recovery
                 algorithm. These matrices are then used to project 3D
                 ROI vertices into the keyframes. A POR for each
                 fixation is matched to a point in the closest keyframe,
                 which is then checked against the 2D projected ROI
                 vertices for tagging. Our fixation tags were compared
                 to those produced by three manual coders tagging the
                 automatically identified fixations for two different
                 scenarios. For each scenario, eight ROIs were defined
                 along with the 3D positions of eight calibration
                 points. Therefore, 17 tags were available for each
                 fixation: 8 for ROIs, 8 for calibration points, and 1
                 for ``other.'' For the first scenario, a subject was
                 tracked looking through products on four store shelves,
                 resulting in 182 automatically identified fixations.
                 Our automatic tagging algorithm produced tags that
                 matched those produced by at least one manual coder for
                 181 out of the 182 fixations (99.5\% agreement). For
                 the second scenario, a subject was tracked looking at
                 two posters on adjoining walls of a room. Our algorithm
                 matched at least one manual coder's tag for 169
                 fixations out of 172 automatically identified (98.3\%
                 agreement).",
  acknowledgement = ack-nhfb,
  articleno =    "16",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
  keywords =     "coding; eye tracking; Fixations; portable; wearable",
}

@Article{McNamara:2009:STP,
  author =       "Ann McNamara and Reynold Bailey and Cindy Grimm",
  title =        "Search task performance using subtle gaze direction
                 with the presence of distractions",
  journal =      j-TAP,
  volume =       "6",
  number =       "3",
  pages =        "17:1--17:??",
  month =        aug,
  year =         "2009",
  CODEN =        "????",
  DOI =          "http://doi.acm.org/10.1145/1577755.1577760",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Mon Aug 31 16:34:11 MDT 2009",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "A new experiment is presented that demonstrates the
                 usefulness of an image space modulation technique
                 called subtle gaze direction (SGD) for guiding the user
                 in a simple searching task. SGD uses image space
                 modulations in the luminance channel to guide a
                 viewer's gaze about a scene without interrupting their
                 visual experience. The goal of SGD is to direct a
                 viewer's gaze to certain regions of a scene without
                 introducing noticeable changes in the image. Using a
                 simple searching task, we compared performance using no
                 modulation, using subtle modulation, and using obvious
                 modulation. Results from the experiments show improved
                 performance when using subtle gaze direction, without
                 affecting the user's perception of the image. We then
                 extend the experiment to evaluate performance with the
                 presence of distractors. The distractors took the form
                 of extra modulations that did not correspond to a
                 target in the image. Experimentation shows that, even
                 in the presence of distractors, more accurate results
                 are returned on a simple search task using SGD than
                 when no modulation at all is used. Results establish
                 the potential of the method
                 for a wide range of applications including gaming,
                 perceptually based rendering, navigation in virtual
                 environments, and medical search tasks.",
  acknowledgement = ack-nhfb,
  articleno =    "17",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
  keywords =     "Eye tracking; gaze direction; image manipulation;
                 luminance; psychophysics",
}

@Article{Filip:2009:URG,
  author =       "Ji{\v{r}}{\'\i} Filip and Michael J. Chantler and
                 Michal Haindl",
  title =        "On uniform resampling and gaze analysis of
                 bidirectional texture functions",
  journal =      j-TAP,
  volume =       "6",
  number =       "3",
  pages =        "18:1--18:??",
  month =        aug,
  year =         "2009",
  CODEN =        "????",
  DOI =          "http://doi.acm.org/10.1145/1577755.1577761",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Mon Aug 31 16:34:11 MDT 2009",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "The use of illumination- and view-dependent texture
                 information is currently the best way to capture the
                 appearance of real-world materials accurately. One
                 example is the Bidirectional Texture Function. The
                 main disadvantage of these data is their massive size.
                 In this article, we employ perceptually based methods
                 to allow more efficient handling of these data. In the
                 first step, we analyze different uniform resamplings
                 by means of a psychophysical study with 11 subjects,
                 comparing the original data with renderings of the
                 illumination- and view-dependent textural measurements
                 uniformly resampled over the hemisphere. We found that
                 down-sampling in the view and illumination azimuthal
                 angles is less apparent than in the elevation angles,
                 and that illumination directions can be down-sampled
                 more than view directions without loss of visual
                 accuracy. In the second step, we analyzed subjects'
                 gaze fixations during the experiment. The gaze
                 analysis confirmed the results of the experiment and
                 revealed that subjects fixated at locations aligned
                 with the direction of the main gradient in the
                 rendered stimuli. As this gradient was mostly aligned
                 with the illumination gradient, we conclude that
                 subjects observed the materials mainly in the
                 direction of the illumination gradient. Our results
                 provide interesting insights into human perception of
                 real materials and have promising consequences for the
                 development of more efficient compression and
                 rendering algorithms using these kinds of massive
                 data.",
  acknowledgement = ack-nhfb,
  articleno =    "18",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
  keywords =     "BTF; eye tracking; psychophysical experiment; texture
                 compression; uniform resampling; visual degradation",
}

@Article{Kuhl:2009:HCE,
  author =       "Scott A. Kuhl and William B. Thompson and Sarah H.
                 Creem-Regehr",
  title =        "{HMD} calibration and its effects on distance
                 judgments",
  journal =      j-TAP,
  volume =       "6",
  number =       "3",
  pages =        "19:1--19:??",
  month =        aug,
  year =         "2009",
  CODEN =        "????",
  DOI =          "http://doi.acm.org/10.1145/1577755.1577762",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Mon Aug 31 16:34:11 MDT 2009",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "Most head-mounted displays (HMDs) suffer from
                 substantial optical distortion, and vendor-supplied
                 specifications for field of view are often at variance
                 with reality. Unless corrected, such displays do not
                 present perspective-related visual cues in a
                 geometrically correct manner. Distorted geometry has
                 the potential to affect applications of HMDs, which
                 depend on precise spatial perception. This article
                 provides empirical evidence for the degree to which
                 common geometric distortions affect one type of spatial
                 judgment in virtual environments. We show that
                 minification or magnification in the HMD that would
                 occur from misstated HMD field of view causes
                 significant changes in distance judgments. Incorrectly
                 calibrated pitch and pincushion distortion, however, do
                 not cause statistically significant changes in distance
                 judgments for the degree of distortions examined. While
                 the means for determining the optical distortion of
                 display systems are well known, they are often not used
                 in non-see-through HMDs due to problems in measuring
                 and correcting for distortion. As a result, we also
                 provide practical guidelines for creating geometrically
                 calibrated systems.",
  acknowledgement = ack-nhfb,
  articleno =    "19",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
  keywords =     "field of view; Immersive virtual environment;
                 minification; perception; pincushion distortion;
                 pitch",
}

@Article{Riecke:2009:ASM,
  author =       "Bernhard E. Riecke and Daniel Feuereissen and John J.
                 Rieser",
  title =        "Auditory self-motion simulation is facilitated by
                 haptic and vibrational cues suggesting the possibility
                 of actual motion",
  journal =      j-TAP,
  volume =       "6",
  number =       "3",
  pages =        "20:1--20:??",
  month =        aug,
  year =         "2009",
  CODEN =        "????",
  DOI =          "http://doi.acm.org/10.1145/1577755.1577763",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Mon Aug 31 16:34:11 MDT 2009",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "Sound fields rotating around stationary blindfolded
                 listeners sometimes elicit auditory circular vection,
                 the illusion that the listener is physically rotating.
                 Experiment 1 investigated whether auditory circular
                 vection depends on participants' situational awareness
                 of ``movability,'' that is, whether they sense/know
                 that actual motion is possible or not. While previous
                 studies often seated participants on movable chairs to
                 suspend the disbelief of self-motion, it has never been
                 investigated whether this does, in fact, facilitate
                 auditory vection. To this end, 23 blindfolded
                 participants were seated on a hammock chair with their
                 feet either on solid ground (``movement impossible'')
                 or suspended (``movement possible'') while listening to
                 individualized binaural recordings of two sound sources
                 rotating synchronously at $60^\circ / s$. Although
                 participants never physically moved, situational
                 awareness of movability facilitated auditory vection.
                 Moreover, adding slight vibrations like the ones
                 resulting from actual chair rotation increased the
                 frequency and intensity of vection. Experiment 2
                 extended these findings and showed that
                 nonindividualized binaural recordings were as effective
                 in inducing auditory circular vection as individualized
                 recordings. These results have important implications
                 both for our theoretical understanding of self-motion
                 perception and for the applied field of self-motion
                 simulations, where vibrations, nonindividualized
                 binaural sound, and the cognitive/perceptual framework
                 of movability can typically be provided at minimal cost
                 and effort.",
  acknowledgement = ack-nhfb,
  articleno =    "20",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
  keywords =     "auditory vection; circular vection; cue-integration;
                 higher-level/cognitive influences; HRTF; human factors;
                 individualized binaural recordings; psychophysics;
                 Self-motion illusions; self-motion simulation; spatial
                 sound; vibrations; virtual reality",
}

@Article{Bodenheimer:2009:GE,
  author =       "Bobby Bodenheimer and Carol O'Sullivan",
  title =        "Guest editorial",
  journal =      j-TAP,
  volume =       "6",
  number =       "4",
  pages =        "21:1--21:??",
  month =        sep,
  year =         "2009",
  CODEN =        "????",
  DOI =          "http://doi.acm.org/10.1145/1609967.1609968",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Thu Oct 1 09:18:09 MDT 2009",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tap.bib",
  acknowledgement = ack-nhfb,
  articleno =    "21",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{McDonnell:2009:TBS,
  author =       "Rachel McDonnell and Cathy Ennis and Simon Dobbyn and
                 Carol O'Sullivan",
  title =        "Talking bodies: Sensitivity to desynchronization of
                 conversations",
  journal =      j-TAP,
  volume =       "6",
  number =       "4",
  pages =        "22:1--22:??",
  month =        sep,
  year =         "2009",
  CODEN =        "????",
  DOI =          "http://doi.acm.org/10.1145/1609967.1609969",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Thu Oct 1 09:18:09 MDT 2009",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "In this article, we investigate human sensitivity to
                 the coordination and timing of conversational body
                 language for virtual characters. First, we captured the
                 full-body motions (excluding faces and hands) of three
                 actors conversing about a range of topics, in either a
                 polite (i.e., one person talking at a time) or
                 debate/argument style. Stimuli were then created by
                 applying the motion-captured conversations from the
                 actors to virtual characters. In a 2AFC experiment,
                 participants viewed paired sequences of synchronized
                 and desynchronized conversations and were asked to
                 guess which was the real one. Detection performance was
                 above chance for both conversation styles but more so
                 for the polite conversations, where desynchronization
                 was more noticeable.",
  acknowledgement = ack-nhfb,
  articleno =    "22",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
  keywords =     "graphics; motion capture; Perception",
}

@Article{Jimenez:2009:SSP,
  author =       "Jorge Jimenez and Veronica Sundstedt and Diego
                 Gutierrez",
  title =        "Screen-space perceptual rendering of human skin",
  journal =      j-TAP,
  volume =       "6",
  number =       "4",
  pages =        "23:1--23:??",
  month =        sep,
  year =         "2009",
  CODEN =        "????",
  DOI =          "http://doi.acm.org/10.1145/1609967.1609970",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Thu Oct 1 09:18:09 MDT 2009",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "We propose a novel skin shader which translates the
                 simulation of subsurface scattering from texture space
                 to a screen-space diffusion approximation. It naturally
                 scales well while maintaining a perceptually plausible
                 result. This technique allows us to ensure real-time
                 performance even when several characters may appear on
                 screen at the same time. The visual realism of the
                 resulting images is validated using a subjective
                 psychophysical preference experiment. Our results show
                 that, independent of distance and light position, the
                  images rendered using our novel shader have visual
                  realism as high as that of a previously developed
                  physically-based shader.",
  acknowledgement = ack-nhfb,
  articleno =    "23",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
  keywords =     "perception; psychophysics; Real-time skin rendering",
}

@Article{Yu:2009:PIA,
  author =       "Insu Yu and Andrew Cox and Min H. Kim and Tobias
                 Ritschel and Thorsten Grosch and Carsten Dachsbacher
                 and Jan Kautz",
  title =        "Perceptual influence of approximate visibility in
                 indirect illumination",
  journal =      j-TAP,
  volume =       "6",
  number =       "4",
  pages =        "24:1--24:??",
  month =        sep,
  year =         "2009",
  CODEN =        "????",
  DOI =          "http://doi.acm.org/10.1145/1609967.1609971",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Thu Oct 1 09:18:09 MDT 2009",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "In this article we evaluate the use of approximate
                 visibility for efficient global illumination.
                 Traditionally, accurate visibility is used in light
                 transport. However, the indirect illumination we
                 perceive on a daily basis is rarely of high-frequency
                 nature, as the most significant aspect of light
                 transport in real-world scenes is diffuse, and thus
                 displays a smooth gradation. This raises the question
                 of whether accurate visibility is perceptually
                 necessary in this case. To answer this question, we
                 conduct a psychophysical study on the perceptual
                 influence of approximate visibility on indirect
                 illumination. This study reveals that accurate
                 visibility is not required and that certain
                 approximations may be introduced.",
  acknowledgement = ack-nhfb,
  articleno =    "24",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
  keywords =     "Global illumination; perception; visibility",
}

@Article{Morvan:2009:HOT,
  author =       "Yann Morvan and Carol O'Sullivan",
  title =        "Handling occluders in transitions from panoramic
                 images: {A} perceptual study",
  journal =      j-TAP,
  volume =       "6",
  number =       "4",
  pages =        "25:1--25:??",
  month =        sep,
  year =         "2009",
  CODEN =        "????",
  DOI =          "http://doi.acm.org/10.1145/1609967.1609972",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Thu Oct 1 09:18:09 MDT 2009",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "Panoramic images are very effective at conveying a
                  visual sense of presence at very low cost and with
                  great ease of authoring. They are, however, limited in the
                 navigation options they offer, unlike 3D
                 representations. It is therefore desirable to provide
                 pleasing transitions from one panorama to another, or
                 from a panorama to a 3D model. We focus on motions
                 where the viewers move toward an area of interest, and
                 on the problem of dealing with occluders in their path.
                 We discuss existing transition approaches, with
                 emphasis on the additional information they require and
                 on the constraints they place on the authoring process.
                 We propose a compromise approach based on faking the
                 parallax effect with occluder mattes. We conduct a user
                 study to determine whether additional information does
                 in fact increase the visual appeal of transitions. We
                 observe that the creation of occluder mattes alone is
                 only justified if the fake parallax effect can be
                 synchronized with the camera motion (but not
                 necessarily consistent with it), and if viewpoint
                 discrepancies at occlusion boundaries are small. The
                 faster the transition, the less perceptual value there
                 is in creating mattes. Information on view alignment is
                 always useful, as a dissolve effect is always preferred
                 to fading to black and back.",
  acknowledgement = ack-nhfb,
  articleno =    "25",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
  keywords =     "content mixing; occlusion; panorama; transitioning;
                 User study",
}

@Article{To:2009:PDN,
  author =       "M. P. S. To and I. D. Gilchrist and T. Troscianko and
                 J. S. B. Kho and D. J. Tolhurst",
  title =        "Perception of differences in natural-image stimuli:
                 Why is peripheral viewing poorer than foveal?",
  journal =      j-TAP,
  volume =       "6",
  number =       "4",
  pages =        "26:1--26:??",
  month =        sep,
  year =         "2009",
  CODEN =        "????",
  DOI =          "http://doi.acm.org/10.1145/1609967.1609973",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Thu Oct 1 09:18:09 MDT 2009",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "Visual Difference Predictor (VDP) models have played a
                 key role in digital image applications such as the
                 development of image quality metrics. However, little
                 attention has been paid to their applicability to
                 peripheral vision. Central (i.e., foveal) vision is
                 extremely sensitive for the contrast detection of
                 simple stimuli such as sinusoidal gratings, but
                 peripheral vision is less sensitive. Furthermore,
                 crowding is a well-documented phenomenon whereby
                 differences in suprathreshold peripherally viewed
                 target objects (such as individual letters or patches
                 of sinusoidal grating) become more difficult to
                 discriminate when surrounded by other objects
                 (flankers). We examine three factors that might
                 influence the degree of crowding with natural-scene
                 stimuli (cropped from photographs of natural scenes):
                 (1) location in the visual field, (2) distance between
                 target and flankers, and (3) flanker-target similarity.
                 We ask how these factors affect crowding in a
                 suprathreshold discrimination experiment where
                 observers rate the perceived differences between two
                 sequentially presented target patches of natural
                 images. The targets might differ in the shape, size,
                 arrangement, or color of items in the scenes. Changes
                  in uncrowded peripheral targets are perceived to be
                  smaller than the same changes viewed foveally.
                 Consistent with previous research on simple stimuli, we
                 find that crowding in the periphery (but not in the
                 fovea) reduces the magnitudes of perceived changes even
                 further, especially when the flankers are closer and
                 more similar to the target. We have tested VDP models
                 based on the response behavior of neurons in visual
                 cortex and the inhibitory interactions between them.
                 The models do not explain the lower ratings for
                 peripherally viewed changes even when the lower
                 peripheral contrast sensitivity was accounted for; nor
                 could they explain the effects of crowding, which
                 others have suggested might arise from errors in the
                 spatial localization of features in the peripheral
                 image. This suggests that conventional VDP models do
                 not port well to peripheral vision.",
  acknowledgement = ack-nhfb,
  articleno =    "26",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
  keywords =     "crowding; image difference metrics; peripheral vision;
                 Peripheral vision; psychophysical testing; VDP models",
}

@Article{Bonneel:2010:BPA,
  author =       "Nicolas Bonneel and Clara Suied and Isabelle
                 Viaud-Delmon and George Drettakis",
  title =        "Bimodal perception of audio-visual material properties
                 for virtual environments",
  journal =      j-TAP,
  volume =       "7",
  number =       "1",
  pages =        "1:1--1:??",
  month =        jan,
  year =         "2010",
  CODEN =        "????",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Mon Mar 15 18:53:12 MDT 2010",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tap.bib",
  acknowledgement = ack-nhfb,
  articleno =    "1",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Rienks:2010:DHO,
  author =       "Rutger Rienks and Ronald Poppe and Dirk Heylen",
  title =        "Differences in head orientation behavior for speakers
                 and listeners: An experiment in a virtual environment",
  journal =      j-TAP,
  volume =       "7",
  number =       "1",
  pages =        "2:1--2:??",
  month =        jan,
  year =         "2010",
  CODEN =        "????",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Mon Mar 15 18:53:12 MDT 2010",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tap.bib",
  acknowledgement = ack-nhfb,
  articleno =    "2",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Giudice:2010:SLN,
  author =       "Nicholas A. Giudice and Jonathan Z. Bakdash and Gordon
                 E. Legge and Rudrava Roy",
  title =        "Spatial learning and navigation using a virtual verbal
                 display",
  journal =      j-TAP,
  volume =       "7",
  number =       "1",
  pages =        "3:1--3:??",
  month =        jan,
  year =         "2010",
  CODEN =        "????",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Mon Mar 15 18:53:12 MDT 2010",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tap.bib",
  acknowledgement = ack-nhfb,
  articleno =    "3",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Lu:2010:VCE,
  author =       "Aidong Lu and Ross Maciejewski and David S. Ebert",
  title =        "Volume composition and evaluation using eye-tracking
                 data",
  journal =      j-TAP,
  volume =       "7",
  number =       "1",
  pages =        "4:1--4:??",
  month =        jan,
  year =         "2010",
  CODEN =        "????",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Mon Mar 15 18:53:12 MDT 2010",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tap.bib",
  acknowledgement = ack-nhfb,
  articleno =    "4",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Berger:2010:SBF,
  author =       "Daniel R. Berger and J{\"o}rg Schulte-Pelkum and
                 Heinrich H. B{\"u}lthoff",
  title =        "Simulating believable forward accelerations on a
                 {Stewart} motion platform",
  journal =      j-TAP,
  volume =       "7",
  number =       "1",
  pages =        "5:1--5:??",
  month =        jan,
  year =         "2010",
  CODEN =        "????",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Mon Mar 15 18:53:12 MDT 2010",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tap.bib",
  acknowledgement = ack-nhfb,
  articleno =    "5",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Frintrop:2010:CVA,
  author =       "Simone Frintrop and Erich Rome and Henrik I.
                 Christensen",
  title =        "Computational visual attention systems and their
                 cognitive foundations: {A} survey",
  journal =      j-TAP,
  volume =       "7",
  number =       "1",
  pages =        "6:1--6:??",
  month =        jan,
  year =         "2010",
  CODEN =        "????",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Mon Mar 15 18:53:12 MDT 2010",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tap.bib",
  acknowledgement = ack-nhfb,
  articleno =    "6",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Cooke:2010:MSA,
  author =       "Theresa Cooke and Christian Wallraven and Heinrich H.
                 B{\"u}lthoff",
  title =        "Multidimensional scaling analysis of haptic
                 exploratory procedures",
  journal =      j-TAP,
  volume =       "7",
  number =       "1",
  pages =        "7:1--7:??",
  month =        jan,
  year =         "2010",
  CODEN =        "????",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Mon Mar 15 18:53:12 MDT 2010",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tap.bib",
  acknowledgement = ack-nhfb,
  articleno =    "7",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Shamir:2010:IES,
  author =       "Lior Shamir and Tomasz Macura and Nikita Orlov and D.
                 Mark Eckley and Ilya G. Goldberg",
  title =        "Impressionism, expressionism, surrealism: Automated
                 recognition of painters and schools of art",
  journal =      j-TAP,
  volume =       "7",
  number =       "2",
  pages =        "8:1--8:??",
  month =        feb,
  year =         "2010",
  CODEN =        "????",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Mon Mar 15 18:53:15 MDT 2010",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tap.bib",
  acknowledgement = ack-nhfb,
  articleno =    "8",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Mania:2010:CTS,
  author =       "Katerina Mania and Shahrul Badariah and Matthew Coxon
                 and Phil Watten",
  title =        "Cognitive transfer of spatial awareness states from
                 immersive virtual environments to reality",
  journal =      j-TAP,
  volume =       "7",
  number =       "2",
  pages =        "9:1--9:??",
  month =        feb,
  year =         "2010",
  CODEN =        "????",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Mon Mar 15 18:53:15 MDT 2010",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tap.bib",
  acknowledgement = ack-nhfb,
  articleno =    "9",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{VanMensvoort:2010:PMO,
  author =       "Koert {Van Mensvoort} and Peter Vos and Dik J. Hermes
                 and Robert {Van Liere}",
  title =        "Perception of mechanically and optically simulated
                 bumps and holes",
  journal =      j-TAP,
  volume =       "7",
  number =       "2",
  pages =        "10:1--10:??",
  month =        feb,
  year =         "2010",
  CODEN =        "????",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Mon Mar 15 18:53:15 MDT 2010",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tap.bib",
  acknowledgement = ack-nhfb,
  articleno =    "10",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Souman:2010:MVW,
  author =       "Jan L. Souman and Paolo Robuffo Giordano and Ilja
                 Frissen and Alessandro De Luca and Marc O. Ernst",
  title =        "Making virtual walking real: Perceptual evaluation of
                 a new treadmill control algorithm",
  journal =      j-TAP,
  volume =       "7",
  number =       "2",
  pages =        "11:1--11:??",
  month =        feb,
  year =         "2010",
  CODEN =        "????",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Mon Mar 15 18:53:15 MDT 2010",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tap.bib",
  acknowledgement = ack-nhfb,
  articleno =    "11",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Kim:2010:MSH,
  author =       "Youngmin Kim and Amitabh Varshney and David W. Jacobs
                 and Fran{\c{c}}ois Guimbreti{\`e}re",
  title =        "Mesh saliency and human eye fixations",
  journal =      j-TAP,
  volume =       "7",
  number =       "2",
  pages =        "12:1--12:??",
  month =        feb,
  year =         "2010",
  CODEN =        "????",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Mon Mar 15 18:53:15 MDT 2010",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tap.bib",
  acknowledgement = ack-nhfb,
  articleno =    "12",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Newsham:2010:CLQ,
  author =       "Guy R. Newsham and Duygu Cetegen and Jennifer A.
                 Veitch and Lorne Whitehead",
  title =        "Comparing lighting quality evaluations of real scenes
                 with those from high dynamic range and conventional
                 images",
  journal =      j-TAP,
  volume =       "7",
  number =       "2",
  pages =        "13:1--13:??",
  month =        feb,
  year =         "2010",
  CODEN =        "????",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Mon Mar 15 18:53:15 MDT 2010",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tap.bib",
  acknowledgement = ack-nhfb,
  articleno =    "13",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Mion:2010:POA,
  author =       "Luca Mion and Giovanni {De Poli} and Ennio
                 Rapan{\`a}",
  title =        "Perceptual organization of affective and sensorial
                 expressive intentions in music performance",
  journal =      j-TAP,
  volume =       "7",
  number =       "2",
  pages =        "14:1--14:??",
  month =        feb,
  year =         "2010",
  CODEN =        "????",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Mon Mar 15 18:53:15 MDT 2010",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tap.bib",
  acknowledgement = ack-nhfb,
  articleno =    "14",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Blank:2010:IRP,
  author =       "Amy Blank and Allison M. Okamura and Katherine J.
                 Kuchenbecker",
  title =        "Identifying the role of proprioception in upper-limb
                 prosthesis control: Studies on targeted motion",
  journal =      j-TAP,
  volume =       "7",
  number =       "3",
  pages =        "15:1--15:??",
  month =        jun,
  year =         "2010",
  CODEN =        "????",
  DOI =          "http://doi.acm.org/10.1145/1773965.1773966",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Thu Jul 22 12:46:16 MDT 2010",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "Proprioception plays a crucial role in enabling humans
                 to move purposively and interact with their physical
                 surroundings. Current technology in upper-limb
                 prostheses, while beginning to incorporate some haptic
                 feedback, does not provide amputees with proprioceptive
                 information about the state of the limb. Thus, the
                 wearer must visually monitor the limb, which is often
                 inconvenient or even impossible for some tasks. This
                 work seeks to quantify the potential benefits of
                 incorporating proprioceptive motion feedback into
                 upper-limb prosthesis designs. We apply a noninvasive
                 method for controlling the availability of
                 proprioceptive motion feedback in unimpaired
                 individuals in a human subject study to compare the
                 benefits of visual and proprioceptive motion feedback
                 in targeted motion tasks. Combined results of the
                 current study and our previous study using a different
                 task indicate that the addition of proprioceptive
                 motion feedback improves targeting accuracy under
                 nonsighted conditions and, for some tasks, under
                 sighted conditions as well. This work motivates the
                 development of methods for providing artificial
                 proprioceptive feedback to a prosthesis wearer.",
  acknowledgement = ack-nhfb,
  articleno =    "15",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
  keywords =     "Human psychophysics; motion control; proprioception;
                 prosthetic limb control; vision",
}

@Article{Radun:2010:EMV,
  author =       "Jenni Radun and Tuomas Leisti and Toni Virtanen and
                 Jukka H{\"a}kkinen and Tero Vuori and G{\"o}te Nyman",
  title =        "Evaluating the multivariate visual quality performance
                 of image-processing components",
  journal =      j-TAP,
  volume =       "7",
  number =       "3",
  pages =        "16:1--16:??",
  month =        jun,
  year =         "2010",
  CODEN =        "????",
  DOI =          "http://doi.acm.org/10.1145/1773965.1773967",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Thu Jul 22 12:46:16 MDT 2010",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "The estimation of image quality is a demanding task,
                 especially when estimating different high-quality
                 imaging products or their components. The challenge is
                 the multivariate nature of image quality as well as the
                 need to use na{\"\i}ve observers as test subjects,
                 since they are the actual end-users of the products.
                 Here, we use a subjective approach suitable for
                 estimating the quality performance of different imaging
                 device components with na{\"\i}ve observers --- the
                 interpretation-based quality (IBQ) approach. From two
                 studies with 61 na{\"\i}ve observers, 17 natural
                 image contents, and 13 different camera image signal
                 processor pipelines, we determined the subjectively
                 crucial image quality attributes and dimensions and the
                 description of each pipeline's perceived image quality
                 performance. We found that the subjectively most
                 important image quality dimensions were color
                 shift/naturalness, darkness, and sharpness. The first
                 dimension, which was related to naturalness and colors,
                 distinguished the good-quality pipelines from the
                 middle- and low-quality groups, and the dimensions of
                 darkness and sharpness described why the quality failed
                 in the low-quality pipelines. The study suggests that
                  the high-level concept of naturalness is a requirement for
                 high-quality images, whereas quality can fail for other
                 reasons in low-quality images, and this failure can be
                 described by low-level concepts, such as darkness and
                 sharpness.",
  acknowledgement = ack-nhfb,
  articleno =    "16",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
  keywords =     "correspondence analysis; Image quality; qualitative
                 methodology; quality dimensions; subjective
                 measurements",
}

@Article{Andersen:2010:WME,
  author =       "Tue Haste Andersen and Shumin Zhai",
  title =        "``Writing with music'': Exploring the use of auditory
                 feedback in gesture interfaces",
  journal =      j-TAP,
  volume =       "7",
  number =       "3",
  pages =        "17:1--17:??",
  month =        jun,
  year =         "2010",
  CODEN =        "????",
  DOI =          "http://doi.acm.org/10.1145/1773965.1773968",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Thu Jul 22 12:46:16 MDT 2010",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "We investigate the use of auditory feedback in
                 pen-gesture interfaces in a series of informal and
                 formal experiments. Initial iterative exploration
                 showed that gaining performance advantage with auditory
                 feedback was possible using absolute cues and state
                 feedback after the gesture was produced and recognized.
                 However, gaining learning or performance advantage from
                 auditory feedback tightly coupled with the pen-gesture
                 articulation and recognition process was more
                 difficult. To establish a systematic baseline,
                 Experiment 1 formally evaluated gesture production
                 accuracy as a function of auditory and visual feedback.
                 Size of gestures and the aperture of the closed
                 gestures were influenced by the visual or auditory
                 feedback, while other measures such as shape distance
                 and directional difference were not, supporting the
                 theory that feedback is too slow to strongly influence
                 the production of pen stroke gestures. Experiment 2
                 focused on the subjective aspects of auditory feedback
                  in pen-gesture interfaces. Participants' ratings on the
                  dimensions of being wonderful and stimulating were
                  significantly higher with musical auditory feedback.
                 Several lessons regarding pen gestures and auditory
                 feedback are drawn from our exploration: a few simple
                 functions such as indicating the pen-gesture
                 recognition results can be achieved, gaining
                 performance and learning advantage through tightly
                 coupled process-based auditory feedback is difficult,
                 pen-gesture sets and their recognizers can be designed
                 to minimize visual dependence, and people's subjective
                 experience of gesture interaction can be influenced
                 using musical auditory feedback. These lessons may
                 serve as references and stepping stones toward future
                 research and development in pen-gesture interfaces with
                 auditory feedback.",
  acknowledgement = ack-nhfb,
  articleno =    "17",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
  keywords =     "Audio; auditory interface; feedback; gesture; music;
                 pen; sound; text input",
}

@Article{Kim:2010:PGG,
  author =       "Juno Kim and Stephen A. Palmisano and April Ash and
                 Robert S. Allison",
  title =        "Pilot gaze and glideslope control",
  journal =      j-TAP,
  volume =       "7",
  number =       "3",
  pages =        "18:1--18:??",
  month =        jun,
  year =         "2010",
  CODEN =        "????",
  DOI =          "http://doi.acm.org/10.1145/1773965.1773969",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Thu Jul 22 12:46:16 MDT 2010",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "We examined the eye movements of pilots as they
                 carried out simulated aircraft landings under day and
                  night lighting conditions. Our five student pilots and
                  five certified pilots were instructed to quickly achieve and
                 then maintain a constant 3-degree glideslope relative
                 to the runway. However, both groups of pilots were
                 found to make significant glideslope control errors,
                 especially during simulated night approaches. We found
                 that pilot gaze was directed most often toward the
                 runway and to the ground region located immediately in
                 front of the runway, compared to other visual scene
                 features. In general, their gaze was skewed toward the
                 near half of the runway and tended to follow the runway
                 threshold as it moved on the screen. Contrary to
                 expectations, pilot gaze was not consistently directed
                 at the aircraft's simulated aimpoint (i.e., its
                 predicted future touchdown point based on scene
                 motion). However, pilots did tend to fly the aircraft
                 so that this point was aligned with the runway
                 threshold. We conclude that the supplementary
                 out-of-cockpit visual cues available during day landing
                 conditions facilitated glideslope control performance.
                 The available evidence suggests that these
                 supplementary visual cues are acquired through
                 peripheral vision, without the need for active
                 fixation.",
  acknowledgement = ack-nhfb,
  articleno =    "18",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
  keywords =     "aviation; gaze; glideslope control; landing; Vision",
}

@Article{Kjellin:2010:EVS,
  author =       "Andreas Kjellin and Lars Winkler Pettersson and Stefan
                 Seipel and Mats Lind",
  title =        "Evaluating {$2$D} and {$3$D} visualizations of
                 spatiotemporal information",
  journal =      j-TAP,
  volume =       "7",
  number =       "3",
  pages =        "19:1--19:??",
  month =        jun,
  year =         "2010",
  CODEN =        "????",
  DOI =          "http://doi.acm.org/10.1145/1773965.1773970",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Thu Jul 22 12:46:16 MDT 2010",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "Time-varying geospatial data presents some specific
                 challenges for visualization. Here, we report the
                 results of three experiments aiming at evaluating the
                 relative efficiency of three existing visualization
                 techniques for a class of such data. The class chosen
                 was that of object movement, especially the movements
                 of vehicles in a fictitious landscape. Two different
                 tasks were also chosen. One was to predict where three
                 vehicles will meet in the future given a visualization
                 of their past movement history. The second task was to
                 estimate the order in which four vehicles arrived at a
                  specific place. Our results reveal that previous
                  findings had overgeneralized human perception in these
                  situations and that, for a given task, large
                  differences in user efficiency exist between different
                  types of visualizations depicting the same data.
                 Furthermore, our results are in line with earlier
                 general findings on the nature of human perception of
                 both object shape and scene changes. Finally, the need
                 for new taxonomies of data and tasks based on results
                 from perception research is discussed.",
  acknowledgement = ack-nhfb,
  articleno =    "19",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
  keywords =     "2D; 3D; animation; space--time cube; spatiotemporal;
                 user studies",
}

@Article{Pineo:2010:NMF,
  author =       "Daniel Pineo and Colin Ware",
  title =        "Neural modeling of flow rendering effectiveness",
  journal =      j-TAP,
  volume =       "7",
  number =       "3",
  pages =        "20:1--20:??",
  month =        jun,
  year =         "2010",
  CODEN =        "????",
  DOI =          "http://doi.acm.org/10.1145/1773965.1773971",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Thu Jul 22 12:46:16 MDT 2010",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "It has been previously proposed that understanding the
                 mechanisms of contour perception can provide a theory
                 for why some flow rendering methods allow for better
                 judgments of advection pathways than others. In this
                 article, we develop this theory through a numerical
                 model of the primary visual cortex of the brain (Visual
                 Area 1) where contour enhancement is understood to
                 occur according to most neurological theories. We apply
                 a two-stage model of contour perception to various
                 visual representations of flow fields evaluated using
                 the advection task of Laidlaw et al. In the first
                 stage, contour {\em enhancement\/} is modeled based on
                 Li's cortical model. In the second stage, a model of
                 streamline {\em tracing\/} is proposed, designed to
                 support the advection task. We examine the predictive
                 power of the model by comparing its performance to that
                 of human subjects on the advection task with four
                 different visualizations. The results show the same
                 overall pattern for humans and the model. In both
                 cases, the best performance was obtained with an
                  aligned streamline-based method, which tied with a
                 LIC-based method. Using a regular or jittered grid of
                 arrows produced worse results. The model yields
                 insights into the relative strengths of different flow
                 visualization methods for the task of visualizing
                 advection pathways.",
  acknowledgement = ack-nhfb,
  articleno =    "20",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
  keywords =     "Contour perception; flow visualization; perceptual
                 theory; visual cortex; visualization",
}

@Article{Mania:2010:EAS,
  author =       "Katerina Mania and Martin S. Banks",
  title =        "Editorial -- {APGV 2010} special issue",
  journal =      j-TAP,
  volume =       "7",
  number =       "4",
  pages =        "21:1--21:??",
  month =        jul,
  year =         "2010",
  CODEN =        "????",
  DOI =          "http://doi.acm.org/10.1145/1823738.1823739",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Thu Jul 22 12:46:28 MDT 2010",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tap.bib",
  acknowledgement = ack-nhfb,
  articleno =    "21",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Hodgins:2010:SAA,
  author =       "Jessica Hodgins and Sophie J{\"o}rg and Carol
                 O'Sullivan and Sang Il Park and Moshe Mahler",
  title =        "The saliency of anomalies in animated human
                 characters",
  journal =      j-TAP,
  volume =       "7",
  number =       "4",
  pages =        "22:1--22:??",
  month =        jul,
  year =         "2010",
  CODEN =        "????",
  DOI =          "http://doi.acm.org/10.1145/1823738.1823740",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Thu Jul 22 12:46:28 MDT 2010",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "Virtual characters are much in demand for animated
                 movies, games, and other applications. Rapid advances
                 in performance capture and advanced rendering
                 techniques have allowed the movie industry in
                 particular to create characters that appear very
                 human-like. However, with these new capabilities has
                  come the realization that such characters are not yet
                  quite ``right.'' One possible hypothesis is that these
                 virtual humans fall into an ``Uncanny Valley'', where
                 the viewer's emotional response is repulsion or
                 rejection, rather than the empathy or emotional
                 engagement that their creators had hoped for. To
                 explore these issues, we created three animated
                 vignettes of an arguing couple with detailed motion for
                 the face, eyes, hair, and body. In a set of perceptual
                 experiments, we explore the relative importance of
                 different anomalies using two different methods: a
                 questionnaire to determine the emotional response to
                 the full-length vignettes, with and without facial
                 motion and audio; and a 2AFC (two alternative forced
                 choice) task to compare the performance of a virtual
                 ``actor'' in short clips (extracts from the vignettes)
                 depicting a range of different facial and body
                 anomalies. We found that the facial anomalies are
                 particularly salient, even when very significant body
                 animation anomalies are present.",
  acknowledgement = ack-nhfb,
  articleno =    "22",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
  keywords =     "eye tracking; Human animation; motion capture;
                 perception of human motion; virtual characters",
}

@Article{Carter:2010:PMG,
  author =       "Elizabeth J. Carter and Lavanya Sharan and Laura
                 Trutoiu and Iain Matthews and Jessica K. Hodgins",
  title =        "Perceptually motivated guidelines for voice
                 synchronization in film",
  journal =      j-TAP,
  volume =       "7",
  number =       "4",
  pages =        "23:1--23:??",
  month =        jul,
  year =         "2010",
  CODEN =        "????",
  DOI =          "http://doi.acm.org/10.1145/1823738.1823741",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Thu Jul 22 12:46:28 MDT 2010",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "We consume video content in a multitude of ways,
                 including in movie theaters, on television, on DVDs and
                 Blu-rays, online, on smart phones, and on portable
                 media players. For quality control purposes, it is
                 important to have a uniform viewing experience across
                 these various platforms. In this work, we focus on
                 voice synchronization, an aspect of video quality that
                 is strongly affected by current post-production and
                 transmission practices. We examined the synchronization
                 of an actor's voice and lip movements in two distinct
                 scenarios. First, we simulated the temporal mismatch
                 between the audio and video tracks that can occur
                 during dubbing or during broadcast. Next, we recreated
                 the pitch changes that result from conversions between
                 formats with different frame rates. We show, for the
                 first time, that these audio visual mismatches affect
                 viewer enjoyment. When temporal synchronization is
                 noticeably absent, there is a decrease in the perceived
                 performance quality and the perceived emotional
                 intensity of a performance. For pitch changes, we find
                  that higher-pitched voices are not preferred,
                  especially for male actors. Based on our findings, we
                  caution that mismatched audio and video signals
                  negatively affect viewer experience.",
  acknowledgement = ack-nhfb,
  articleno =    "23",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
  keywords =     "auditory perceptual research; human perception and
                 performance; Multisensory perception and integration;
                 visual psychophysics",
}

@Article{Wijntjes:2010:PPS,
  author =       "Maarten W. A. Wijntjes and Sylvia C. Pont",
  title =        "Pointing in pictorial space: Quantifying the perceived
                 relative depth structure in mono and stereo images of
                 natural scenes",
  journal =      j-TAP,
  volume =       "7",
  number =       "4",
  pages =        "24:1--24:??",
  month =        jul,
  year =         "2010",
  CODEN =        "????",
  DOI =          "http://doi.acm.org/10.1145/1823738.1823742",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Thu Jul 22 12:46:28 MDT 2010",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "Although there has recently been a large increase in
                 commercial 3D applications, relatively little is known
                 about the quantitative perceptual improvement from
                 binocular disparity. In this study we developed a
                 method to measure the perceived relative depth
                 structure of natural scenes. Observers were instructed
                 to adjust the direction of a virtual pointer from one
                 object to another. The pointing data was used to
                 reconstruct the relative logarithmic depths of the
                 objects in pictorial space. The results showed that the
                 relative depth structure is more similar between
                 observers for stereo images than for mono images in two
                 out of three scenes. A similar result was found for the
                 depth range: for the same two scenes the stereo images
                 were perceived as having more depth than the monocular
                 images. In addition, our method allowed us to determine
                 the subjective center of projection. We found that the
                 pointing settings fitted the reconstructed depth best
                 for substantially wider fields of view than the
                 veridical center of projection for both mono and stereo
                 images. The results indicate that the improvement from
                 binocular disparity depends on the scene content:
                 scenes with sufficient monocular information may not
                 profit much from binocular disparity.",
  acknowledgement = ack-nhfb,
  articleno =    "24",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
  keywords =     "binocular disparity; Depth perception; natural
                 scenes",
}

@Article{Couture:2010:ADD,
  author =       "Vincent Couture and Michael S. Langer and
                 S{\'e}bastien Roy",
  title =        "Analysis of disparity distortions in omnistereoscopic
                 displays",
  journal =      j-TAP,
  volume =       "7",
  number =       "4",
  pages =        "25:1--25:??",
  month =        jul,
  year =         "2010",
  CODEN =        "????",
  DOI =          "http://doi.acm.org/10.1145/1823738.1823743",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Thu Jul 22 12:46:28 MDT 2010",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "An omnistereoscopic image is a pair of panoramic
                 images that enables stereoscopic depth perception all
                 around an observer. An omnistereo projection on a
                 cylindrical display does not require tracking of the
                 observer's viewing direction. However, such a display
                 introduces stereo distortions. In this article, we
                 investigate two projection models for rendering 3D
                 scenes in omnistereo. The first is designed to give
                 zero disparity errors at the center of the visual
                 field. The second is the well-known slit-camera model.
                 For both models, disparity errors are shown to increase
                 gradually in the periphery, as visual stereo acuity
                 decreases. We use available data on human stereoscopic
                 acuity limits to argue that depth distortions caused by
                 these models are so small that they cannot be
                 perceived.",
  acknowledgement = ack-nhfb,
  articleno =    "25",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
  keywords =     "depth acuity; median plane; Panorama; perception;
                 stereo",
}

@Article{Grechkin:2010:HDP,
  author =       "Timofey Y. Grechkin and Tien Dat Nguyen and Jodie M.
                 Plumert and James F. Cremer and Joseph K. Kearney",
  title =        "How does presentation method and measurement protocol
                 affect distance estimation in real and virtual
                 environments?",
  journal =      j-TAP,
  volume =       "7",
  number =       "4",
  pages =        "26:1--26:??",
  month =        jul,
  year =         "2010",
  CODEN =        "????",
  DOI =          "http://doi.acm.org/10.1145/1823738.1823744",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Thu Jul 22 12:46:28 MDT 2010",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "We conducted two experiments that compared distance
                  perception in real and virtual environments across six
                  visual presentation methods, using either timed imagined
                 walking or direct blindfolded walking, while
                 controlling for several other factors that could
                 potentially impact distance perception. Our
                 presentation conditions included unencumbered real
                 world, real world seen through an HMD, virtual world
                 seen through an HMD, augmented reality seen through an
                 HMD, virtual world seen on multiple, large immersive
                 screens, and photo-based presentation of the real world
                 seen on multiple, large immersive screens. We found
                 that there was a similar degree of underestimation of
                 distance in the HMD and large-screen presentations of
                 virtual environments. We also found that while wearing
                 the HMD can cause some degree of distance
                 underestimation, this effect depends on the measurement
                 protocol used. Finally, we found that photo-based
                 presentation did not help to improve distance
                 perception in a large-screen immersive display system.
                 The discussion focuses on points of similarity and
                 difference with previous work on distance estimation in
                 real and virtual environments.",
  acknowledgement = ack-nhfb,
  articleno =    "26",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
  keywords =     "Distance estimation; egocentric depth perception;
                 head-mounted displays; large-screen immersive displays;
                 perception; virtual environments",
}

@Article{Aydin:2010:VSE,
  author =       "Tun{\c{c}} Ozan Aydin and Martin {\v{C}}ad{\'\i}k and
                 Karol Myszkowski and Hans-Peter Seidel",
  title =        "Visually significant edges",
  journal =      j-TAP,
  volume =       "7",
  number =       "4",
  pages =        "27:1--27:??",
  month =        jul,
  year =         "2010",
  CODEN =        "????",
  DOI =          "http://doi.acm.org/10.1145/1823738.1823745",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Thu Jul 22 12:46:28 MDT 2010",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "Numerous image processing and computer graphics
                 methods make use of either explicitly computed strength
                 of image edges, or an implicit edge strength definition
                 that is integrated into their algorithms. In both
                 cases, the end result is highly affected by the
                 computation of edge strength. We address several
                 shortcomings of the widely used gradient
                 magnitude-based edge strength model through the
                 computation of a hypothetical Human Visual System (HVS)
                 response at edge locations. Contrary to gradient
                 magnitude, the resulting ``visual significance'' values
                 account for various HVS mechanisms such as luminance
                 adaptation and visual masking, and are scaled in
                 perceptually linear units that are uniform across
                 images. The visual significance computation is
                 implemented in a fast multiscale second-generation
                 wavelet framework which we use to demonstrate the
                 differences in image retargeting, HDR image stitching,
                 and tone mapping applications with respect to the
                 gradient magnitude model. Our results suggest that
                 simple perceptual models provide qualitative
                 improvements on applications utilizing edge strength at
                 the cost of a modest computational burden.",
  acknowledgement = ack-nhfb,
  articleno =    "27",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
  keywords =     "Edge strength; HDR; visual perception",
}

@Article{Vicentini:2010:EFT,
  author =       "M. Vicentini and S. Galvan and D. Botturi and P.
                 Fiorini",
  title =        "Evaluation of force and torque magnitude
                 discrimination thresholds on the human hand-arm
                 system",
  journal =      j-TAP,
  volume =       "8",
  number =       "1",
  pages =        "1:1--1:??",
  month =        oct,
  year =         "2010",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/1857893.1857894",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Thu Dec 9 12:00:41 MST 2010",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "This article reports on experiments about haptic
                 perception aimed at measuring the force/torque
                 differential thresholds applied to the hand-arm system.
                  The experimental work analyzes how force is fed back
                  to the user by means of a six-degree-of-freedom haptic
                  device. Our findings on force perception indicate that
                  the just-noticeable difference is generally higher than
                  previously reported in the literature and not constant
                  along the stimulus continuum. We also found evidence
                  that the thresholds differ among directions.
                  Furthermore, asymmetries in force perception, which
                  were not described in previous reports, are evident for
                  most of the directions.
                 These findings support our claim that human beings
                 perceive forces differently along different directions,
                 thus suggesting that perception can also be enhanced by
                 suitable signal processing, that is, with a
                 manipulation of the force signal before it reaches the
                 haptic device.",
  acknowledgement = ack-nhfb,
  articleno =    "1",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}
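
%%% The force/torque differential thresholds in the Vicentini:2010:EFT
%%% entry above are the kind of quantity typically estimated with an
%%% adaptive staircase.  A minimal 1-up/2-down staircase sketch in
%%% Python follows; the simulated observer, step size, and stopping
%%% rule are illustrative assumptions, not details from the article.
%%%
%%%   import random
%%%
%%%   def detects(delta, jnd=0.3):
%%%       # toy observer: detection probability grows with the
%%%       # increment delta relative to the true threshold jnd
%%%       return random.random() < min(1.0, 0.5 + 0.5 * delta / jnd)
%%%
%%%   delta, step = 1.0, 0.1          # starting increment and step size
%%%   run, last_dir, reversals = 0, 0, []
%%%   while len(reversals) < 8:       # stop after 8 reversals
%%%       if detects(delta):
%%%           run += 1
%%%           if run == 2:            # two correct -> make it harder
%%%               run = 0
%%%               if last_dir == 1:
%%%                   reversals.append(delta)
%%%               delta, last_dir = max(0.05, delta - step), -1
%%%       else:                       # one miss -> make it easier
%%%           run = 0
%%%           if last_dir == -1:
%%%               reversals.append(delta)
%%%           delta, last_dir = delta + step, 1
%%%
%%%   print("JND estimate:", sum(reversals) / len(reversals))
%%%
%%% The 1-up/2-down rule converges on the ~71%-correct point of the
%%% psychometric function, a common operational definition of the JND.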

@Article{Mourkoussis:2010:QFV,
  author =       "Nicholaos Mourkoussis and Fiona M. Rivera and Tom
                 Troscianko and Tim Dixon and Rycharde Hawkes and
                 Katerina Mania",
  title =        "Quantifying fidelity for virtual environment
                 simulations employing memory schema assumptions",
  journal =      j-TAP,
  volume =       "8",
  number =       "1",
  pages =        "2:1--2:??",
  month =        oct,
  year =         "2010",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/1857893.1857895",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Thu Dec 9 12:00:41 MST 2010",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "In a virtual environment (VE), efficient techniques
                 are often needed to economize on rendering computation
                 without compromising the information transmitted. The
                 reported experiments devise a functional fidelity
                 metric by exploiting research on memory schemata.
                 According to the proposed measure, similar information
                 would be transmitted across synthetic and real-world
                 scenes depicting a specific schema. This would
                 ultimately indicate which areas in a VE could be
                 rendered in lower quality without affecting information
                 uptake. We examine whether computationally more
                 expensive scenes of greater visual fidelity affect
                 memory performance after exposure to immersive VEs, or
                 whether they are merely more aesthetically pleasing
                 than their diminished visual quality counterparts.",
  acknowledgement = ack-nhfb,
  articleno =    "2",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Llobera:2010:PMD,
  author =       "Joan Llobera and Bernhard Spanlang and Giulio Ruffini
                 and Mel Slater",
  title =        "Proxemics with multiple dynamic characters in an
                 immersive virtual environment",
  journal =      j-TAP,
  volume =       "8",
  number =       "1",
  pages =        "3:1--3:??",
  month =        oct,
  year =         "2010",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/1857893.1857896",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Thu Dec 9 12:00:41 MST 2010",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "An experiment was carried out to examine the impact on
                 electrodermal activity of people when approached by
                 groups of one or four virtual characters at varying
                  distances. Based on proxemics theory, it was
                  hypothesized that the closer the virtual characters
                  approached the participant, the greater the level of
                  physiological arousal. Physiological arousal was
                 measured by the number of skin conductance responses
                 within a short time period after the approach, and the
                 maximum change in skin conductance level 5 seconds
                  after the approach. The virtual characters were each
                  either a female figure or a human-sized cylinder, and
                  one or four characters approached each subject a
                  total of 12 times.",
  acknowledgement = ack-nhfb,
  articleno =    "3",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Bernhard:2010:EPD,
  author =       "Matthias Bernhard and Efstathios Stavrakis and Michael
                 Wimmer",
  title =        "An empirical pipeline to derive gaze prediction
                 heuristics for {$3$D} action games",
  journal =      j-TAP,
  volume =       "8",
  number =       "1",
  pages =        "4:1--4:??",
  month =        oct,
  year =         "2010",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/1857893.1857897",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Thu Dec 9 12:00:41 MST 2010",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "Gaze analysis and prediction in interactive virtual
                 environments, such as games, is a challenging topic
                 since the 3D perspective and variations of the
                 viewpoint as well as the current task introduce many
                 variables that affect the distribution of gaze. In this
                 article, we present a novel pipeline to study
                 eye-tracking data acquired from interactive 3D
                 applications. The result of the pipeline is an
                 importance map which scores the amount of gaze spent on
                 each object. This importance map is then used as a
                 heuristic to predict a user's visual attention
                 according to the object properties present at runtime.
                 The novelty of this approach is that the analysis is
                 performed in object space and the importance map is
                 defined in the feature space of high-level
                 properties.",
  acknowledgement = ack-nhfb,
  articleno =    "4",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}
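
%%% The importance map in the Bernhard:2010:EPD entry above scores the
%%% amount of gaze spent on each object.  A minimal aggregation sketch
%%% in Python; the sample layout (one object id per gaze sample) and
%%% the normalization are assumptions of this sketch, not the authors'
%%% pipeline.
%%%
%%%   from collections import Counter
%%%
%%%   # each gaze sample names the object hit by the gaze ray
%%%   gaze_samples = ["door", "enemy", "enemy", "hud", "enemy", "door"]
%%%
%%%   counts = Counter(gaze_samples)
%%%   total = sum(counts.values())
%%%   importance = {obj: n / total for obj, n in counts.items()}
%%%   print(importance)   # fraction of gaze time per object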

@Article{Li:2010:SCS,
  author =       "Bing Li and Weihua Xiong and De Xu and Hong Bao",
  title =        "A supervised combination strategy for illumination
                 chromaticity estimation",
  journal =      j-TAP,
  volume =       "8",
  number =       "1",
  pages =        "5:1--5:??",
  month =        oct,
  year =         "2010",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/1857893.1857898",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Thu Dec 9 12:00:41 MST 2010",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "Color constancy is an important perceptual ability of
                 humans to recover the color of objects invariant of
                 light information. It is also necessary for a robust
                 machine vision system. Until now, a number of color
                 constancy algorithms have been proposed in the
                 literature. In particular, the edge-based color
                 constancy uses the edge of an image to estimate light
                 color. It is shown to be a rich framework that can
                 represent many existing illumination estimation
                 solutions with various parameter settings. However,
                  color constancy is an ill-posed problem; every
                  algorithm is proposed under certain assumptions and
                  produces its best performance only when those
                  assumptions are satisfied.",
  acknowledgement = ack-nhfb,
  articleno =    "5",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}
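
%%% The edge-based color constancy framework in the Li:2010:SCS entry
%%% above estimates the light color from image derivatives.  A minimal
%%% gray-edge style sketch in Python/numpy; the Minkowski norm p and
%%% the omission of pre-smoothing are simplifying assumptions of this
%%% sketch.
%%%
%%%   import numpy as np
%%%
%%%   def gray_edge(img, p=6):
%%%       """img: H x W x 3 float array; returns a unit RGB estimate
%%%       of the illuminant color."""
%%%       dx = np.abs(np.diff(img, axis=1))   # horizontal derivatives
%%%       e = (dx ** p).sum(axis=(0, 1)) ** (1.0 / p)
%%%       return e / np.linalg.norm(e)
%%%
%%%   img = np.random.rand(64, 64, 3)         # stand-in image
%%%   print(gray_edge(img))                   # estimated light color
%%%
%%% Dividing each channel by the estimate (a von Kries correction)
%%% then yields a white-balanced image.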

@Article{Hasic:2010:PGH,
  author =       "Jasminka Hasic and Alan Chalmers and Elena Sikudova",
  title =        "Perceptually guided high-fidelity rendering exploiting
                 movement bias in visual attention",
  journal =      j-TAP,
  volume =       "8",
  number =       "1",
  pages =        "6:1--6:??",
  month =        oct,
  year =         "2010",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/1857893.1857899",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Thu Dec 9 12:00:41 MST 2010",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "A major obstacle for real-time rendering of
                 high-fidelity graphics is computational complexity. A
                 key point to consider in the pursuit of ``realism in
                 real time'' in computer graphics is that the Human
                 Visual System (HVS) is a fundamental part of the
                 rendering pipeline. The human eye is only capable of
                 sensing image detail in a $2^\circ$ foveal region,
                 relying on rapid eye movements, or saccades, to jump
                 between points of interest. These points of interest
                 are prioritized based on the saliency of the objects in
                 the scene or the task the user is performing. Such
                 ``glimpses'' of a scene are then assembled by the HVS
                 into a coherent, but inevitably imperfect, visual
                 perception of the environment.",
  acknowledgement = ack-nhfb,
  articleno =    "6",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}
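
%%% The $2^\circ$ foveal region in the Hasic:2010:PGH entry above maps
%%% to a screen extent that follows from simple geometry; an
%%% illustrative calculation in LaTeX (the viewing distance is an
%%% assumption, not from the article):
%%%
%%%   w = 2 d \tan(1^\circ) \approx 0.035\, d ,
%%%
%%% so at a desktop viewing distance of $d = 60$ cm the fovea covers
%%% only about $2.1$ cm of the screen, a small fraction of a typical
%%% display.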

@Article{Hover:2010:UBE,
  author =       "Raphael H{\"o}ver and Massimiliano {Di Luca} and
                 Matthias Harders",
  title =        "User-based evaluation of data-driven haptic
                 rendering",
  journal =      j-TAP,
  volume =       "8",
  number =       "1",
  pages =        "7:1--7:??",
  month =        oct,
  year =         "2010",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/1857893.1857900",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Thu Dec 9 12:00:41 MST 2010",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "In this article, the data-driven haptic rendering
                 approach presented in our earlier work is assessed. The
                  approach relies on recordings from real objects, from
                  which a data-driven model capturing the haptic
                  properties of the object is derived. We conducted two
                 studies. In the first study, the Just Noticeable
                 Difference (JND) for small forces, as encountered in
                 our set-up, was determined. JNDs were obtained both for
                 active and passive user interaction. A conservative
                 threshold curve was derived that was then used to guide
                 the model generation in the second study. The second
                 study examined the achievable rendering fidelity for
                 two objects with different stiffnesses.",
  acknowledgement = ack-nhfb,
  articleno =    "7",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Hassaine:2010:IPP,
  author =       "Djamel Hassaine and Nicolas S. Holliman and Simon P.
                 Liversedge",
  title =        "Investigating the performance of path-searching tasks
                 in depth on multiview displays",
  journal =      j-TAP,
  volume =       "8",
  number =       "1",
  pages =        "8:1--8:??",
  month =        oct,
  year =         "2010",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/1857893.1857901",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Thu Dec 9 12:00:41 MST 2010",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "Multiview auto-stereoscopic displays support both
                 stereopsis and head motion parallax depth cues and
                 could be superior for certain tasks. Previous work
                 suggests that a high viewpoint density (100 views/10cm
                 at the eye) is required to convincingly support motion
                 parallax. However, it remains unclear how viewpoint
                 density affects task performance, and this factor is
                 critical in determining display and system design
                 requirements. Therefore, we present a simulated
                 multiview display apparatus to undertake experiments
                 using a path-searching task in which we control two
                 independent variables: the stereoscopic depth and the
                 viewpoint density. In the first experiment, we varied
                 both cues and found that even small amounts of stereo
                 depth (2cm) reliably improved task accuracy and reduced
                 latency, whereas there was no evidence of dependence on
                 viewpoint density.",
  acknowledgement = ack-nhfb,
  articleno =    "8",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Wilkie:2011:MLC,
  author =       "Richard M. Wilkie and John P. Wann and Robert S.
                 Allison",
  title =        "Modeling locomotor control: The advantages of mobile
                 gaze",
  journal =      j-TAP,
  volume =       "8",
  number =       "2",
  pages =        "9:1--9:??",
  month =        jan,
  year =         "2011",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/1870076.1870077",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Wed Jan 26 14:12:04 MST 2011",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "In 1958, J. J. Gibson put forward proposals on the visual
                 control of locomotion. Research in the last 50 years
                 has served to clarify the sources of visual and
                 nonvisual information that contribute to successful
                 steering, but has yet to determine how this information
                 is optimally combined under conditions of uncertainty.
                 Here, we test the conditions under which a locomotor
                 robot with a mobile camera can steer effectively using
                 simple visual and extra-retinal parameters to examine
                 how such models cope with the noisy real-world visual
                 and motor estimates that are available to humans.",
  acknowledgement = ack-nhfb,
  articleno =    "9",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Ennis:2011:PES,
  author =       "Cathy Ennis and Christopher Peters and Carol
                 O'Sullivan",
  title =        "Perceptual effects of scene context and viewpoint for
                 virtual pedestrian crowds",
  journal =      j-TAP,
  volume =       "8",
  number =       "2",
  pages =        "10:1--10:??",
  month =        jan,
  year =         "2011",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/1870076.1870078",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Wed Jan 26 14:12:04 MST 2011",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "In this article, we evaluate the effects of position,
                 orientation, and camera viewpoint on the plausibility
                 of pedestrian formations. In a set of three perceptual
                 studies, we investigated how humans perceive
                 characteristics of virtual crowds in static scenes
                 reconstructed from annotated still images, where the
                 orientations and positions of the individuals have been
                 modified. We found that by applying rules based on the
                 contextual information of the scene, we improved the
                 perceived realism of the crowd formations when compared
                 to random formations. We also examined the effect of
                 camera viewpoint on the plausibility of virtual
                 pedestrian scenes, and we found that an eye-level
                 viewpoint is more effective for disguising random
                  behaviors, while with a canonical viewpoint these
                  behaviors are perceived as less realistic than with
                  an isometric or top-down viewpoint.",
  acknowledgement = ack-nhfb,
  articleno =    "10",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Stich:2011:PMI,
  author =       "Timo Stich and Christian Linz and Christian Wallraven
                 and Douglas Cunningham and Marcus Magnor",
  title =        "Perception-motivated interpolation of image
                 sequences",
  journal =      j-TAP,
  volume =       "8",
  number =       "2",
  pages =        "11:1--11:??",
  month =        jan,
  year =         "2011",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/1870076.1870079",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Wed Jan 26 14:12:04 MST 2011",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "We present a method for image interpolation that is
                 able to create high-quality, perceptually convincing
                 transitions between recorded images. By implementing
                 concepts derived from human vision, the problem of a
                 physically correct image interpolation is relaxed to
                 that of image interpolation which is perceived as
                 visually correct by human observers. We find that it
                 suffices to focus on exact edge correspondences,
                  homogeneous regions, and coherent motion to compute
                 convincing results. A user study confirms the visual
                 quality of the proposed image interpolation approach.
                 We show how each aspect of our approach increases
                 perceived quality of the result.",
  acknowledgement = ack-nhfb,
  articleno =    "11",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Rosenholtz:2011:DPV,
  author =       "Ruth Rosenholtz and Amal Dorai and Rosalind Freeman",
  title =        "Do predictions of visual perception aid design?",
  journal =      j-TAP,
  volume =       "8",
  number =       "2",
  pages =        "12:1--12:??",
  month =        jan,
  year =         "2011",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/1870076.1870080",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Wed Jan 26 14:12:04 MST 2011",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "Understanding and exploiting the abilities of the
                 human visual system is an important part of the design
                 of usable user interfaces and information
                 visualizations. Designers traditionally learn
                 qualitative rules of thumb for how to enable quick,
                 easy, and veridical perception of their design. More
                 recently, work in human and computer vision has
                 produced more quantitative models of human perception,
                 which take as input arbitrary, complex images of a
                 design. In this article, we ask whether models of
                 perception aid the design process, using our tool
                 DesignEye as a working example of a perceptual tool
                 incorporating such models. Through a series of
                 interactions with designers and design teams, we find
                 that the models can help, but in somewhat unexpected
                 ways.",
  acknowledgement = ack-nhfb,
  articleno =    "12",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Huckauf:2011:OSG,
  author =       "Anke Huckauf and Mario H. Urbina",
  title =        "Object selection in gaze controlled systems: What you
                 don't look at is what you get",
  journal =      j-TAP,
  volume =       "8",
  number =       "2",
  pages =        "13:1--13:??",
  month =        jan,
  year =         "2011",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/1870076.1870081",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Wed Jan 26 14:12:04 MST 2011",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "Controlling computers using eye movements can provide
                 a fast and efficient alternative to the computer mouse.
                 However, implementing object selection in
                 gaze-controlled systems is still a challenge. Dwell
                 times or fixations on a certain object typically used
                 to elicit the selection of this object show several
                 disadvantages. We studied deviations of critical
                 thresholds by an individual and task-specific
                 adaptation method. This demonstrated an enormous
                 variability of optimal dwell times. We developed an
                 alternative approach using antisaccades for selection.
                 For selection by antisaccades, highlighted objects are
                 copied to one side of the object. The object is
                 selected when fixating to the side opposed to that copy
                 requiring to inhibit an automatic gaze shift toward new
                 objects.",
  acknowledgement = ack-nhfb,
  articleno =    "13",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Phillips:2011:ORE,
  author =       "P. Jonathon Phillips and Fang Jiang and Abhijit
                 Narvekar and Julianne Ayyad and Alice J. O'Toole",
  title =        "An other-race effect for face recognition algorithms",
  journal =      j-TAP,
  volume =       "8",
  number =       "2",
  pages =        "14:1--14:??",
  month =        jan,
  year =         "2011",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/1870076.1870082",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Wed Jan 26 14:12:04 MST 2011",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "Psychological research indicates that humans recognize
                 faces of their own race more accurately than faces of
                 other races. This ``other-race effect'' occurs for
                 algorithms tested in a recent international competition
                 for state-of-the-art face recognition algorithms. We
                 report results for a Western algorithm made by fusing
                 eight algorithms from Western countries and an East
                 Asian algorithm made by fusing five algorithms from
                 East Asian countries. At the low false accept rates
                 required for most security applications, the Western
                 algorithm recognized Caucasian faces more accurately
                 than East Asian faces and the East Asian algorithm
                 recognized East Asian faces more accurately than
                 Caucasian faces.",
  acknowledgement = ack-nhfb,
  articleno =    "14",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{TenHolt:2011:HIS,
  author =       "Gineke A. {Ten Holt} and Andrea J. {Van Doorn} and
                 Marcel J. T. Reinders and Emile A. Hendriks and Huib
                 {De Ridder}",
  title =        "Human-inspired search for redundancy in automatic sign
                 language recognition",
  journal =      j-TAP,
  volume =       "8",
  number =       "2",
  pages =        "15:1--15:??",
  month =        jan,
  year =         "2011",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/1870076.1870083",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Wed Jan 26 14:12:04 MST 2011",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "Human perception of sign language can serve as
                 inspiration for the improvement of automatic
                 recognition systems. Experiments with human signers
                 show that sign language signs contain redundancy over
                 time. In this article, experiments are conducted to
                 investigate whether comparable redundancies also exist
                 for an automatic sign language recognition system. Such
                 redundancies could be exploited, for example, by
                 reserving more processing resources for the more
                 informative phases of a sign, or by discarding
                 uninformative phases. In the experiments, an automatic
                 system is trained and tested on isolated fragments of
                 sign language signs. The stimuli used were similar to
                 those of the human signer experiments, allowing us to
                 compare the results.",
  acknowledgement = ack-nhfb,
  articleno =    "15",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Endres:2011:EHO,
  author =       "Dominik Endres and Andrea Christensen and Lars Omlor
                 and Martin A. Giese",
  title =        "Emulating human observers with {Bayesian} binning:
                 Segmentation of action streams",
  journal =      j-TAP,
  volume =       "8",
  number =       "3",
  pages =        "16:1--16:??",
  month =        aug,
  year =         "2011",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/2010325.2010326",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Tue Aug 23 18:20:29 MDT 2011",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "Natural body movements arise in the form of temporal
                 sequences of individual actions. During visual action
                 analysis, the human visual system must accomplish a
                 temporal segmentation of the action stream into
                 individual actions. Such temporal segmentation is also
                 essential to build hierarchical models for action
                 synthesis in computer animation. Ideally, such
                 segmentations should be computed automatically in an
                 unsupervised manner. We present an unsupervised
                 segmentation algorithm that is based on Bayesian
                 Binning (BB) and compare it to human segmentations
                 derived from psychophysical data. BB has the advantage
                 that the observation model can be easily exchanged.
                 Moreover, being an exact Bayesian method, BB allows for
                 the automatic determination of the number and positions
                 of segmentation points.",
  acknowledgement = ack-nhfb,
  articleno =    "16",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Trutoiu:2011:MAE,
  author =       "Laura C. Trutoiu and Elizabeth J. Carter and Iain
                 Matthews and Jessica K. Hodgins",
  title =        "Modeling and animating eye blinks",
  journal =      j-TAP,
  volume =       "8",
  number =       "3",
  pages =        "17:1--17:??",
  month =        aug,
  year =         "2011",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/2010325.2010327",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Tue Aug 23 18:20:29 MDT 2011",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "Facial animation often falls short in conveying the
                 nuances present in the facial dynamics of humans. In
                 this article, we investigate the subtleties of the
                 spatial and temporal aspects of eye blinks.
                 Conventional methods for eye blink animation generally
                 employ temporally and spatially symmetric sequences;
                 however, naturally occurring blinks in humans show a
                  pronounced asymmetry in both dimensions. We present an
                 analysis of naturally occurring blinks that was
                 performed by tracking data from high-speed video using
                 active appearance models. Based on this analysis, we
                 generate a set of key-frame parameters that closely
                 match naturally occurring blinks. We compare the
                 perceived naturalness of blinks that are animated based
                 on real data to those created using textbook animation
                 curves.",
  acknowledgement = ack-nhfb,
  articleno =    "17",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Napieralski:2011:NFD,
  author =       "Phillip E. Napieralski and Bliss M. Altenhoff and
                 Jeffrey W. Bertrand and Lindsay O. Long and Sabarish
                 V. Babu and Christopher C. Pagano and Justin Kern and
                 Timothy A. Davis",
  title =        "Near-field distance perception in real and virtual
                 environments using both verbal and action responses",
  journal =      j-TAP,
  volume =       "8",
  number =       "3",
  pages =        "18:1--18:??",
  month =        aug,
  year =         "2011",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/2010325.2010328",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Tue Aug 23 18:20:29 MDT 2011",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "Few experiments have been performed to investigate
                 near-field egocentric distance estimation in an
                 Immersive Virtual Environment (IVE) as compared to the
                 Real World (RW). This article investigates near-field
                  distance estimation in IVE and RW conditions with
                  physical reach and verbal report measures, using an
                  apparatus similar to that of Bingham and Pagano
                 [1998]. Analysis of our experiment shows distance
                 compression in both the IVE and RW conditions in
                  participants' judgments of distance to targets. This is
                 consistent with previous research in both action space
                 in an IVE and reach space with Augmented Reality (AR).
                 Analysis of verbal responses from participants revealed
                 that participants underestimated significantly less in
                 the virtual world as compared to the RW.",
  acknowledgement = ack-nhfb,
  articleno =    "18",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Williams:2011:EWP,
  author =       "Betsy Williams and Stephen Bailey and Gayathri
                 Narasimham and Muqun Li and Bobby Bodenheimer",
  title =        "Evaluation of walking in place on a {Wii} balance
                 board to explore a virtual environment",
  journal =      j-TAP,
  volume =       "8",
  number =       "3",
  pages =        "19:1--19:??",
  month =        aug,
  year =         "2011",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/2010325.2010329",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Tue Aug 23 18:20:29 MDT 2011",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "In this work, we present a method of ``Walking In
                 Place'' (WIP) on the Nintendo Wii Fit Balance Board to
                 explore a virtual environment. We directly compare our
                 method to joystick locomotion and normal walking. The
                 joystick proves inferior to physically walking and to
                 WIP on the Wii Balance Board (WIP--Wii). Interestingly,
                 we find that physically exploring an environment on
                 foot is equivalent in terms of spatial orientation to
                 exploring an environment using our WIP--Wii method.",
  acknowledgement = ack-nhfb,
  articleno =    "19",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Navarro:2011:PCM,
  author =       "Fernando Navarro and Susana Castillo and Francisco J.
                 Ser{\'o}n and Diego Gutierrez",
  title =        "Perceptual considerations for motion blur rendering",
  journal =      j-TAP,
  volume =       "8",
  number =       "3",
  pages =        "20:1--20:??",
  month =        aug,
  year =         "2011",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/2010325.2010330",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Tue Aug 23 18:20:29 MDT 2011",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "Motion blur is a frequent requirement for the
                 rendering of high-quality animated images. However, the
                 computational resources involved are usually higher
                 than those for images that have not been temporally
                 antialiased. In this article we study the influence of
                 high-level properties such as object material and
                 speed, shutter time, and antialiasing level. Based on
                 scenes containing variations of these parameters, we
                 design different psychophysical experiments to
                 determine how influential they are in the perception of
                  image quality. This work gives insight into the
                  effects of these parameters and exposes situations
                  where motion-blurred stimuli may be indistinguishable
                 from a gold standard.",
  acknowledgement = ack-nhfb,
  articleno =    "20",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Hodgson:2011:RWE,
  author =       "Eric Hodgson and Eric Bachmann and David Waller",
  title =        "Redirected walking to explore virtual environments:
                 Assessing the potential for spatial interference",
  journal =      j-TAP,
  volume =       "8",
  number =       "4",
  pages =        "22:1--22:??",
  month =        nov,
  year =         "2011",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/2043603.2043604",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Thu Dec 15 09:27:03 MST 2011",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tap.bib",
  acknowledgement = ack-nhfb,
  articleno =    "22",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Rosli:2011:AGC,
  author =       "Roslizawaty Mohd Rosli and Hong Z. Tan and Robert W.
                 Proctor and Rob Gray",
  title =        "Attentional gradient for crossmodal proximal-distal
                 tactile cueing of visual spatial attention",
  journal =      j-TAP,
  volume =       "8",
  number =       "4",
  pages =        "23:1--23:??",
  month =        nov,
  year =         "2011",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/2043603.2043605",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Thu Dec 15 09:27:03 MST 2011",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tap.bib",
  acknowledgement = ack-nhfb,
  articleno =    "23",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Bernhard:2011:BTF,
  author =       "Matthias Bernhard and Karl Grosse and Michael
                 Wimmer",
  title =        "Bimodal task-facilitation in a virtual traffic
                 scenario through spatialized sound rendering",
  journal =      j-TAP,
  volume =       "8",
  number =       "4",
  pages =        "24:1--24:??",
  month =        nov,
  year =         "2011",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/2043603.2043606",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Thu Dec 15 09:27:03 MST 2011",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tap.bib",
  acknowledgement = ack-nhfb,
  articleno =    "24",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Souman:2011:CEU,
  author =       "J. L. Souman and P. Robuffo Giordano and M. Schwaiger
                 and I. Frissen and T. Th{\"u}mmel and H. Ulbrich and
                 A. De Luca and H. H. B{\"u}lthoff and M. O. Ernst",
  title =        "{CyberWalk}: Enabling unconstrained omnidirectional
                 walking through virtual environments",
  journal =      j-TAP,
  volume =       "8",
  number =       "4",
  pages =        "25:1--25:??",
  month =        nov,
  year =         "2011",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/2043603.2043607",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Thu Dec 15 09:27:03 MST 2011",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tap.bib",
  acknowledgement = ack-nhfb,
  articleno =    "25",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Nguyen:2011:ESC,
  author =       "Tien Dat Nguyen and Christine J. Ziemer and Timofey
                 Grechkin and Benjamin Chihak and Jodie M. Plumert and
                 James F. Cremer and Joseph K. Kearney",
  title =        "Effects of scale change on distance perception in
                 virtual environments",
  journal =      j-TAP,
  volume =       "8",
  number =       "4",
  pages =        "26:1--26:??",
  month =        nov,
  year =         "2011",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/2043603.2043608",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Thu Dec 15 09:27:03 MST 2011",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tap.bib",
  acknowledgement = ack-nhfb,
  articleno =    "26",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Thumfart:2011:MHA,
  author =       "Stefan Thumfart and Richard H. A. H. Jacobs and Edwin
                 Lughofer and Christian Eitzinger and Frans
                 W. Cornelissen and Werner Groissboeck and Roland
                 Richter",
  title =        "Modeling human aesthetic perception of visual
                 textures",
  journal =      j-TAP,
  volume =       "8",
  number =       "4",
  pages =        "27:1--27:??",
  month =        nov,
  year =         "2011",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/2043603.2043609",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Thu Dec 15 09:27:03 MST 2011",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tap.bib",
  acknowledgement = ack-nhfb,
  articleno =    "27",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Au:2011:IMV,
  author =       "Carmen E. Au and James J. Clark",
  title =        "Integrating multiple views with virtual mirrors to
                 facilitate scene understanding",
  journal =      j-TAP,
  volume =       "8",
  number =       "4",
  pages =        "28:1--28:??",
  month =        nov,
  year =         "2011",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/2043603.2043610",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Thu Dec 15 09:27:03 MST 2011",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tap.bib",
  acknowledgement = ack-nhfb,
  articleno =    "28",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Vanhala:2012:VFA,
  author =       "Toni Vanhala and Veikko Surakka and Matthieu Courgeon
                 and Jean-Claude Martin",
  title =        "Voluntary facial activations regulate physiological
                 arousal and subjective experiences during virtual
                 social stimulation",
  journal =      j-TAP,
  volume =       "9",
  number =       "1",
  pages =        "1:1--1:??",
  month =        mar,
  year =         "2012",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/2134203.2134204",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Fri Mar 30 17:41:07 MDT 2012",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "Exposure to distressing computer-generated stimuli and
                 feedback of physiological changes during exposure have
                 been effective in the treatment of anxiety disorders
                 (e.g., social phobia). Here we studied voluntary facial
                 activations as a method for regulating more spontaneous
                 physiological changes during virtual social
                 stimulation. Twenty-four participants with a low or
                 high level of social anxiety activated either the
                 corrugator supercilii (used in frowning) or the
                 zygomaticus major (used in smiling) facial muscle to
                 keep a female or a male computer character walking
                 towards them. The more socially anxious participants
                 had a higher level of skin conductance throughout the
                 trials as compared to less anxious participants. Within
                 both groups, short-term skin conductance responses were
                 enhanced both during and after facial activations; and
                 corrugator supercilii activations facilitated longer
                 term electrodermal relaxation. Zygomaticus major
                  activations had opposite effects on the subjective
                  emotional ratings of the less and the more socially
                  anxious participants. In sum, voluntary facial
                  activations were effective in regulating emotional
                  arousal during virtual social exposure, and corrugator
                  supercilii activation was found to be an especially
                  promising method for facilitating autonomic
                  relaxation.",
  acknowledgement = ack-nhfb,
  articleno =    "1",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Bulling:2012:MRR,
  author =       "Andreas Bulling and Jamie A. Ward and Hans Gellersen",
  title =        "Multimodal recognition of reading activity in transit
                 using body-worn sensors",
  journal =      j-TAP,
  volume =       "9",
  number =       "1",
  pages =        "2:1--2:??",
  month =        mar,
  year =         "2012",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/2134203.2134205",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Fri Mar 30 17:41:07 MDT 2012",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "Reading is one of the most well-studied visual
                 activities. Vision research traditionally focuses on
                 understanding the perceptual and cognitive processes
                 involved in reading. In this work we recognize reading
                 activity by jointly analyzing eye and head movements of
                 people in an everyday environment. Eye movements are
                 recorded using an electrooculography (EOG) system; body
                 movements using body-worn inertial measurement units.
                 We compare two approaches for continuous recognition of
                 reading: String matching (STR) that explicitly models
                 the characteristic horizontal saccades during reading,
                 and a support vector machine (SVM) that relies on 90
                 eye movement features extracted from the eye movement
                 data. We evaluate both methods in a study performed
                 with eight participants reading while sitting at a
                 desk, standing, walking indoors and outdoors, and
                 riding a tram. We introduce a method to segment reading
                 activity by exploiting the sensorimotor coordination of
                 eye and head movements during reading. Using
                 person-independent training, we obtain an average
                 precision for recognizing reading of 88.9\% (recall
                 72.3\%) using STR and of 87.7\% (recall 87.9\%) using
                 SVM over all participants. We show that the proposed
                 segmentation scheme improves the performance of
                 recognizing reading events by more than 24\%. Our work
                 demonstrates that the joint analysis of eye and body
                 movements is beneficial for reading recognition and
                 opens up discussion on the wider applicability of a
                 multimodal recognition approach to other visual and
                 physical activities.",
  acknowledgement = ack-nhfb,
  articleno =    "2",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}
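
%%% The STR approach in the Bulling:2012:MRR entry above explicitly
%%% models the characteristic horizontal saccade pattern of reading:
%%% runs of small rightward saccades ended by a large leftward return
%%% sweep.  A minimal sketch of that idea in Python; the threshold,
%%% encoding, and regular expression are assumptions of this sketch,
%%% not the authors' implementation.
%%%
%%%   import re
%%%   import numpy as np
%%%
%%%   def saccade_string(eog, thresh=50.0):
%%%       """Encode a horizontal EOG trace as 'r'/'l' characters."""
%%%       chars = []
%%%       for v in np.diff(eog):          # sample-to-sample change
%%%           if v > thresh:
%%%               chars.append("r")       # rightward saccade
%%%           elif v < -thresh:
%%%               chars.append("l")       # leftward saccade
%%%       return "".join(chars)
%%%
%%%   def looks_like_reading(s):
%%%       # three or more rightward saccades, then a return sweep
%%%       return re.search(r"r{3,}l", s) is not None
%%%
%%%   # synthetic trace: three rightward steps, one leftward sweep
%%%   trace = np.repeat([0.0, 100.0, 200.0, 300.0, 0.0], 5)
%%%   print(looks_like_reading(saccade_string(trace)))   # True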

@Article{Kastanis:2012:RLU,
  author =       "Iason Kastanis and Mel Slater",
  title =        "Reinforcement learning utilizes proxemics: An avatar
                 learns to manipulate the position of people in
                 immersive virtual reality",
  journal =      j-TAP,
  volume =       "9",
  number =       "1",
  pages =        "3:1--3:??",
  month =        mar,
  year =         "2012",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/2134203.2134206",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Fri Mar 30 17:41:07 MDT 2012",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "A reinforcement learning (RL) method was used to train
                 a virtual character to move participants to a specified
                 location. The virtual environment depicted an alleyway
                 displayed through a wide field-of-view head-tracked
                 stereo head-mounted display. Based on proxemics theory,
                 we predicted that when the character approached within
                 a personal or intimate distance to the participants,
                 they would be inclined to move backwards out of the
                 way. We carried out a between-groups experiment with 30
                 female participants, with 10 assigned arbitrarily to
                 each of the following three groups: In the Intimate
                 condition the character could approach within 0.38m and
                 in the Social condition no nearer than 1.2m. In the
                 Random condition the actions of the virtual character
                 were chosen randomly from among the same set as in the
                 RL method, and the virtual character could approach
                 within 0.38m. The experiment continued in each case
                 until the participant either reached the target or 7
                 minutes had elapsed. The distributions of the times
                 taken to reach the target showed significant
                 differences between the three groups, with 9 out of 10
                 in the Intimate condition reaching the target
                 significantly faster than the 6 out of 10 who reached
                 the target in the Social condition. Only 1 out of 10 in
                 the Random condition reached the target. The experiment
                 is an example of applied presence theory: we rely on
                 the many findings that people tend to respond
                 realistically in immersive virtual environments, and
                 use this to get people to achieve a task of which they
                 had been unaware. This method opens up the door for
                 many such applications where the virtual environment
                 adapts to the responses of the human participants with
                 the aim of achieving particular goals.",
  acknowledgement = ack-nhfb,
  articleno =    "3",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Jerald:2012:SMT,
  author =       "Jason Jerald and Mary Whitton and Frederick P.
                 {Brooks, Jr.}",
  title =        "Scene-motion thresholds during head yaw for immersive
                 virtual environments",
  journal =      j-TAP,
  volume =       "9",
  number =       "1",
  pages =        "4:1--4:??",
  month =        mar,
  year =         "2012",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/2134203.2134207",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Fri Mar 30 17:41:07 MDT 2012",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "In order to better understand how scene motion is
                 perceived in immersive virtual environments, we
                 measured scene-motion thresholds under different
                 conditions across three experiments. Thresholds were
                 measured during quasi-sinusoidal head yaw, single
                 left-to-right or right-to-left head yaw, different
                 phases of head yaw, slow to fast head yaw, scene motion
                 relative to head yaw, and two scene-illumination
                 levels. We found that across various conditions (1)
                 thresholds are greater when the scene moves with head
                 yaw (corresponding to gain {$<$}1.0) than when the
                 scene moves against head yaw (corresponding to gain
                 {$>$}1.0), and (2) thresholds increase as head motion
                 increases.",
  acknowledgement = ack-nhfb,
  articleno =    "4",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Ziat:2012:EVM,
  author =       "Mounia Ziat and Carmen Au and Amin Haji Abolhassani
                 and James J. Clark",
  title =        "Enhancing visuospatial map learning through action on
                 cellphones",
  journal =      j-TAP,
  volume =       "9",
  number =       "1",
  pages =        "5:1--5:??",
  month =        mar,
  year =         "2012",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/2134203.2134208",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Fri Mar 30 17:41:07 MDT 2012",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "The visuospatial learning of a map on cellphone
                 displays was examined. The spatial knowledge of human
                 participants was assessed after they had learned the
                 relative positions of London Underground stations on a
                 map via passive, marginally active, or active
                 exploration. Following learning, the participants were
                 required to answer questions in relation to the spatial
                 representation and distribution of the stations on the
                  map. Performance was compared between conditions
                  involving (1) no auditory cues versus continuous
                  auditory cues; (2) no auditory cues versus
                  noncontinuous auditory cues; and (3) continuous
                  auditory cues versus noncontinuous auditory cues.
                  Results showed that the participants performed better
                  following active and marginally active exploration
                  than after purely passive learning. These results
                  also suggest that under specific conditions (i.e.,
                  continuous sound with an extremely fast tempo) active
                  exploration offers no benefit to spatial abilities
                  over passive observation, while continuous sound with
                  a moderate to fast tempo is effective for simple
                  actions (i.e., key presses).",
  acknowledgement = ack-nhfb,
  articleno =    "5",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Watanabe:2012:GCV,
  author =       "Junji Watanabe and Taro Maeda and Hideyuki Ando",
  title =        "Gaze-contingent visual presentation technique with
                 electro-ocular-graph-based saccade detection",
  journal =      j-TAP,
  volume =       "9",
  number =       "2",
  pages =        "6:1--6:??",
  month =        jun,
  year =         "2012",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/2207216.2207217",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Wed Jun 13 17:24:25 MDT 2012",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "When a single column of light sources flashes quickly
                  in a temporal pattern during a horizontal saccadic eye
                 movement, two-dimensional images can be perceived in
                 the space neighboring the light source. This perceptual
                 phenomenon has been applied to light devices for visual
                 arts and entertainment. However, a serious drawback in
                 exploiting this perceptual phenomenon for a visual
                 information display is that a two-dimensional image
                 cannot be viewed if there is any discrepancy between
                 the ocular motility and the flicker timing. We overcame
                 this drawback by combining the saccade-based display
                 with an electro-ocular-graph-based sensor for detecting
                 the saccade. The saccade onset is measured with the
                 electro-ocular-graph-based sensor in real time and the
                 saccade-based display is activated instantaneously as
                  the saccade begins. The psychophysical experiments
                  described in this article demonstrate that our method
                  can detect saccades with low latency and
                 allows the saccade-based display to convey visual
                 information more effectively than when the light
                 sources continuously blink regardless of the observer's
                 eye movements.",
  acknowledgement = ack-nhfb,
  articleno =    "6",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}
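
%%% A minimal Python sketch of the kind of low-latency saccade onset
%%% detection the entry above relies on: thresholding the velocity of
%%% an electro-ocular-graph (EOG) trace. The sampling rate and
%%% threshold are assumed example values, not the authors' settings.
%%%
%%%     import numpy as np
%%%
%%%     def detect_saccade_onsets(eog, fs=1000.0, vel_thresh=50.0):
%%%         """Return sample indices where EOG velocity (units/s)
%%%         first exceeds vel_thresh: candidate saccade onsets."""
%%%         vel = np.gradient(eog) * fs        # finite-difference velocity
%%%         above = np.abs(vel) > vel_thresh
%%%         # onsets are the rising edges of the boolean mask
%%%         return np.flatnonzero(above[1:] & ~above[:-1]) + 1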

@Article{Ziemek:2012:EEO,
  author =       "Tina Ziemek and Sarah Creem-Regehr and William
                 Thompson and Ross Whitaker",
  title =        "Evaluating the effectiveness of orientation indicators
                 with an awareness of individual differences",
  journal =      j-TAP,
  volume =       "9",
  number =       "2",
  pages =        "7:1--7:??",
  month =        jun,
  year =         "2012",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/2207216.2207218",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Wed Jun 13 17:24:25 MDT 2012",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "Understanding how users perceive 3D geometric objects
                 can provide a basis for creating more effective tools
                 for visualization in applications such as CAD or
                 medical imaging. This article examines how orientation
                 indicators affect users' accuracy in perceiving the
                  shape of a 3D object shown in multiple views. Multiple
                 views force users to infer the orientation of an object
                 and recognize corresponding features between distinct
                 vantage points. These are difficult tasks, and not all
                 users are able to carry them out accurately. We use a
                 cognitive experimental paradigm to evaluate the
                 effectiveness of two types of orientation indicators on
                 a person's ability to compare views of objects
                  presented in different orientations. The orientation
                  indicators implemented were either colocated (sharing
                  a center point with the 3D object) or noncolocated
                  (displaced from the 3D object). The study accounts for
                 additional factors including object complexity, axis of
                 rotation, and users' individual differences in spatial
                 abilities. Our results show that an orientation
                 indicator helps users in comparing multiple views, and
                 that the effect is influenced by the type of aid, a
                 person's spatial ability, and the difficulty of the
                 task. In addition to establishing an effect of an
                 orientation indicator, this article helps demonstrate
                 the application of a particular experimental paradigm
                 and analysis, as well as the importance of considering
                 individual differences when designing interface aids.",
  acknowledgement = ack-nhfb,
  articleno =    "7",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Laitinen:2012:PTF,
  author =       "Mikko-Ville Laitinen and Tapani Pihlajam{\"a}ki and
                 Cumhur Erkut and Ville Pulkki",
  title =        "Parametric time-frequency representation of spatial
                 sound in virtual worlds",
  journal =      j-TAP,
  volume =       "9",
  number =       "2",
  pages =        "8:1--8:??",
  month =        jun,
  year =         "2012",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/2207216.2207219",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Wed Jun 13 17:24:25 MDT 2012",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "Directional audio coding (DirAC) is a parametric
                 time-frequency domain method for processing spatial
                 audio based on psychophysical assumptions and on
                 energetic analysis of the sound field. Methods to use
                 DirAC in spatial sound synthesis for virtual worlds are
                 presented in this article. Formal listening tests are
                 used to show that DirAC can be used to position and to
                 control the spatial extent of virtual sound sources
                  with good audio quality. It is also shown that DirAC
                  can generate reverberation for N-channel horizontal
                  listening with only two monophonic reverberators,
                  without a prominent loss in quality compared with
                  N-channel reverberators.",
  acknowledgement = ack-nhfb,
  articleno =    "8",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}
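
%%% A heavily simplified Python sketch of the energetic analysis that
%%% underlies DirAC: per-block direction and diffuseness estimates
%%% from B-format signals w (pressure) and x, y (particle velocity),
%%% all in normalized units. The published method operates per
%%% time-frequency tile; this broadband version is only illustrative.
%%%
%%%     import numpy as np
%%%
%%%     def dirac_analysis(w, x, y):
%%%         ix, iy = np.mean(w * x), np.mean(w * y)  # mean intensity vector
%%%         azimuth = np.arctan2(iy, ix)             # direction of arrival
%%%         energy = 0.5 * np.mean(w**2 + x**2 + y**2)
%%%         diffuseness = 1.0 - np.hypot(ix, iy) / max(energy, 1e-12)
%%%         return azimuth, diffuseness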

@Article{Leroy:2012:RTA,
  author =       "Laure Leroy and Philippe Fuchs and Guillaume Moreau",
  title =        "Real-time adaptive blur for reducing eye strain in
                 stereoscopic displays",
  journal =      j-TAP,
  volume =       "9",
  number =       "2",
  pages =        "9:1--9:??",
  month =        jun,
  year =         "2012",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/2207216.2207220",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Wed Jun 13 17:24:25 MDT 2012",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "Stereoscopic devices are widely used (immersion-based
                 working environments, stereoscopically-viewed movies,
                 auto-stereoscopic screens). In some instances, exposure
                 to stereoscopic immersion techniques can be lengthy,
                 and so eye strain sets in. We propose a method for
                 reducing eye strain induced by stereoscopic vision.
                 After reviewing sources of eye strain linked to
                 stereoscopic vision, we focus on one of these sources:
                 images with high frequency content associated with
                 large disparities. We put forward an algorithm for
                 removing the irritating high frequencies in high
                 horizontal disparity zones (i.e., for virtual objects
                 appearing far from the real screen level). We elaborate
                 on our testing protocol to establish that our image
                 processing method reduces eye strain caused by
                 stereoscopic vision, both objectively and subjectively.
                 We subsequently quantify the positive effects of our
                 algorithm on the relief of eye strain and discuss
                 further research perspectives.",
  acknowledgement = ack-nhfb,
  articleno =    "9",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}
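
%%% An illustrative Python sketch, not the authors' algorithm: remove
%%% high spatial frequencies where horizontal disparity is large by
%%% blending in a blurred copy of the view in proportion to
%%% |disparity|. The saturation disparity d0 and blur sigma are
%%% assumed values.
%%%
%%%     import numpy as np
%%%     from scipy.ndimage import gaussian_filter
%%%
%%%     def adaptive_blur(view, disparity, d0=10.0, sigma=3.0):
%%%         """view: 2-D grayscale image; disparity: per-pixel
%%%         horizontal disparity in pixels; d0: saturation point."""
%%%         blurred = gaussian_filter(view, sigma)
%%%         w = np.clip(np.abs(disparity) / d0, 0.0, 1.0)  # 0 sharp, 1 blurred
%%%         return (1.0 - w) * view + w * blurred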

@Article{McDonnell:2012:ISI,
  author =       "Rachel McDonnell and Veronica Sundstedt",
  title =        "Introduction to special issue {SAP 2012}",
  journal =      j-TAP,
  volume =       "9",
  number =       "3",
  pages =        "10:1--10:??",
  month =        jul,
  year =         "2012",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/2325722.2325723",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Tue Jul 31 17:40:12 MDT 2012",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tap.bib",
  acknowledgement = ack-nhfb,
  articleno =    "10",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Schumacher:2012:WFP,
  author =       "Matthaeus Schumacher and Volker Blanz",
  title =        "Which facial profile do humans expect after seeing a
                 frontal view? {A} comparison with a linear face model",
  journal =      j-TAP,
  volume =       "9",
  number =       "3",
  pages =        "11:1--11:??",
  month =        jul,
  year =         "2012",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/2325722.2325724",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Tue Jul 31 17:40:12 MDT 2012",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "Manipulated versions of three-dimensional faces that
                 have different profiles, but almost the same appearance
                 in frontal views, provide a novel way to investigate if
                 and how humans use class-specific knowledge to infer
                 depth from images of faces. After seeing a frontal
                 view, participants have to select the profile that
                 matches that view. The profiles are original (ground
                 truth), average, random other, and two solutions
                 computed with a linear face model (3D Morphable Model).
                 One solution is based on 2D vertex positions, the other
                 on pixel colors in the frontal view. The human
                 responses demonstrate that humans neither guess nor
                 just choose the average profile. The results also
                 indicate that humans actually use the information from
                 the front view, and not just rely on the plausibility
                 of the profiles per se. All our findings are perfectly
                 consistent with a correlation-based inference in a
                 linear face model. The results also verify that the 3D
                 reconstructions from our computational algorithms
                  (stimuli 4 and 5) are similar to what humans expect,
                  because they are chosen as the true profile as often
                  as the ground-truth profiles. Our experiments
                 shed new light on the mechanisms of human face
                 perception and present a new quality measure for 3D
                 reconstruction algorithms.",
  acknowledgement = ack-nhfb,
  articleno =    "11",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}
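
%%% A schematic Python sketch of correlation-based inference in a
%%% linear face model: ridge regression predicting profile (depth)
%%% features from frontal-view features over a training set of faces.
%%% The feature matrices and regularizer are placeholders; the article
%%% uses a 3D Morphable Model rather than this generic regression.
%%%
%%%     import numpy as np
%%%
%%%     def fit_profile_predictor(F, P, lam=1e-3):
%%%         """F: (n_faces, n_frontal_feats); P: (n_faces,
%%%         n_profile_feats). Returns weights W so that f @ W
%%%         approximates the profile features for a new face f."""
%%%         A = F.T @ F + lam * np.eye(F.shape[1])
%%%         return np.linalg.solve(A, F.T @ P)
%%%
%%%     # predicted_profile = frontal_features @ W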

@Article{Mustafa:2012:STE,
  author =       "Maryam Mustafa and Stefan Guthe and Marcus Magnor",
  title =        "Single-trial {EEG} classification of artifacts in
                 videos",
  journal =      j-TAP,
  volume =       "9",
  number =       "3",
  pages =        "12:1--12:??",
  month =        jul,
  year =         "2012",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/2325722.2325725",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Tue Jul 31 17:40:12 MDT 2012",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "In this article we use an ElectroEncephaloGraph (EEG)
                 to explore the perception of artifacts that typically
                 appear during rendering and determine the perceptual
                 quality of a sequence of images. Although there is an
                 emerging interest in using an EEG for image quality
                 assessment, one of the main impediments to the use of
                 an EEG is the very low Signal-to-Noise Ratio (SNR)
                 which makes it exceedingly difficult to distinguish
                  neural responses from noise. Traditionally,
                  event-related potentials (ERPs) have been used for
                  analysis of EEG data. However, they rely on averaging
                  and so require a large number of participants and
                  trials to get meaningful data. Also, due to the low
                  SNR, ERPs are not suited for single-trial
                  classification. We
                 propose a novel wavelet-based approach for evaluating
                 EEG signals which allows us to predict the perceived
                 image quality from only a single trial. Our
                 wavelet-based algorithm is able to filter the EEG data
                 and remove noise, eliminating the need for many
                 participants or many trials. With this approach it is
                 possible to use data from only 10 electrode channels
                 for single-trial classification and predict the
                 presence of an artifact with an accuracy of 85\%. We
                 also show that it is possible to differentiate and
                 classify a trial based on the exact type of artifact
                 viewed. Our work is particularly useful for
                 understanding how the human visual system responds to
                 different types of degradations in images and videos.
                 An understanding of the perception of typical
                 image-based rendering artifacts forms the basis for the
                 optimization of rendering and masking algorithms.",
  acknowledgement = ack-nhfb,
  articleno =    "12",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}
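
%%% A generic wavelet-thresholding sketch in the spirit of the
%%% article's wavelet-based EEG filtering; the wavelet, level, and
%%% universal threshold rule below are common defaults, not the
%%% authors' published settings.
%%%
%%%     import numpy as np
%%%     import pywt
%%%
%%%     def wavelet_denoise(sig, wavelet="db4", level=5):
%%%         coeffs = pywt.wavedec(sig, wavelet, level=level)
%%%         # noise scale estimated from the finest detail band
%%%         sigma = np.median(np.abs(coeffs[-1])) / 0.6745
%%%         thr = sigma * np.sqrt(2.0 * np.log(len(sig)))
%%%         coeffs[1:] = [pywt.threshold(c, thr, mode="soft")
%%%                       for c in coeffs[1:]]
%%%         return pywt.waverec(coeffs, wavelet)[:len(sig)]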

@Article{Niu:2012:VES,
  author =       "Yaqing Niu and Rebecca M. Todd and Matthew Kyan and
                 Adam K. Anderson",
  title =        "Visual and emotional salience influence eye
                 movements",
  journal =      j-TAP,
  volume =       "9",
  number =       "3",
  pages =        "13:1--13:??",
  month =        jul,
  year =         "2012",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/2325722.2325726",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Tue Jul 31 17:40:12 MDT 2012",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "In natural vision both stimulus features and
                 cognitive/affective factors influence an observer's
                 attention. However, the relationship between
                 stimulus-driven (bottom-up) and cognitive/affective
                 (top-down) factors remains controversial: How well does
                 the classic visual salience model account for gaze
                 locations? Can emotional salience counteract strong
                 visual stimulus signals and shift attention allocation
                  irrespective of bottom-up features? Here we compared
                  the Itti and Koch [2000] and Spectral Residual (SR)
                  visual salience models and explored the impact of visual
                 salience and emotional salience on eye movement
                 behavior, to understand the competition between visual
                 salience and emotional salience and how they affect
                  gaze allocation in complex scene viewing. Our results
                 show the insufficiency of visual salience models in
                 predicting fixation. Emotional salience can override
                 visual salience and can determine attention allocation
                 in complex scenes. These findings are consistent with
                 the hypothesis that cognitive/affective factors play a
                 dominant role in active gaze control.",
  acknowledgement = ack-nhfb,
  articleno =    "13",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}
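
%%% A compact Python version of the Spectral Residual (SR) saliency
%%% model the study compares against (Hou and Zhang 2007), for a
%%% grayscale image; the filter sizes are the usual small defaults.
%%%
%%%     import numpy as np
%%%     from scipy.ndimage import uniform_filter, gaussian_filter
%%%
%%%     def spectral_residual_saliency(gray):
%%%         f = np.fft.fft2(gray)
%%%         log_amp = np.log(np.abs(f) + 1e-9)
%%%         residual = log_amp - uniform_filter(log_amp, size=3)
%%%         sal = np.abs(np.fft.ifft2(np.exp(residual
%%%                                          + 1j * np.angle(f)))) ** 2
%%%         return gaussian_filter(sal, sigma=2.5)  # smoothed saliency map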

@Article{Zhang:2012:MAV,
  author =       "Ruimin Zhang and Anthony Nordman and James Walker and
                 Scott A. Kuhl",
  title =        "Minification affects verbal- and action-based distance
                 judgments differently in head-mounted displays",
  journal =      j-TAP,
  volume =       "9",
  number =       "3",
  pages =        "14:1--14:??",
  month =        jul,
  year =         "2012",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/2325722.2325727",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Tue Jul 31 17:40:12 MDT 2012",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "Numerous studies report that people underestimate
                 egocentric distances in Head-Mounted Display (HMD)
                 virtual environments compared to real environments as
                 measured by direct blind walking. Geometric
                 minification, or rendering graphics with a larger field
                 of view than the display's field of view, has been
                 shown to eliminate this underestimation in a virtual
                 hallway environment [Kuhl et al. 2006, 2009]. This
                 study demonstrates that minification affects blind
                 walking in a sparse classroom and does not influence
                 verbal reports of distance. Since verbal reports of
                 distance have been reported to be compressed in real
                 environments, we speculate that minification in an HMD
                  replicates people's real-world blind walking and verbal
                 report distance judgments. We also demonstrate a new
                 method for quantifying any unintentional miscalibration
                 in our experiments. This process involves using the HMD
                 in an augmented reality configuration and having each
                 participant indicate where the targets and horizon
                 appeared after each experiment. More work is necessary
                 to understand how and why minification changes verbal-
                 and walking-based egocentric distance judgments
                 differently.",
  acknowledgement = ack-nhfb,
  articleno =    "14",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}
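
%%% A small sketch of geometric minification as the abstract defines
%%% it: rendering with a larger field of view than the display's
%%% physical FOV. The 1.2 scale factor is an arbitrary example, not a
%%% value from the study.
%%%
%%%     import math
%%%
%%%     def minified_fov_deg(display_fov_deg, scale=1.2):
%%%         """FOV to use in the projection so content is minified."""
%%%         return display_fov_deg * scale
%%%
%%%     def projection_focal(fov_deg):
%%%         # focal term of a standard OpenGL-style perspective matrix
%%%         return 1.0 / math.tan(math.radians(fov_deg) / 2.0)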

@Article{Couture:2012:PBS,
  author =       "Vincent Couture and Michael S. Langer and
                 S{\'e}bastien Roy",
  title =        "Perception of blending in stereo motion panoramas",
  journal =      j-TAP,
  volume =       "9",
  number =       "3",
  pages =        "15:1--15:??",
  month =        jul,
  year =         "2012",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/2325722.2325728",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Tue Jul 31 17:40:12 MDT 2012",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "Most methods for synthesizing panoramas assume that
                 the scene is static. A few methods have been proposed
                 for synthesizing stereo or motion panoramas, but there
                 has been little attempt to synthesize panoramas that
                  have both stereo and motion. One faces several
                  challenges in synthesizing stereo motion panoramas:
                  for example, ensuring temporal synchronization between
                  left and right views in each frame, avoiding spatial
                  distortion of moving objects, and continuously looping
                  the video in time. We have recently developed a stereo
                 motion panorama method that tries to address some of
                 these challenges. The method blends space-time regions
                 of a video XYT volume, such that the blending regions
                 are distinct and translate over time. This article
                 presents a perception experiment that evaluates certain
                 aspects of the method, namely how well observers can
                 detect such blending regions. We measure detection time
                 thresholds for different blending widths and for
                 different scenes, and for monoscopic versus
                 stereoscopic videos. Our results suggest that blending
                 may be more effective in image regions that do not
                 contain coherent moving objects that can be tracked
                 over time. For example, we found moving water and
                 partly transparent smoke were more effectively blended
                 than swaying branches. We also found that performance
                 in the task was roughly the same for mono versus stereo
                 videos.",
  acknowledgement = ack-nhfb,
  articleno =    "15",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}
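
%%% An illustrative Python sketch of the blending idea described
%%% above: a ramp that mixes two overlapping video regions and
%%% translates over time within the XYT volume. The width and speed
%%% are arbitrary examples, not the method's parameters.
%%%
%%%     import numpy as np
%%%
%%%     def blend_frame(left, right, t, width=32, speed=2):
%%%         """left, right: (H, W) frames at time t; the blend ramp's
%%%         center translates horizontally with t."""
%%%         x = np.arange(left.shape[1])
%%%         c = (t * speed) % left.shape[1]       # moving ramp center
%%%         alpha = np.clip((x - (c - width / 2)) / width, 0.0, 1.0)
%%%         return left * (1.0 - alpha) + right * alpha  # 0->left, 1->right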

@Article{OToole:2012:CFR,
  author =       "Alice J. O'Toole and Xiaobo An and Joseph Dunlop and
                 Vaidehi Natu and P. Jonathon Phillips",
  title =        "Comparing face recognition algorithms to humans on
                 challenging tasks",
  journal =      j-TAP,
  volume =       "9",
  number =       "4",
  pages =        "16:1--16:??",
  month =        oct,
  year =         "2012",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/2355598.2355599",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Mon Oct 22 11:06:19 MDT 2012",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "We compared face identification by humans and machines
                 using images taken under a variety of uncontrolled
                 illumination conditions in both indoor and outdoor
                 settings. Natural variations in a person's day-to-day
                  appearance (e.g., hair style, facial expression, hats,
                  and glasses) contributed to the difficulty of the
                 task. Both humans and machines matched the identity of
                 people (same or different) in pairs of frontal view
                 face images. The degree of difficulty introduced by
                 photometric and appearance-based variability was
                 estimated using a face recognition algorithm created by
                 fusing three top-performing algorithms from a recent
                 international competition. The algorithm computed
                 similarity scores for a constant set of same-identity
                 and different-identity pairings from multiple images.
                 Image pairs were assigned to good, moderate, and poor
                 accuracy groups by ranking the similarity scores for
                 each identity pairing, and dividing these rankings into
                 three strata. This procedure isolated the role of
                 photometric variables from the effects of the
                 distinctiveness of particular identities. Algorithm
                 performance for these constant identity pairings varied
                 dramatically across the groups. In a series of
                 experiments, humans matched image pairs from the good,
                 moderate, and poor conditions, rating the likelihood
                 that the images were of the same person (1: sure same
                 --- 5: sure different). Algorithms were more accurate
                 than humans in the good and moderate conditions, but
                 were comparable to humans in the poor accuracy
                 condition. To date, these are the most variable
                 illumination- and appearance-based recognition
                 conditions on which humans and machines have been
                 compared. The finding that machines were never less
                 accurate than humans on these challenging frontal
                 images suggests that face recognition systems may be
                 ready for applications with comparable difficulty. We
                 speculate that the superiority of algorithms over
                 humans in the less challenging conditions may be due to
                 the algorithms' use of detailed, view-specific identity
                 information. Humans may consider this information less
                 important due to its limited potential for robust
                 generalization in suboptimal viewing conditions.",
  acknowledgement = ack-nhfb,
  articleno =    "16",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}
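
%%% A numpy sketch of the stratification procedure described: fuse
%%% per-algorithm similarity scores and split the ranked identity
%%% pairings into poor, moderate, and good groups. Fusion by mean
%%% z-score is an assumption; the article fused three specific
%%% top-performing algorithms.
%%%
%%%     import numpy as np
%%%
%%%     def stratify(scores):          # scores: (n_pairs, n_algorithms)
%%%         z = (scores - scores.mean(0)) / scores.std(0)
%%%         fused = z.mean(axis=1)
%%%         lo, hi = np.quantile(fused, [1/3, 2/3])
%%%         # labels: 0 = poor, 1 = moderate, 2 = good accuracy group
%%%         return np.digitize(fused, [lo, hi])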

@Article{Alonso-Arevalo:2012:CSC,
  author =       "Miguel A. Alonso-Arevalo and Simon Shelley and Dik
                 Hermes and Jacqueline Hollowood and Michael Pettitt and
                 Sarah Sharples and Armin Kohlrausch",
  title =        "Curve shape and curvature perception through
                 interactive sonification",
  journal =      j-TAP,
  volume =       "9",
  number =       "4",
  pages =        "17:1--17:??",
  month =        oct,
  year =         "2012",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/2355598.2355600",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Mon Oct 22 11:06:19 MDT 2012",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "In this article we present an approach that uses sound
                 to communicate geometrical data related to a virtual
                 object. This has been developed in the framework of a
                 multimodal interface for product design. The interface
                 allows a designer to evaluate the quality of a 3-D
                 shape using touch, vision, and sound. Two important
                 considerations addressed in this article are the nature
                 of the data that is sonified and the haptic interaction
                 between the user and the interface, which in fact
                 triggers the sound and influences its characteristics.
                 Based on these considerations, we present a number of
                 sonification strategies that are designed to map the
                 geometrical data of interest into sound. The
                 fundamental frequency of various sounds was used to
                 convey the curve shape or the curvature to the
                  listeners. Two evaluation experiments are described:
                  one involved participants with varied backgrounds, the
                  other involved the intended users, i.e., participants
                  with a background in industrial design. The results
                  show that, independent of the sonification method used
                  and of whether the curve shape or the curvature was
                  sonified, the sonification was quite successful. In
                  the first experiment, participants had a success rate
                  of about 80\% in a multiple-choice task; in the second
                  experiment, it took the participants on average less
                  than 20 seconds to find the maximum, minimum, or
                  inflection points of the curvature of a test curve.",
  acknowledgement = ack-nhfb,
  articleno =    "17",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}
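
%%% A hedged Python sketch of mapping curvature to the fundamental
%%% frequency of a tone, in the spirit of the sonification strategies
%%% described; the frequency range and linear mapping are assumptions.
%%%
%%%     import numpy as np
%%%
%%%     def sonify_curvature(kappa, fs=44100, dur_per_pt=0.01,
%%%                          f_lo=200.0, f_hi=1000.0):
%%%         k = (kappa - kappa.min()) / (np.ptp(kappa) + 1e-12)
%%%         freqs = f_lo + k * (f_hi - f_lo)  # one frequency per point
%%%         inst_f = np.repeat(freqs, int(fs * dur_per_pt))
%%%         phase = 2.0 * np.pi * np.cumsum(inst_f) / fs
%%%         return np.sin(phase)              # mono audio signal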

@Article{Rocchesso:2012:PRP,
  author =       "Davide Rocchesso and Stefano Delle Monache",
  title =        "Perception and replication of planar sonic gestures",
  journal =      j-TAP,
  volume =       "9",
  number =       "4",
  pages =        "18:1--18:??",
  month =        oct,
  year =         "2012",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/2355598.2355601",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Mon Oct 22 11:06:19 MDT 2012",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "As tables, boards, and walls become surfaces where
                 interaction can be supported by auditory displays, it
                 becomes important to know how accurately and
                 effectively a spatial gesture can be rendered by means
                 of an array of loudspeakers embedded in the surface.
                 Two experiments were designed and performed to assess:
                 (i) how sequences of sound pulses are perceived as
                 gestures when the pulses are distributed in space and
                 time along a line; (ii) how the timing of pulses
                 affects the perceived and reproduced continuity of
                 sequences; and (iii) how effectively a second parallel
                 row of speakers can extend sonic gestures to a
                 two-dimensional space. Results show that azimuthal
                 trajectories can be effectively replicated and that
                 switching between discrete and continuous gestures
                  occurs within the inter-pulse-interval range of 75 to
                  300 ms. The vertical component of sonic gestures
                 cannot be reliably replicated.",
  acknowledgement = ack-nhfb,
  articleno =    "18",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}
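
%%% A small sketch of the stimulus construction implied by the
%%% abstract: one short pulse per loudspeaker along a line, with
%%% onsets spaced by the inter-pulse interval whose 75--300 ms range
%%% governed perceived continuity. Pulse shape and duration are
%%% assumptions.
%%%
%%%     import numpy as np
%%%
%%%     def render_gesture(n_speakers=8, ipi_s=0.15, pulse_s=0.01,
%%%                        fs=44100):
%%%         """Multichannel buffer: one noise pulse per channel."""
%%%         n = int(fs * pulse_s)
%%%         pulse = np.random.randn(n) * np.hanning(n)
%%%         total = int(fs * (n_speakers * ipi_s + pulse_s))
%%%         out = np.zeros((n_speakers, total))
%%%         for i in range(n_speakers):
%%%             t0 = int(fs * i * ipi_s)
%%%             out[i, t0:t0 + n] += pulse
%%%         return out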

@Article{Rebillat:2012:AVA,
  author =       "Marc R{\'e}billat and Xavier Boutillon and {\'E}tienne
                 Corteel and Brian F. G. Katz",
  title =        "Audio, visual, and audio-visual egocentric distance
                 perception by moving subjects in virtual environments",
  journal =      j-TAP,
  volume =       "9",
  number =       "4",
  pages =        "19:1--19:??",
  month =        oct,
  year =         "2012",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/2355598.2355602",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Mon Oct 22 11:06:19 MDT 2012",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "We present a study on audio, visual, and audio-visual
                 egocentric distance perception by moving subjects in
                 virtual environments. Audio-visual rendering is
                 provided using tracked passive visual stereoscopy and
                 acoustic wave field synthesis (WFS). Distances are
                 estimated using indirect blind-walking (triangulation)
                 under each rendering condition. Experimental results
                 show that distances perceived in the virtual
                 environment are systematically overestimated for
                 rendered distances closer than the position of the
                 audio-visual rendering system and underestimated for
                 farther distances. Interestingly, subjects perceived
                 each virtual object at a modality-independent distance
                 when using the audio modality, the visual modality, or
                 the combination of both. WFS was able to synthesize
                 perceptually meaningful sound fields. Dynamic
                 audio-visual cues were used by subjects when estimating
                 the distances in the virtual world. Moving may have
                 provided subjects with a better visual distance
                 perception of close distances than if they were static.
                 No correlation between the feeling of presence and the
                 visual distance underestimation has been found. To
                 explain the observed perceptual distance compression,
                 it is proposed that, due to conflicting distance cues,
                 the audio-visual rendering system physically anchors
                 the virtual world to the real world. Virtual objects
                 are thus attracted by the physical audio-visual
                 rendering system.",
  acknowledgement = ack-nhfb,
  articleno =    "19",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}
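
%%% A geometry-only sketch of the indirect blind-walking
%%% (triangulation) protocol named in the abstract: the subject views
%%% the target from the origin along the +y axis, walks blindly to
%%% point p, then points along direction u; the implied perceived
%%% distance is where the pointing ray crosses the original line of
%%% sight. Illustrative only, not the authors' analysis code.
%%%
%%%     def triangulated_distance(p, u):
%%%         """p: (x, y) stopping point; u: (ux, uy) pointing
%%%         direction. Solves p + t*u = (0, d) for distance d."""
%%%         t = -p[0] / u[0]
%%%         return p[1] + t * u[1]
%%%
%%%     # e.g. p = (1.0, 0.0), u = (-1.0, 3.0) gives d = 3.0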

@Article{Healey:2012:LRV,
  author =       "Christopher G. Healey and Amit P. Sawant",
  title =        "On the limits of resolution and visual angle in
                 visualization",
  journal =      j-TAP,
  volume =       "9",
  number =       "4",
  pages =        "20:1--20:??",
  month =        oct,
  year =         "2012",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/2355598.2355603",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Mon Oct 22 11:06:19 MDT 2012",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "This article describes a perceptual level-of-detail
                 approach for visualizing data. Properties of a dataset
                 that cannot be resolved in the current display
                 environment need not be shown, for example, when too
                 few pixels are used to render a data element, or when
                 the element's subtended visual angle falls below the
                 acuity limits of our visual system. To identify these
                 situations, we asked: (1) What type of information can
                 a human user perceive in a particular display
                 environment? (2) Can we design visualizations that
                 control what they represent relative to these limits?
                 and (3) Is it possible to dynamically update a
                 visualization as the display environment changes, to
                 continue to effectively utilize our perceptual
                 abilities? To answer these questions, we conducted
                 controlled experiments that identified the pixel
                 resolution and subtended visual angle needed to
                 distinguish different values of luminance, hue, size,
                 and orientation. This information is summarized in a
                 perceptual display hierarchy, a formalization
                  describing how many pixels (resolution) and how much
                  physical area on a viewer's retina (visual angle) are
                  required for an element's visual properties to be
                  readily seen. We demonstrate our theoretical results by
                  visualizing historical climatology data from the
                  Intergovernmental Panel on Climate Change.",
  acknowledgement = ack-nhfb,
  articleno =    "20",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}
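
%%% A tiny sketch of the two limits the display hierarchy formalizes:
%%% pixels covered and subtended visual angle. The acuity and pixel
%%% cutoffs below are assumed placeholders, not the article's measured
%%% thresholds.
%%%
%%%     import math
%%%
%%%     def visual_angle_deg(size_m, distance_m):
%%%         return math.degrees(2.0 * math.atan2(size_m,
%%%                                              2.0 * distance_m))
%%%
%%%     def resolvable(pixels, angle_deg, min_pixels=4,
%%%                    min_angle_deg=0.05):
%%%         return pixels >= min_pixels and angle_deg >= min_angle_deg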

@Article{Oulasvirta:2012:HRR,
  author =       "Antti Oulasvirta and Antti Nurminen and Tiia
                 Suomalainen",
  title =        "How real is real enough? {Optimal} reality sampling
                 for fast recognition of mobile imagery",
  journal =      j-TAP,
  volume =       "9",
  number =       "4",
  pages =        "21:1--21:??",
  month =        oct,
  year =         "2012",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/2355598.2355604",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Mon Oct 22 11:06:19 MDT 2012",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "We present the first study to discover optimal reality
                 sampling for mobile imagery. In particular, we identify
                 the minimum information required for fast recognition
                 of images of directly perceivable real-world buildings
                 displayed on a mobile device. Resolution, image size,
                 and JPEG compression of images of fa{\c{c}}ades were
                 manipulated in a same--different recognition task
                 carried out in the field. Best-effort performance is
                 shown to be reachable with significantly lower detail
                 granularity than previously thought. For best user
                 performance, we recommend presenting images as large as
                 possible on the screen and decreasing resolution
                 accordingly.",
  acknowledgement = ack-nhfb,
  articleno =    "21",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Merer:2013:PCM,
  author =       "Adrien Merer and Mitsuko Aramaki and S{\o}lvi Ystad
                 and Richard Kronland-Martinet",
  title =        "Perceptual characterization of motion evoked by sounds
                 for synthesis control purposes",
  journal =      j-TAP,
  volume =       "10",
  number =       "1",
  pages =        "1:1--1:??",
  month =        feb,
  year =         "2013",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/2422105.2422106",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Thu Feb 28 16:35:15 MST 2013",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "This article addresses the question of synthesis and
                 control of sound attributes from a perceptual point of
                 view. We focused on an attribute related to the general
                 concept of motion evoked by sounds. To investigate this
                 concept, we tested 40 monophonic abstract sounds on
                 listeners via a questionnaire and drawings, using a
                 parametrized custom interface. This original procedure,
                 which was defined with synthesis and control
                 perspectives in mind, provides an alternative means of
                 determining intuitive control parameters for
                 synthesizing sounds evoking motion. Results showed that
                 three main shape categories (linear, with regular
                 oscillations, and with circular oscillations) and three
                 types of direction (rising, descending, and horizontal)
                 were distinguished by the listeners. In addition, the
                 subjects were able to perceive the low-frequency
                 oscillations (below 8 Hz) quite accurately. Three size
                 categories (small, medium, and large) and three levels
                 of randomness (none, low amplitude irregularities, and
                 high amplitude irregularities) and speed (constant
                 speed and speeds showing medium and large variations)
                 were also observed in our analyses of the participants'
                 drawings. We further performed a perceptual test to
                 confirm the relevance of the contribution of some
                 variables with synthesized sounds combined with visual
                 trajectories. Based on these results, a general
                 typology of evoked motion was drawn up and an intuitive
                 control strategy was designed, based on a symbolic
                 representation of continuous trajectories (provided by
                 devices such as motion capture systems, pen tablets,
                 etc.). These generic tools could be used in a wide
                 range of applications such as sound design, virtual
                 reality, sonification, and music.",
  acknowledgement = ack-nhfb,
  articleno =    "1",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Bojrab:2013:PIL,
  author =       "Micah Bojrab and Michel Abdul-Massih and Bedrich
                 Benes",
  title =        "Perceptual importance of lighting phenomena in
                 rendering of animated water",
  journal =      j-TAP,
  volume =       "10",
  number =       "1",
  pages =        "2:1--2:??",
  month =        feb,
  year =         "2013",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/2422105.2422107",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Thu Feb 28 16:35:15 MST 2013",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "Recent years have seen increasing research in
                  perceptually driven reductions in the costs of
                  realistically rendered imagery. Water is complex and
                  recognizable, and continues to be at the forefront of
                 research. However, the contribution of individual
                 lighting phenomena to the perceived realism of virtual
                 water has not been addressed. All these phenomena have
                 costs associated with their rendering, but does the
                 visual benefit outweigh these costs? This study
                 investigates the human perception of various
                 illumination components found in water-rich virtual
                 environments. The investigation uses a traditional
                 psychophysical analysis to examine viewer perception of
                 these lighting phenomena as they relate to the
                 rendering cost, and ultimately reveals common trends in
                 perceptual value. Five different scenes with a wide
                 range of water and lighting dynamics were tested for
                 perceptual value by one hundred participants. Our
                 results provide an importance comparison for lighting
                 phenomena in the rendering of water, and cost
                 reductions can be made with little or no effect on the
                 perceived quality of the imagery if viewed in a
                 scenario similar to our testing.",
  acknowledgement = ack-nhfb,
  articleno =    "2",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Selmanovic:2013:GSH,
  author =       "Elmedin Selmanovi{\'c} and Kurt Debattista and Thomas
                 Bashford-Rogers and Alan Chalmers",
  title =        "Generating stereoscopic {HDR} images using {HDR--LDR}
                 image pairs",
  journal =      j-TAP,
  volume =       "10",
  number =       "1",
  pages =        "3:1--3:??",
  month =        feb,
  year =         "2013",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/2422105.2422108",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Thu Feb 28 16:35:15 MST 2013",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "A number of novel imaging technologies have been
                 gaining popularity over the past few years. Foremost
                  among these are stereoscopy and high dynamic range
                  (HDR) imaging. While a large body of research has
                 looked into each of these imaging technologies
                 independently, very little work has attempted to
                 combine them. This is mostly due to the current
                 limitations in capture and display. In this article, we
                  mitigate the problem that capturing Stereoscopic HDR
                  (SHDR) would potentially require two HDR cameras by
                  capturing an HDR--LDR pair and using it to generate
                 3D stereoscopic HDR content. We ran a detailed user
                 study to compare four different methods of generating
                 SHDR content. The methods investigated were the
                 following: two based on expanding the luminance of the
                 LDR image, and two utilizing stereo correspondence
                 methods, which were adapted for our purposes. Results
                 demonstrate that one of the stereo correspondence
                 methods may be considered perceptually
                 indistinguishable from the ground truth (image pair
                 captured using two HDR cameras), while the other
                 methods are all significantly distinct from the ground
                 truth.",
  acknowledgement = ack-nhfb,
  articleno =    "3",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}
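
%%% A hedged sketch of the "expanding the luminance of the LDR image"
%%% family of methods the study tested: a simple gamma-based
%%% expansion. The exponent and peak luminance are assumptions; the
%%% article's two expansion methods are not reproduced here.
%%%
%%%     import numpy as np
%%%
%%%     def expand_ldr(ldr, peak_nits=4000.0, gamma=2.2):
%%%         """ldr: float array in [0, 1]; returns expanded
%%%         luminance up to peak_nits."""
%%%         return peak_nits * np.clip(ldr, 0.0, 1.0) ** gamma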

@Article{Gamper:2013:SSD,
  author =       "Hannes Gamper and Christina Dicke and Mark
                 Billinghurst and Kai Puolam{\"a}ki",
  title =        "Sound sample detection and numerosity estimation using
                 auditory display",
  journal =      j-TAP,
  volume =       "10",
  number =       "1",
  pages =        "4:1--4:??",
  month =        feb,
  year =         "2013",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/2422105.2422109",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Thu Feb 28 16:35:15 MST 2013",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "This article investigates the effect of various design
                 parameters of auditory information display on user
                 performance in two basic information retrieval tasks.
                 We conducted a user test with 22 participants in which
                 sets of sound samples were presented. In the first
                 task, the test participants were asked to detect a
                 given sample among a set of samples. In the second
                 task, the test participants were asked to estimate the
                 relative number of instances of a given sample in two
                 sets of samples. We found that the stimulus onset
                 asynchrony (SOA) of the sound samples had a significant
                 effect on user performance in both tasks. For the
                 sample detection task, the average error rate was about
                 10\% with an SOA of 100 ms. For the numerosity
                 estimation task, an SOA of at least 200 ms was
                  necessary to yield average error rates lower than
                  30\%. Other parameters, including the samples' sound type
                 (synthesized speech or earcons) and spatial quality
                 (multichannel loudspeaker or diotic headphone
                 playback), had no substantial effect on user
                 performance. These results suggest that diotic, or
                 indeed monophonic, playback with appropriately chosen
                 SOA may be sufficient in practical applications for
                 users to perform the given information retrieval tasks,
                 if information about the sample location is not
                 relevant. If location information was provided through
                 spatial playback of the samples, test subjects were
                 able to simultaneously detect and localize a sample
                 with reasonable accuracy.",
  acknowledgement = ack-nhfb,
  articleno =    "4",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}
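
%%% A numpy sketch of the presentation variable under study: mixing a
%%% set of sound samples whose onsets are spaced by a fixed stimulus
%%% onset asynchrony (SOA), e.g. the 100 ms and 200 ms values singled
%%% out by the results.
%%%
%%%     import numpy as np
%%%
%%%     def mix_with_soa(samples, soa_s=0.1, fs=44100):
%%%         """samples: list of 1-D float arrays; sample i starts at
%%%         i * soa_s seconds in the returned mono mix."""
%%%         step = int(fs * soa_s)
%%%         total = (step * (len(samples) - 1)
%%%                  + max(len(s) for s in samples))
%%%         out = np.zeros(total)
%%%         for i, s in enumerate(samples):
%%%             out[i * step:i * step + len(s)] += s
%%%         return out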

@Article{Zhao:2013:API,
  author =       "Mingtian Zhao and Song-Chun Zhu",
  title =        "Abstract painting with interactive control of
                 perceptual entropy",
  journal =      j-TAP,
  volume =       "10",
  number =       "1",
  pages =        "5:1--5:??",
  month =        feb,
  year =         "2013",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/2422105.2422110",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Thu Feb 28 16:35:15 MST 2013",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "This article presents a framework for generating
                 abstract art from photographs. The aesthetics of
                 abstract art is largely attributed to its greater
                 perceptual ambiguity than photographs. According to
                 psychological theories [Berlyne 1971], the ambiguity
                 tends to invoke moderate mental effort in the viewer
                 for interpreting the underlying contents, and this
                 process is usually accompanied by subtle aesthetic
                 pleasure. We study this phenomenon through human
                 experiments comparing the subjects' interpretations of
                  abstract art and photographs, and quantitatively
                  verify the increased perceptual ambiguities in terms
                  of recognition accuracy and response time. Based on the
                 studies, we measure the level of perceptual ambiguity
                 using entropy, as it measures uncertainty levels in
                 information theory, and propose a painterly rendering
                 method with interactive control of the ambiguity
                 levels. Given an input photograph, we first segment it
                 into regions corresponding to different objects and
                 parts in an interactive manner and organize them into a
                 hierarchical parse tree representation. Then we execute
                 a painterly rendering process with image obscuring
                 operators to transfer the photograph into an abstract
                 painting style with increased perceptual ambiguities in
                 both the scene and individual objects. Finally, using
                 kernel density estimation and message-passing
                 algorithms, we compute and control the ambiguity levels
                 numerically to the desired levels, during which we may
                 predict and control the viewer's perceptual path among
                 the image contents by assigning different ambiguity
                 levels to different objects. We have evaluated the
                 rendering results using a second set of human
                 experiments, and verified that they achieve similar
                 abstract effects to original abstract paintings.",
  acknowledgement = ack-nhfb,
  articleno =    "5",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}
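
%%% A minimal sketch of the ambiguity measure named in the abstract:
%%% Shannon entropy over a viewer's distribution of candidate
%%% interpretations. The example distribution is invented for
%%% illustration.
%%%
%%%     import numpy as np
%%%
%%%     def perceptual_entropy(p):
%%%         p = np.asarray(p, dtype=float)
%%%         p = p / p.sum()
%%%         nz = p[p > 0]
%%%         return float(-(nz * np.log2(nz)).sum())  # bits
%%%
%%%     # perceptual_entropy([0.7, 0.2, 0.1]) ~= 1.16 bits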

@Article{Kyto:2013:IRD,
  author =       "Mikko Kyt{\"o} and Aleksi M{\"a}kinen and Jukka
                 H{\"a}kkinen and Pirkko Oittinen",
  title =        "Improving relative depth judgments in augmented
                 reality with auxiliary augmentations",
  journal =      j-TAP,
  volume =       "10",
  number =       "1",
  pages =        "6:1--6:??",
  month =        feb,
  year =         "2013",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/2422105.2422111",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Thu Feb 28 16:35:15 MST 2013",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "Significant depth judgment errors are common in
                 augmented reality. This study presents a visualization
                 approach for improving relative depth judgments in
                 augmented reality. The approach uses auxiliary
                 augmented objects in addition to the main augmentation
                 to support ordinal and interval depth judgment tasks.
                 The auxiliary augmentations are positioned spatially
                 near real-world objects, and the location of the main
                 augmentation can be deduced based on the relative depth
                 cues between the augmented objects. In the experimental
                 part, the visualization approach was tested in the
                 ``X-ray'' visualization case with a video see-through
                 system. Two relative depth cues, in addition to motion
                 parallax, were used between graphical objects: relative
                 size and binocular disparity. The results show that the
                 presence of auxiliary objects significantly reduced
                 errors in depth judgment. Errors in judging the ordinal
                 location with respect to a wall (front, at, or behind)
                 and judging depth intervals were reduced. In addition
                 to reduced errors, the presence of auxiliary
                 augmentation increased the confidence in depth
                 judgments, and it was subjectively preferred. The
                 visualization approach did not have an effect on the
                 viewing time.",
  acknowledgement = ack-nhfb,
  articleno =    "6",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Bouchara:2013:CMS,
  author =       "Tifanie Bouchara and Christian Jacquemin and Brian F.
                 G. Katz",
  title =        "Cueing multimedia search with audiovisual blur",
  journal =      j-TAP,
  volume =       "10",
  number =       "2",
  pages =        "7:1--7:??",
  month =        may,
  year =         "2013",
  CODEN =        "????",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Sat Jun 1 11:28:31 MDT 2013",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "Situated in the context of multimedia browsing, this
                 study concerns perceptual processes involved in
                 searching for an audiovisual object displayed among
                 several distractors. The aim of the study is to
                 increase the perceptual saliency of the target in order
                 to enhance the search process. As blurring distractors
                 and maintaining the target sharp has proved to be a
                 great facilitator of visual search, we propose
                 combining visual blur with an audio blur analogue to
                 improve multimodal search. Three perceptual experiments
                 were performed in which participants had to retrieve an
                 audiovisual object from a set of six competing stimuli.
                 The first two experiments explored the effect of blur
                  level on unimodal search tasks. A third experiment
                  investigated the influence of combining the audio and
                  visual modalities, with both modalities cued, on an
                  audiovisual search task. Results showed that both
                  visual and audio blurs rendered distractor stimuli less
                  prominent and thus helped users focus on a sharp target
                  more easily. Performance was also faster and more
                 accurate in the bimodal condition than in either
                 unimodal search task, auditory or visual. Our work
                 suggests that audio and audiovisual interfaces
                 dedicated to multimedia search could benefit from
                 different uses of blur on presentation strategies.",
  acknowledgement = ack-nhfb,
  articleno =    "7",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Zhan:2013:MDF,
  author =       "Ce Zhan and Wanqing Li and Philip Ogunbona",
  title =        "Measuring the degree of face familiarity based on
                 extended {NMF}",
  journal =      j-TAP,
  volume =       "10",
  number =       "2",
  pages =        "8:1--8:??",
  month =        may,
  year =         "2013",
  CODEN =        "????",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Sat Jun 1 11:28:31 MDT 2013",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "Getting familiar with a face is an important cognitive
                  process in human perception of faces, but little work
                  has been reported on how to objectively measure the
                 degree of familiarity. In this article, a method is
                 proposed to quantitatively measure the familiarity of a
                 face with respect to a set of reference faces that have
                 been seen previously. The proposed method models the
                 context-free and context-dependent forms of familiarity
                 suggested by psychological studies and accounts for the
                 key factors, namely exposure frequency, exposure
                 intensity and similar exposure, that affect human
                 perception of face familiarity. Specifically, the
                 method divides the reference set into nonexclusive
                 groups and measures the familiarity of a given face by
                 aggregating the similarities of the face to the
                  individual groups. In addition, nonnegative matrix
                  factorization (NMF) is extended in this article to learn
                 a compact and localized subspace representation for
                 measuring the similarities of the face with respect to
                 the individual groups. The proposed method has been
                 evaluated through experiments that follow the protocols
                 commonly used in psychological studies and has been
                 compared with subjective evaluation. Results have shown
                 that the proposed measurement is highly consistent with
                 the subjective judgment of face familiarity. Moreover,
                 a face recognition method is devised using the concept
                 of face familiarity and the results on the standard
                 FERET evaluation protocols have further verified the
                 efficacy of the proposed familiarity measurement.",
  acknowledgement = ack-nhfb,
  articleno =    "8",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
  keywords =     "nonnegative matrix factorization (NMF)",
}

@Article{Nymoen:2013:ACB,
  author =       "Kristian Nymoen and Rolf Inge God{\o}y and Alexander
                 Refsum Jensenius and Jim Torresen",
  title =        "Analyzing correspondence between sound objects and
                 body motion",
  journal =      j-TAP,
  volume =       "10",
  number =       "2",
  pages =        "9:1--9:??",
  month =        may,
  year =         "2013",
  CODEN =        "????",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Sat Jun 1 11:28:31 MDT 2013",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "Links between music and body motion can be studied
                 through experiments called sound-tracing. One of the
                 main challenges in such research is to develop robust
                 analysis techniques that are able to deal with the
                 multidimensional data that musical sound and body
                 motion present. The article evaluates four different
                 analysis methods applied to an experiment in which
                 participants moved their hands following perceptual
                 features of short sound objects. Motion capture data
                 has been analyzed and correlated with a set of
                 quantitative sound features using four different
                 methods: (a) a pattern recognition classifier, (b)
                 $t$-tests, (c) Spearman's $ \rho $ correlation, and (d)
                 canonical correlation. This article shows how the
                 analysis methods complement each other, and that
                 applying several analysis techniques to the same data
                 set can broaden the knowledge gained from the
                 experiment.",
  acknowledgement = ack-nhfb,
  articleno =    "9",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Sugano:2013:GBJ,
  author =       "Yusuke Sugano and Yasuyuki Matsushita and Yoichi
                 Sato",
  title =        "Graph-based joint clustering of fixations and visual
                 entities",
  journal =      j-TAP,
  volume =       "10",
  number =       "2",
  pages =        "10:1--10:??",
  month =        may,
  year =         "2013",
  CODEN =        "????",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Sat Jun 1 11:28:31 MDT 2013",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "We present a method that extracts groups of fixations
                 and image regions for the purpose of gaze analysis and
                 image understanding. Since the attentional relationship
                 between visual entities conveys rich information,
                  automatically determining the relationship provides us
                  with a semantic representation of images. We show that, by
                 jointly clustering human gaze and visual entities, it
                 is possible to build meaningful and comprehensive
                 metadata that offer an interpretation about how people
                 see images. To achieve this, we developed a clustering
                 method that uses a joint graph structure between
                 fixation points and over-segmented image regions to
                 ensure a cross-domain smoothness constraint. We show
                 that the proposed clustering method achieves better
                 performance in relating attention to visual entities in
                 comparison with standard clustering techniques.",
  acknowledgement = ack-nhfb,
  articleno =    "10",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Ruddle:2013:LWV,
  author =       "Roy A. Ruddle and Ekaterina Volkova and Heinrich H.
                 B{\"u}lthoff",
  title =        "Learning to walk in virtual reality",
  journal =      j-TAP,
  volume =       "10",
  number =       "2",
  pages =        "11:1--11:??",
  month =        may,
  year =         "2013",
  CODEN =        "????",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Sat Jun 1 11:28:31 MDT 2013",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "This article provides longitudinal data for when
                 participants learned to travel with a walking metaphor
                 through virtual reality (VR) worlds, using interfaces
                 that ranged from joystick-only, to linear and
                 omnidirectional treadmills, and actual walking in VR.
                 Three metrics were used: travel time, collisions (a
                 measure of accuracy), and the speed profile. The time
                 that participants required to reach asymptotic
                 performance for traveling, and what that asymptote was,
                 varied considerably between interfaces. In particular,
                 when a world had tight turns (0.75 m corridors),
                 participants who walked were more proficient than those
                 who used a joystick to locomote and turned either
                 physically or with a joystick, even after 10 minutes of
                 training. The speed profile showed that this was caused
                 by participants spending a notable percentage of the
                 time stationary, irrespective of whether or not they
                 frequently played computer games. The study shows how
                 speed profiles can be used to help evaluate
                 participants' proficiency with travel interfaces,
                  highlights the need for training to be structured to
                  address specific weaknesses in proficiency (e.g.,
                 start-stop movement), and for studies to measure and
                 report that proficiency.",
  acknowledgement = ack-nhfb,
  articleno =    "11",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Geigel:2013:ISI,
  author =       "Joe Geigel and Jeanine Stefanucci",
  title =        "Introduction to special issue {SAP 2013}",
  journal =      j-TAP,
  volume =       "10",
  number =       "3",
  pages =        "12:1--12:??",
  month =        aug,
  year =         "2013",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/2506206.2506207",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Fri Aug 16 07:50:57 MDT 2013",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tap.bib",
  acknowledgement = ack-nhfb,
  articleno =    "12",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Tompkin:2013:PAA,
  author =       "James Tompkin and Min H. Kim and Kwang In Kim and Jan
                 Kautz and Christian Theobalt",
  title =        "Preference and artifact analysis for video transitions
                 of places",
  journal =      j-TAP,
  volume =       "10",
  number =       "3",
  pages =        "13:1--13:??",
  month =        aug,
  year =         "2013",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/2501601",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Fri Aug 16 07:50:57 MDT 2013",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "Emerging interfaces for video collections of places
                 attempt to link similar content with seamless
                 transitions. However, the automatic computer vision
                 techniques that enable these transitions have many
                 failure cases which lead to artifacts in the final
                 rendered transition. Under these conditions, which
                 transitions are preferred by participants and which
                 artifacts are most objectionable? We perform an
                 experiment with participants comparing seven transition
                 types, from movie cuts and dissolves to image-based
                 warps and virtual camera transitions, across five
                  scenes in a city. This article describes how we
                 condition this experiment on slight and considerable
                 view change cases, and how we analyze the feedback from
                 participants to find their preference for transition
                 types and artifacts. We discover that transition
                  preference varies with view change, that automatically
                  rendered transitions are significantly preferred even
                 with some artifacts, and that dissolve transitions are
                 comparable to less-sophisticated rendered transitions.
                 This leads to insights into what visual features are
                 important to maintain in a rendered transition, and to
                 an artifact ordering within our transitions.",
  acknowledgement = ack-nhfb,
  articleno =    "13",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Mccrae:2013:SPP,
  author =       "James Mccrae and Niloy J. Mitra and Karan Singh",
  title =        "Surface perception of planar abstractions",
  journal =      j-TAP,
  volume =       "10",
  number =       "3",
  pages =        "14:1--14:??",
  month =        aug,
  year =         "2013",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/2501853",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Fri Aug 16 07:50:57 MDT 2013",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "Various algorithms have been proposed to create planar
                 abstractions of 3D models, but there has been no
                 systematic effort to evaluate the effectiveness of such
                 abstractions in terms of perception of the abstracted
                 surfaces. In this work, we perform a large
                 crowd-sourced study involving approximately 70k samples
                 to evaluate how well users can orient gauges on planar
                 abstractions of commonly occurring models. We test four
                 styles of planar abstractions against ground truth
                 surface representations, and analyze the data to
                 discover a wide variety of correlations between task
                 error and measurements relating to surface-specific
                 properties such as curvature, local thickness and
                 medial axis distance, and abstraction-specific
                 properties. We use these discovered correlations to
                 create linear models to predict error in surface
                 understanding at a given point, for both surface
                 representations and planar abstractions. Our predictive
                 models reveal the geometric causes most responsible for
                 error, and we demonstrate their potential use to build
                 upon existing planar abstraction techniques in order to
                 improve perception of the abstracted surface.",
  acknowledgement = ack-nhfb,
  articleno =    "14",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Chen:2013:SPT,
  author =       "Jianhui Chen and Robert S. Allison",
  title =        "Shape perception of thin transparent objects with
                 stereoscopic viewing",
  journal =      j-TAP,
  volume =       "10",
  number =       "3",
  pages =        "15:1--15:??",
  month =        aug,
  year =         "2013",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/2506206.2506208",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Fri Aug 16 07:50:57 MDT 2013",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "Many materials, including water surfaces, jewels, and
                 glassware exhibit transparent refractions. The human
                 visual system can somehow recover 3D shape from
                 refracted images. While previous research has
                 elucidated various visual cues that can facilitate
                  visual perception of transparent objects, most studies
                  focused on monocular material perception. The question
                 of shape perception of transparent objects is much more
                  complex, and few studies have been undertaken,
                  particularly in terms of binocular vision. In this
                 article, we first design a system for stereoscopic
                 surface orientation estimation with photo-realistic
                 stimuli. It displays pre-rendered stereoscopic images
                 and a real-time S3D (Stereoscopic 3D) shape probe
                 simultaneously. Then we estimate people's perception of
                 the shape of thin transparent objects using a gauge
                 figure task. Our results suggest that people can
                 consistently perceive the surface orientation of thin
                 transparent objects, and stereoscopic viewing improves
                 the precision of estimates. To explain the results, we
                 present an edge-aware orientation map based on image
                 gradients and structure tensors to illustrate the
                 orientation information in images. We also decomposed
                 the normal direction of the surface into azimuth angle
                 and slant angle to explain why additional depth
                 information can improve the accuracy of perceived
                 normal direction.",
  acknowledgement = ack-nhfb,
  articleno =    "15",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Easa:2013:EMD,
  author =       "Haider K. Easa and Rafal K. Mantiuk and Ik Soo Lim",
  title =        "Evaluation of monocular depth cues on a
                 high-dynamic-range display for visualization",
  journal =      j-TAP,
  volume =       "10",
  number =       "3",
  pages =        "16:1--16:??",
  month =        aug,
  year =         "2013",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/2504568",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Fri Aug 16 07:50:57 MDT 2013",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "The aim of this work is to identify the depth cues
                 that provide intuitive depth-ordering when used to
                  visualize abstract data. In particular, we focus on the
                 depth cues that are effective on a high-dynamic-range
                 (HDR) display: contrast and brightness. In an
                 experiment participants were shown a visualization of
                 the volume layers at different depths with a single
                 isolated monocular cue as the only indication of depth.
                 The observers were asked to identify which slice of the
                 volume appears to be closer. The results show that
                 brightness, contrast and relative size are the most
                 effective monocular depth cues for providing an
                 intuitive depth ordering.",
  acknowledgement = ack-nhfb,
  articleno =    "16",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Argelaguet:2013:EIP,
  author =       "Ferran Argelaguet and David Antonio G{\'o}mez
                 J{\'a}uregui and Maud Marchal and Anatole L{\'e}cuyer",
  title =        "Elastic images: Perceiving local elasticity of images
                 through a novel pseudo-haptic deformation effect",
  journal =      j-TAP,
  volume =       "10",
  number =       "3",
  pages =        "17:1--17:??",
  month =        aug,
  year =         "2013",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/2501599",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Fri Aug 16 07:50:57 MDT 2013",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "We introduce the Elastic Images, a novel pseudo-haptic
                 feedback technique which enables the perception of the
                  local elasticity of images without the need for any
                  haptic device. The proposed approach focuses on whether
                 visual feedback is able to induce a sensation of
                 stiffness when the user interacts with an image using a
                  standard mouse. The user, when clicking on an Elastic
                 Image, is able to deform it locally according to its
                 elastic properties. To reinforce the effect, we also
                 propose the generation of procedural shadows and
                 creases to simulate the compressibility of the image
                  and several mouse cursor replacements to enhance
                 pressure and stiffness perception. A psychophysical
                 experiment was conducted to quantify this novel
                 pseudo-haptic perception and determine its perceptual
                 threshold (or its Just Noticeable Difference). The
                 results showed that users were able to recognize up to
                 eight different stiffness values with our proposed
                 method and confirmed that it provides a perceivable and
                 exploitable sensation of elasticity. The potential
                 applications of the proposed approach range from
                  pressure sensing in product catalogs and games to its
                  usage in graphical user interfaces for increasing the
                 expressiveness of widgets.",
  acknowledgement = ack-nhfb,
  articleno =    "17",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Kelly:2013:SPV,
  author =       "Jonathan W. Kelly and Melissa Burton and Brice Pollock
                 and Eduardo Rubio and Michael Curtis and Julio {De La
                 Cruz} and Stephen Gilbert and Eliot Winer",
  title =        "Space perception in virtual environments: Displacement
                 from the center of projection causes less distortion
                 than predicted by cue-based models",
  journal =      j-TAP,
  volume =       "10",
  number =       "4",
  pages =        "18:1--18:??",
  month =        oct,
  year =         "2013",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/2536764.2536765",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Thu Mar 13 09:38:52 MDT 2014",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "Virtual reality systems commonly include both
                 monocular and binocular depth cues, which have the
                 potential to provide viewers with a realistic
                 impression of spatial properties of the virtual
                 environment. However, when multiple viewers share the
                 same display, only one viewer typically receives the
                 projectively correct images. All other viewers
                 experience the same images despite displacement from
                 the center of projection (CoP). Three experiments
                 evaluated perceptual distortions caused by displacement
                 from the CoP and compared those percepts to predictions
                 of models based on monocular and binocular viewing
                 geometry. Leftward and rightward displacement from the
                 CoP caused virtual angles on the ground plane to be
                 judged as larger and smaller, respectively, compared to
                 judgments from the CoP. Backward and forward
                 displacement caused rectangles on the ground plane to
                 be judged as larger and smaller in depth, respectively,
                 compared to judgments from the CoP. Judgment biases
                 were in the same direction as cue-based model
                 predictions but of smaller magnitude. Displacement from
                 the CoP had asymmetric effects on perceptual judgments,
                 unlike model predictions. Perceptual distortion
                 occurred with monocular cues alone but was exaggerated
                 when binocular cues were added. The results are
                 grounded in terms of practical implications for
                 multiuser virtual environments.",
  acknowledgement = ack-nhfb,
  articleno =    "18",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Yildiz:2013:FAP,
  author =       "Zeynep Cipiloglu Yildiz and Abdullah Bulbul and Tolga
                 Capin",
  title =        "A framework for applying the principles of depth
                 perception to information visualization",
  journal =      j-TAP,
  volume =       "10",
  number =       "4",
  pages =        "19:1--19:??",
  month =        oct,
  year =         "2013",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/2536764.2536766",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Thu Mar 13 09:38:52 MDT 2014",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "During the visualization of 3D content, using the
                 depth cues selectively to support the design goals and
                 enabling a user to perceive the spatial relationships
                  between the objects are important concerns. We
                  automate this cue-selection process by proposing a
                 framework that determines important depth cues for the
                 input scene and the rendering methods to provide these
                 cues. While determining the importance of the cues, we
                 consider the user's tasks and the scene's spatial
                 layout. The importance of each depth cue is calculated
                 using a fuzzy logic--based decision system. Then,
                 suitable rendering methods that provide the important
                 cues are selected by performing a cost-profit analysis
                 on the rendering costs of the methods and their
                 contribution to depth perception. Possible cue
                 conflicts are considered and handled in the system. We
                 also provide formal experimental studies designed for
                 several visualization tasks. A statistical analysis of
                 the experiments verifies the success of our
                 framework.",
  acknowledgement = ack-nhfb,
  articleno =    "19",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Nunez-Varela:2013:MGC,
  author =       "Jose Nunez-Varela and Jeremy L. Wyatt",
  title =        "Models of gaze control for manipulation tasks",
  journal =      j-TAP,
  volume =       "10",
  number =       "4",
  pages =        "20:1--20:??",
  month =        oct,
  year =         "2013",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/2536764.2536767",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Thu Mar 13 09:38:52 MDT 2014",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "Human studies have shown that gaze shifts are mostly
                 driven by the current task demands. In manipulation
                 tasks, gaze leads action to the next manipulation
                 target. One explanation is that fixations gather
                 information about task relevant properties, where task
                 relevance is signalled by reward. This work presents
                 new computational models of gaze shifting, where the
                 agent imagines ahead in time the informational effects
                 of possible gaze fixations. Building on our previous
                 work, the contributions of this article are: (i) the
                 presentation of two new gaze control models, (ii)
                 comparison of their performance to our previous model,
                 (iii) results showing the fit of all these models to
                 previously published human data, and (iv) integration
                 of a visual search process. The first new model selects
                 the gaze that most reduces positional uncertainty of
                 landmarks (Unc), and the second maximises expected
                 rewards by reducing positional uncertainty (RU). Our
                 previous approach maximises the expected gain in
                 cumulative reward by reducing positional uncertainty
                 (RUG). In experiment ii the models are tested on a
                 simulated humanoid robot performing a manipulation
                 task, and each model's performance is characterised by
                 varying three environmental variables. This experiment
                 provides evidence that the RUG model has the best
                 overall performance. In experiment iii, we compare the
                 hand-eye coordination timings of the models in a robot
                 simulation to those obtained from human data. This
                 provides evidence that only the models that incorporate
                 both uncertainty and reward (RU and RUG) match human
                 data.",
  acknowledgement = ack-nhfb,
  articleno =    "20",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Gaffary:2013:CAC,
  author =       "Yoren Gaffary and Victoria Eyharabide and Jean-Claude
                 Martin and Mehdi Ammi",
  title =        "Clustering approach to characterize haptic expressions
                 of emotions",
  journal =      j-TAP,
  volume =       "10",
  number =       "4",
  pages =        "21:1--21:??",
  month =        oct,
  year =         "2013",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/2536764.2536768",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Thu Mar 13 09:38:52 MDT 2014",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "Several studies have investigated the relevance of
                 haptics to physically convey various types of emotion.
                 However, they use basic analysis approaches to identify
                 the relevant features for an effective communication of
                 emotion. This article presents an advanced analysis
                 approach, based on the clustering technique, that
                 enables the extraction of the general features of
                 affective haptic expressions as well as the
                 identification of specific features in order to
                 discriminate between close emotions that are difficult
                 to differentiate. This approach was tested in the
                 context of affective communication through a virtual
                 handshake. It uses a haptic device, which enables the
                 expression of 3D movements. The results of this
                 research were compared to those of the standard
                 Analysis of Variance method in order to highlight the
                 advantages and limitations of each approach.",
  acknowledgement = ack-nhfb,
  articleno =    "21",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Marentakis:2013:PIG,
  author =       "G. Marentakis and S. Mcadams",
  title =        "Perceptual impact of gesture control of
                 spatialization",
  journal =      j-TAP,
  volume =       "10",
  number =       "4",
  pages =        "22:1--22:??",
  month =        oct,
  year =         "2013",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/2536764.2536769",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Thu Mar 13 09:38:52 MDT 2014",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "In two experiments, visual cues from gesture control
                 of spatialization were found to affect auditory
                 movement perception depending on the identifiability of
                 auditory motion trajectories, the congruency of
                 audiovisual stimulation, the sensory focus of
                 attention, and the attentional process involved.
                 Visibility of the performer's gestures improved spatial
                 audio trajectory identification, but it shifted the
                 listeners' attention to vision, impairing auditory
                 motion encoding in the case of incongruent stimulation.
                 On the other hand, selectively directing attention to
                 audition resulted in interference from the visual cues
                 for acoustically ambiguous trajectories. Auditory
                 motion information was poorly preserved when dividing
                 attention between auditory and visual movement feedback
                 from performance gestures. An auditory focus of
                 attention is a listener strategy that maximizes
                 performance, due to the improvement caused by congruent
                 visual stimulation and its robustness to interference
                 from incongruent stimulation for acoustically
                 unambiguous trajectories. Attentional strategy and
                 auditory motion calibration are two aspects that need
                 to be considered when employing gesture control of
                 spatialization.",
  acknowledgement = ack-nhfb,
  articleno =    "22",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Turchet:2013:WPA,
  author =       "Luca Turchet and Stefania Serafin and Paola Cesari",
  title =        "Walking pace affected by interactive sounds simulating
                 stepping on different terrains",
  journal =      j-TAP,
  volume =       "10",
  number =       "4",
  pages =        "23:1--23:??",
  month =        oct,
  year =         "2013",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/2536764.2536770",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Thu Mar 13 09:38:52 MDT 2014",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "This article investigates whether auditory feedback
                 affects natural locomotion patterns. Individuals were
                 provided with footstep sounds simulating different
                 surface materials. The sounds were interactively
                 generated using shoes with pressure sensors. Results
                 showed that subjects' walking speed changed as a
                 function of the type of simulated ground material. This
                 effect may arise due to the presence of conflicting
                 information between the auditory and foot-haptic
                  modalities, or because of an adjustment of locomotion to
                 the physical properties evoked by the sounds simulating
                 the ground materials. The results reported in this
                 study suggest that auditory feedback may be more
                 important in the regulation of walking in natural
                 environments than has been acknowledged. Furthermore,
                 auditory feedback could be used to develop novel
                 approaches to the design of therapeutic and
                 rehabilitation procedures for locomotion.",
  acknowledgement = ack-nhfb,
  articleno =    "23",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Lylykangas:2013:IVS,
  author =       "Jani Lylykangas and Veikko Surakka and Jussi Rantala
                 and Roope Raisamo",
  title =        "Intuitiveness of vibrotactile speed regulation cues",
  journal =      j-TAP,
  volume =       "10",
  number =       "4",
  pages =        "24:1--24:??",
  month =        oct,
  year =         "2013",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/2536764.2536771",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Thu Mar 13 09:38:52 MDT 2014",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "Interpretations of vibrotactile stimulations were
                 compared between two participant groups. In both
                 groups, the task was to evaluate specifically designed
                 tactile stimulations presented to the wrist or chest.
                 Ascending, constant, and descending vibration frequency
                 profiles of the stimuli represented information for
                 three different speed regulation instructions:
                 ``accelerate your speed,'' ``keep your speed
                 constant,'' and ``decelerate your speed,''
                 respectively. The participants were treated differently
                 so that one of the groups was first taught (i.e.,
                 primed) the meanings of the stimuli, whereas the other
                 group was not taught (i.e., unprimed). The results
                 showed that the stimuli were evaluated nearly equally
                  in the primed and the unprimed groups. The
                  best-performing stimuli communicated the three intended
                  meanings at a rate of 88\% to 100\% in the primed group
                  and at a rate of 71\% to 83\% in the unprimed group.
                  Both groups performed equally in evaluating
                 ``keep your speed constant'' and ``decelerate your
                 speed'' information. As the unprimed participants
                 performed similarly to the primed participants, the
                 results suggest that vibrotactile stimulation can be
                 intuitively understood. The results suggest further
                 that carefully designed vibrotactile stimulations could
                 be functional in delivering easy-to-understand feedback
                 on how to regulate the speed of movement, such as in
                 physical exercise and rehabilitation applications.",
  acknowledgement = ack-nhfb,
  articleno =    "24",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Blom:2013:VTC,
  author =       "Kristopher J. Blom and Steffi Beckhaus",
  title =        "Virtual travel collisions: Response method influences
                 perceived realism of virtual environments",
  journal =      j-TAP,
  volume =       "10",
  number =       "4",
  pages =        "25:1--25:??",
  month =        oct,
  year =         "2013",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/2536764.2536772",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Thu Mar 13 09:38:52 MDT 2014",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "Travel methods are the most basic and widespread
                  form of interaction with virtual environments. They are
                 the primary and often the only way the user
                 interactively experiences the environment. We present a
                 study composed of three experiments that investigates
                  how virtual collision methods and feedback impact user
                 perception of the realism of collisions and the virtual
                 environment. A wand-based virtual travel method was
                 used to navigate maze environments in an immersive
                 projective system. The results indicated that the
                 introduction of collision handling significantly
                 improved the user's perception of the realism of the
                 environment and collisions. An effect of feedback on
                 the perceived level of realism of collisions and
                 solidity of the environment was also found. Our results
                 indicate that feedback should be context appropriate,
                  e.g., fitting to a collision with the object; yet, the
                 modality and richness of feedback were only important
                 in that traditional color change feedback did not
                 perform as well as audio or haptic feedback. In
                 combination, the experiments indicated that in
                 immersive virtual environments the stop collision
                 handling method produced a more realistic impression
                 than the slide method that is popular in games. In
                 total, the study suggests that feedback fitting the
                 collision context, coupled with the stop handling
                 method, provides the best perceived realism of
                 collisions and scene.",
  acknowledgement = ack-nhfb,
  articleno =    "25",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Lin:2013:SMA,
  author =       "Kai-Hsiang Lin and Xiaodan Zhuang and Camille
                 Goudeseune and Sarah King and Mark Hasegawa-Johnson and
                 Thomas S. Huang",
  title =        "Saliency-maximized audio visualization and efficient
                 audio-visual browsing for faster-than-real-time human
                 acoustic event detection",
  journal =      j-TAP,
  volume =       "10",
  number =       "4",
  pages =        "26:1--26:??",
  month =        oct,
  year =         "2013",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/2536764.2536773",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Thu Mar 13 09:38:52 MDT 2014",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "Browsing large audio archives is challenging because
                 of the limitations of human audition and attention.
                 However, this task becomes easier with a suitable
                 visualization of the audio signal, such as a
                 spectrogram transformed to make unusual audio events
                 salient. This transformation maximizes the mutual
                 information between an isolated event's spectrogram and
                 an estimate of how salient the event appears in its
                 surrounding context. When such spectrograms are
                 computed and displayed with fluid zooming over many
                 temporal orders of magnitude, sparse events in long
                 audio recordings can be detected more quickly and more
                 easily. In particular, in a 1/10-real-time acoustic
                 event detection task, subjects who were shown
                 saliency-maximized rather than conventional
                 spectrograms performed significantly better. Saliency
                 maximization also improves the mutual information
                 between the ground truth of nonbackground sounds and
                 visual saliency, more than other common enhancements to
                 visualization.",
  acknowledgement = ack-nhfb,
  articleno =    "26",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Komogortsev:2013:LOP,
  author =       "Oleg Komogortsev and Corey Holland and Sampath
                 Jayarathna and Alex Karpov",
  title =        "{$2$D} Linear oculomotor plant mathematical model:
                 Verification and biometric applications",
  journal =      j-TAP,
  volume =       "10",
  number =       "4",
  pages =        "27:1--27:??",
  month =        oct,
  year =         "2013",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/2536764.2536774",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Thu Mar 13 09:38:52 MDT 2014",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "This article assesses the ability of a two-dimensional
                 (2D) linear homeomorphic oculomotor plant mathematical
                 model to simulate normal human saccades on a 2D plane.
                 The proposed model is driven by a simplified pulse-step
                 neuronal control signal and makes use of linear
                 simplifications to account for the unique
                 characteristics of the eye globe and the extraocular
                 muscles responsible for horizontal and vertical eye
                 movement. The linear nature of the model sacrifices
                 some anatomical accuracy for computational speed and
                 analytic tractability, and may be implemented as two
                 one-dimensional models for parallel signal simulation.
                 Practical applications of the model might include
                 improved noise reduction and signal recovery facilities
                 for eye tracking systems, additional metrics from which
                 to determine user effort during usability testing, and
                 enhanced security in biometric identification systems.
                  The results indicate that the model is capable of
                  producing oblique saccades with properties resembling
                 those of normal human saccades and is capable of
                 deriving muscle constants that are viable as biometric
                  indicators. Therefore, we conclude that the sacrifice
                  in anatomical accuracy of the model produces
                 negligible effects on the accuracy of saccadic
                 simulation on a 2D plane and may provide a usable model
                 for applications in computer science, human-computer
                 interaction, and related fields.",
  acknowledgement = ack-nhfb,
  articleno =    "27",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Caramiaux:2014:RSS,
  author =       "B. Caramiaux and F. Bevilacqua and T. Bianco and N.
                 Schnell and O. Houix and P. Susini",
  title =        "The Role of Sound Source Perception in Gestural Sound
                 Description",
  journal =      j-TAP,
  volume =       "11",
  number =       "1",
  pages =        "1:1--1:??",
  month =        apr,
  year =         "2014",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/2536811",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Tue Apr 22 18:09:09 MDT 2014",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "We investigated gesture description of sound stimuli
                 performed during a listening task. Our hypothesis is
                 that the strategies in gestural responses depend on the
                 level of identification of the sound source and
                 specifically on the identification of the action
                 causing the sound. To validate our hypothesis, we
                 conducted two experiments. In the first experiment, we
                 built two corpora of sounds. The first corpus contains
                 sounds with identifiable causal actions. The second
                 contains sounds for which no causal actions could be
                  identified. The properties of these corpora were validated
                  through a listening test. In the second experiment,
                 participants performed arm and hand gestures
                 synchronously while listening to sounds taken from
                 these corpora. Afterward, we conducted interviews
                 asking participants to verbalize their experience while
                 watching their own video recordings. They were
                 questioned on their perception of the listened sounds
                 and on their gestural strategies. We showed that for
                 the sounds where causal action can be identified,
                 participants mainly mimic the action that has produced
                 the sound. In the other case, when no action can be
                 associated with the sound, participants trace contours
                  related to sound acoustic features. We also found that
                  interparticipant gesture variability is higher for
                  causal sounds than for noncausal sounds.
                 Variability demonstrates that, in the first case,
                 participants have several ways of producing the same
                 action, whereas in the second case, the sound features
                 tend to make the gesture responses consistent.",
  acknowledgement = ack-nhfb,
  articleno =    "1",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Stransky:2014:ELT,
  author =       "Debi Stransky and Laurie M. Wilcox and Robert S.
                 Allison",
  title =        "Effects of Long-Term Exposure on Sensitivity and
                 Comfort with Stereoscopic Displays",
  journal =      j-TAP,
  volume =       "11",
  number =       "1",
  pages =        "2:1--2:??",
  month =        apr,
  year =         "2014",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/2536810",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Tue Apr 22 18:09:09 MDT 2014",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "Stereoscopic 3D media has recently increased in
                 appreciation and availability. This popularity has led
                 to concerns over the health effects of habitual viewing
                  of stereoscopic 3D content, concerns that are largely
                 hypothetical. Here we examine the effects of repeated,
                 long-term exposure to stereoscopic 3D in the workplace
                 on several measures of stereoscopic sensitivity
                 (discrimination, depth matching, and fusion limits)
                 along with reported negative symptoms associated with
                 viewing stereoscopic 3D. We recruited a group of adult
                 stereoscopic 3D industry experts and compared their
                 performance with observers who were (i) inexperienced
                 with stereoscopic 3D, (ii) researchers who study
                 stereopsis, and (iii) vision researchers with little or
                 no experimental stereoscopic experience. Unexpectedly,
                 we found very little difference between the four groups
                 on all but the depth discrimination task, and the
                 differences that did occur appear to reflect
                 task-specific training or experience. Thus, we found no
                 positive or negative consequences of repeated and
                 extended exposure to stereoscopic 3D in these
                 populations.",
  acknowledgement = ack-nhfb,
  articleno =    "2",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Wang:2014:OGL,
  author =       "Rui I. Wang and Brandon Pelfrey and Andrew T.
                 Duchowski and Donald H. House",
  title =        "Online {$3$D} Gaze Localization on Stereoscopic
                 Displays",
  journal =      j-TAP,
  volume =       "11",
  number =       "1",
  pages =        "3:1--3:??",
  month =        apr,
  year =         "2014",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/2593689",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Tue Apr 22 18:09:09 MDT 2014",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "This article summarizes our previous work on
                 developing an online system to allow the estimation of
                 3D gaze depth using eye tracking in a stereoscopic
                 environment. We report on recent extensions allowing us
                  to recover the full 3D gaze position. Our system employs
                 a 3D calibration process that determines the parameters
                 of a mapping from a naive depth estimate, based simply
                 on triangulation, to a refined 3D gaze point estimate
                 tuned to a particular user. We show that our system is
                 an improvement on the geometry-based 3D gaze estimation
                 returned by a proprietary algorithm provided with our
                 tracker. We also compare our approach with that of the
                 Parameterized Self-Organizing Map (PSOM) method, due to
                 Essig and colleagues, which also individually
                 calibrates to each user. We argue that our method is
                 superior in speed and ease of calibration, is easier to
                 implement, and does not require an iterative solver to
                 produce a gaze position, thus guaranteeing computation
                 at the rate of tracker acquisition. In addition, we
                 report on a user study that indicates that, compared
                 with PSOM, our method more accurately estimates gaze
                 depth, and is nearly as accurate in estimating
                 horizontal and vertical position. Results are verified
                 on two different 4D eye tracking systems, a high
                 accuracy Wheatstone haploscope and a medium accuracy
                 active stereo display. Thus, it is the recommended
                 method for applications that primarily require gaze
                 depth information, while its ease of use makes it
                 suitable for many applications requiring full 3D gaze
                 position.",
  acknowledgement = ack-nhfb,
  articleno =    "3",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

@Article{Pacchierotti:2014:ITT,
  author =       "Claudio Pacchierotti and Asad Tirmizi and Domenico
                 Prattichizzo",
  title =        "Improving Transparency in Teleoperation by Means of
                 Cutaneous Tactile Force Feedback",
  journal =      j-TAP,
  volume =       "11",
  number =       "1",
  pages =        "4:1--4:??",
  month =        apr,
  year =         "2014",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/2604969",
  ISSN =         "1544-3558 (print), 1544-3965 (electronic)",
  ISSN-L =       "1544-3558",
  bibdate =      "Tue Apr 22 18:09:09 MDT 2014",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tap.bib",
  abstract =     "A study on the role of cutaneous and kinesthetic force
                 feedback in teleoperation is presented. Cutaneous cues
                 provide less transparency than kinesthetic force, but
                 they do not affect the stability of the teleoperation
                 system. On the other hand, kinesthesia provides a
                 compelling illusion of telepresence but affects the
                 stability of the haptic loop. However, when employing
                 common grounded haptic interfaces, it is not possible
                 to independently control the cutaneous and kinesthetic
                 components of the interaction. For this reason, many
                 control techniques ensure a stable interaction by
                 scaling down both kinesthetic and cutaneous force
                 feedback, even though acting on the cutaneous channel
                 is not necessary. We discuss here the feasibility of a
                 novel approach. It aims at improving the realism of the
                 haptic rendering, while preserving its stability, by
                 modulating cutaneous force to compensate for a lack of
                 kinesthesia. We carried out two teleoperation
                 experiments, evaluating (1) the role of cutaneous
                 stimuli when reducing kinesthesia and (2) the extent to
                 which an overactuation of the cutaneous channel can
                 fully compensate for a lack of kinesthetic force
                 feedback. Results showed that, to some extent, it is
                 possible to compensate for a lack of kinesthesia with
                 the aforementioned technique, without significant
                 performance degradation. Moreover, users showed a high
                 comfort level in using the proposed system.",
  acknowledgement = ack-nhfb,
  articleno =    "4",
  fjournal =     "ACM Transactions on Applied Perception (TAP)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J932",
}

%% TO DO: Check for more articles in v11n1