%%% -*-BibTeX-*-
%%% ====================================================================
%%% BibTeX-file{
%%%     author          = "Nelson H. F. Beebe",
%%%     version         = "1.14",
%%%     date            = "18 October 2016",
%%%     time            = "11:55:45 MDT",
%%%     filename        = "tiis.bib",
%%%     address         = "University of Utah
%%%                        Department of Mathematics, 110 LCB
%%%                        155 S 1400 E RM 233
%%%                        Salt Lake City, UT 84112-0090
%%%                        USA",
%%%     telephone       = "+1 801 581 5254",
%%%     FAX             = "+1 801 581 4148",
%%%     URL             = "http://www.math.utah.edu/~beebe",
%%%     checksum        = "28406 6366 36566 344212",
%%%     email           = "beebe at math.utah.edu, beebe at acm.org,
%%%                        beebe at computer.org (Internet)",
%%%     codetable       = "ISO/ASCII",
%%%     keywords        = "bibliography; BibTeX; ACM Transactions on
%%%                        Interactive Intelligent Systems (TIIS)",
%%%     license         = "public domain",
%%%     supported       = "yes",
%%%     docstring       = "This is a COMPLETE BibTeX bibliography for
%%%                        ACM Transactions on Interactive Intelligent
%%%                        Systems (TIIS) (CODEN ????, ISSN 2160-6455
%%%                        (print), 2160-6463 (electronic)), covering
%%%                        all journal issues from 2011 -- date.
%%%
%%%                        At version 1.14, the COMPLETE journal
%%%                        coverage looked like this:
%%%
%%%                             2011 (   6)    2013 (  20)    2015 (  24)
%%%                             2012 (  24)    2014 (  20)    2016 (  30)
%%%
%%%                             Article:        124
%%%
%%%                             Total entries:  124
%%%
%%%                        The journal Web site can be found at:
%%%
%%%                            http://tiis.acm.org/
%%%                            http://www.acm.org/tiis/
%%%
%%%                        The journal table of contents page is at:
%%%
%%%                            http://dl.acm.org/pub.cfm?id=J1341
%%%                            http://portal.acm.org/browse_dl.cfm?idx=J1341
%%%
%%%                        Qualified subscribers can retrieve the full
%%%                        text of recent articles in PDF form.
%%%
%%%                        The initial draft was extracted from the ACM
%%%                        Web pages.
%%%
%%%                        ACM copyrights explicitly permit abstracting
%%%                        with credit, so article abstracts, keywords,
%%%                        and subject classifications have been
%%%                        included in this bibliography wherever
%%%                        available.  Article reviews have been
%%%                        omitted, until their copyright status has
%%%                        been clarified.
%%%
%%%                        bibsource keys in the bibliography entries
%%%                        below indicate the entry originally came
%%%                        from the computer science bibliography
%%%                        archive, even though it has likely since
%%%                        been corrected and updated.
%%%
%%%                        URL keys in the bibliography point to
%%%                        World Wide Web locations of additional
%%%                        information about the entry.
%%%
%%%                        BibTeX citation tags are uniformly chosen
%%%                        as name:year:abbrev, where name is the
%%%                        family name of the first author or editor,
%%%                        year is a 4-digit number, and abbrev is a
%%%                        3-letter condensation of important title
%%%                        words. Citation tags were automatically
%%%                        generated by software developed for the
%%%                        BibNet Project; an illustrative sketch of
%%%                        the tag scheme follows this header block.
%%%
%%%                        In this bibliography, entries are sorted in
%%%                        publication order, using ``bibsort -byvolume.''
%%%
%%%                        The checksum field above contains a CRC-16
%%%                        checksum as the first value, followed by the
%%%                        equivalent of the standard UNIX wc (word
%%%                        count) utility output of lines, words, and
%%%                        characters.  This is produced by Robert
%%%                        Solovay's checksum utility; an illustrative
%%%                        sketch of such a computation appears after
%%%                        the preamble below."
%%%     }
%%% ====================================================================
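%%% The name:year:abbrev citation-tag scheme described above can be
%%% illustrated with the short Python sketch below.  It is a
%%% hypothetical approximation for illustration only, not the actual
%%% BibNet Project software; the stopword list and abbreviation rules
%%% shown are assumptions and may differ from the real generator's.
%%%
%%%     import re
%%%
%%%     # Hypothetical stopword list (assumption; the real list differs).
%%%     STOPWORDS = {"a", "an", "and", "for", "in", "of", "on", "the", "to"}
%%%
%%%     def citation_tag(family_name, year, title):
%%%         """Build a name:year:abbrev tag, e.g., Jameson:2011:ITI."""
%%%         # Keep only alphabetic words that are not stopwords.
%%%         words = [w for w in re.findall(r"[A-Za-z]+", title)
%%%                  if w.lower() not in STOPWORDS]
%%%         # Condense the first three important title words to initials.
%%%         abbrev = "".join(w[0].upper() for w in words[:3])
%%%         return "%s:%d:%s" % (family_name, year, abbrev)
%%%
%%%     print(citation_tag("Jameson", 2011,
%%%                        "Introduction to the Transactions on "
%%%                        "Interactive Intelligent Systems"))
%%%     # -> Jameson:2011:ITI
%%% ====================================================================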
@Preamble{"\input bibnames.sty" #
    "\def \TM {${}^{\sc TM}$}"
}
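
%%% ====================================================================
%%% The checksum field in the header holds a CRC-16 value followed by
%%% wc-style counts of lines, words, and characters.  The Python sketch
%%% below reproduces the wc-style counts; the CRC-16/ARC variant it
%%% uses is an assumption, since Robert Solovay's checksum utility may
%%% compute its CRC differently.
%%%
%%%     def crc16_arc(data):
%%%         """CRC-16/ARC: polynomial 0x8005, bit-reversed as 0xA001."""
%%%         crc = 0
%%%         for byte in data:
%%%             crc ^= byte
%%%             for _ in range(8):
%%%                 crc = (crc >> 1) ^ 0xA001 if crc & 1 else crc >> 1
%%%         return crc
%%%
%%%     def checksum_fields(path):
%%%         """Return 'crc lines words chars' for a file, wc-style."""
%%%         with open(path, "rb") as f:
%%%             data = f.read()
%%%         return "%d %d %d %d" % (crc16_arc(data), data.count(b"\n"),
%%%                                 len(data.split()), len(data))
%%%
%%%     print(checksum_fields("tiis.bib"))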

%%% ====================================================================
%%% Acknowledgement abbreviations:
@String{ack-nhfb = "Nelson H. F. Beebe,
                    University of Utah,
                    Department of Mathematics, 110 LCB,
                    155 S 1400 E RM 233,
                    Salt Lake City, UT 84112-0090, USA,
                    Tel: +1 801 581 5254,
                    FAX: +1 801 581 4148,
                    e-mail: \path|beebe@math.utah.edu|,
                            \path|beebe@acm.org|,
                            \path|beebe@computer.org| (Internet),
                    URL: \path|http://www.math.utah.edu/~beebe/|"}

%%% ====================================================================
%%% Journal abbreviations:
@String{j-TIIS                  = "ACM Transactions on Interactive Intelligent
                                  Systems (TIIS)"}

%%% ====================================================================
%%% Bibliography entries:
@Article{Jameson:2011:ITI,
  author =       "Anthony Jameson and John Riedl",
  title =        "Introduction to the {Transactions on Interactive
                 Intelligent Systems}",
  journal =      j-TIIS,
  volume =       "1",
  number =       "1",
  pages =        "1:1--1:??",
  month =        oct,
  year =         "2011",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/2030365.2030366",
  ISSN =         "2160-6455 (print), 2160-6463 (electronic)",
  bibdate =      "Thu Nov 3 17:51:10 MDT 2011",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tiis.bib",
  acknowledgement = ack-nhfb,
  articleno =    "1",
  fjournal =     "ACM Transactions on Interactive Intelligent Systems
                 (TIIS)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}

@Article{Kulesza:2011:WOE,
  author =       "Todd Kulesza and Simone Stumpf and Weng-Keen Wong and
                 Margaret M. Burnett and Stephen Perona and Andrew Ko
                 and Ian Oberst",
  title =        "Why-oriented end-user debugging of naive {Bayes} text
                 classification",
  journal =      j-TIIS,
  volume =       "1",
  number =       "1",
  pages =        "2:1--2:??",
  month =        oct,
  year =         "2011",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/2030365.2030367",
  ISSN =         "2160-6455 (print), 2160-6463 (electronic)",
  bibdate =      "Thu Nov 3 17:51:10 MDT 2011",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tiis.bib",
  acknowledgement = ack-nhfb,
  articleno =    "2",
  fjournal =     "ACM Transactions on Interactive Intelligent Systems
                 (TIIS)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}

@Article{Hoi:2011:AMK,
  author =       "Steven C. H. Hoi and Rong Jin",
  title =        "Active multiple kernel learning for interactive {$3$D}
                 object retrieval systems",
  journal =      j-TIIS,
  volume =       "1",
  number =       "1",
  pages =        "3:1--3:??",
  month =        oct,
  year =         "2011",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/2030365.2030368",
  ISSN =         "2160-6455 (print), 2160-6463 (electronic)",
  bibdate =      "Thu Nov 3 17:51:10 MDT 2011",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tiis.bib",
  acknowledgement = ack-nhfb,
  articleno =    "3",
  fjournal =     "ACM Transactions on Interactive Intelligent Systems
                 (TIIS)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}

@Article{Hammond:2011:RSM,
  author =       "Tracy Hammond and Brandon Paulson",
  title =        "Recognizing sketched multistroke primitives",
  journal =      j-TIIS,
  volume =       "1",
  number =       "1",
  pages =        "4:1--4:??",
  month =        oct,
  year =         "2011",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/2030365.2030369",
  ISSN =         "2160-6455 (print), 2160-6463 (electronic)",
  bibdate =      "Thu Nov 3 17:51:10 MDT 2011",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tiis.bib",
  acknowledgement = ack-nhfb,
  articleno =    "4",
  fjournal =     "ACM Transactions on Interactive Intelligent Systems
                 (TIIS)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}

@Article{Okita:2011:MAA,
  author =       "Sandra Y. Okita and Victor Ng-Thow-Hing and Ravi K.
                 Sarvadevabhatla",
  title =        "Multimodal approach to affective human-robot
                 interaction design with children",
  journal =      j-TIIS,
  volume =       "1",
  number =       "1",
  pages =        "5:1--5:??",
  month =        oct,
  year =         "2011",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/2030365.2030370",
  ISSN =         "2160-6455 (print), 2160-6463 (electronic)",
  bibdate =      "Thu Nov 3 17:51:10 MDT 2011",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tiis.bib",
  acknowledgement = ack-nhfb,
  articleno =    "5",
  fjournal =     "ACM Transactions on Interactive Intelligent Systems
                 (TIIS)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}

@Article{Gibet:2011:SSD,
  author =       "Sylvie Gibet and Nicolas Courty and Kyle Duarte and
                 Thibaut Le Naour",
  title =        "The {SignCom} system for data-driven animation of
                 interactive virtual signers: Methodology and
                 Evaluation",
  journal =      j-TIIS,
  volume =       "1",
  number =       "1",
  pages =        "6:1--6:??",
  month =        oct,
  year =         "2011",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/2030365.2030371",
  ISSN =         "2160-6455 (print), 2160-6463 (electronic)",
  bibdate =      "Thu Nov 3 17:51:10 MDT 2011",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tiis.bib",
  acknowledgement = ack-nhfb,
  articleno =    "6",
  fjournal =     "ACM Transactions on Interactive Intelligent Systems
                 (TIIS)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}

@Article{Castellano:2012:ISI,
  author =       "Ginevra Castellano and Laurel D. Riek and Christopher
                 Peters and Kostas Karpouzis and Jean-Claude Martin and
                 Louis-Philippe Morency",
  title =        "Introduction to the special issue on affective
                 interaction in natural environments",
  journal =      j-TIIS,
  volume =       "2",
  number =       "1",
  pages =        "1:1--1:??",
  month =        mar,
  year =         "2012",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/2133366.2133367",
  ISSN =         "2160-6455 (print), 2160-6463 (electronic)",
  bibdate =      "Fri Mar 16 12:34:07 MDT 2012",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tiis.bib",
  abstract =     "Affect-sensitive systems such as social robots and
                 virtual agents are increasingly being investigated in
                 real-world settings. In order to work effectively in
                 natural environments, these systems require the ability
                 to infer the affective and mental states of humans and
                 to provide appropriate timely output that helps to
                 sustain long-term interactions. This special issue,
                 which appears in two parts, includes articles on the
                 design of socio-emotional behaviors and expressions in
                 robots and virtual agents and on computational
                 approaches for the automatic recognition of social
                 signals and affective states.",
  acknowledgement = ack-nhfb,
  articleno =    "1",
  fjournal =     "ACM Transactions on Interactive Intelligent Systems
                 (TIIS)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}

@Article{Beck:2012:EBL,
  author =       "Aryel Beck and Brett Stevens and Kim A. Bard and Lola
                 Ca{\~n}amero",
  title =        "Emotional body language displayed by artificial
                 agents",
  journal =      j-TIIS,
  volume =       "2",
  number =       "1",
  pages =        "2:1--2:??",
  month =        mar,
  year =         "2012",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/2133366.2133368",
  ISSN =         "2160-6455 (print), 2160-6463 (electronic)",
  bibdate =      "Fri Mar 16 12:34:07 MDT 2012",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tiis.bib",
  abstract =     "Complex and natural social interaction between
                 artificial agents (computer-generated or robotic) and
                 humans necessitates the display of rich emotions in
                 order to be believable, socially relevant, and
                 accepted, and to generate the natural emotional
                 responses that humans show in the context of social
                 interaction, such as engagement or empathy. Whereas
                 some robots use faces to display (simplified) emotional
                 expressions, for other robots such as Nao, body
                 language is the best medium available given their
                 inability to convey facial expressions. Displaying
                 emotional body language that can be interpreted whilst
                 interacting with the robot should significantly improve
                 naturalness. This research investigates the creation of
                 an affect space for the generation of emotional body
                 language to be displayed by humanoid robots. To do so,
                 three experiments investigating how emotional body
                 language displayed by agents is interpreted were
                 conducted. The first experiment compared the
                 interpretation of emotional body language displayed by
                 humans and agents. The results showed that emotional
                 body language displayed by an agent or a human is
                 interpreted in a similar way in terms of recognition.
                 Following these results, emotional key poses were
                 extracted from an actor's performances and implemented
                 in a Nao robot. The interpretation of these key poses
                 was validated in a second study where it was found that
                 participants were better than chance at interpreting
                 the key poses displayed. Finally, an affect space was
                 generated by blending key poses and validated in a
                 third study. Overall, these experiments confirmed that
                 body language is an appropriate medium for robots to
                 display emotions and suggest that an affect space for
                 body expressions can be used to improve the
                 expressiveness of humanoid robots.",
  acknowledgement = ack-nhfb,
  articleno =    "2",
  fjournal =     "ACM Transactions on Interactive Intelligent Systems
                 (TIIS)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}

@Article{Hiolle:2012:ECB,
  author =       "Antoine Hiolle and Lola Ca{\~n}amero and Marina
                 Davila-Ross and Kim A. Bard",
  title =        "Eliciting caregiving behavior in dyadic human-robot
                 attachment-like interactions",
  journal =      j-TIIS,
  volume =       "2",
  number =       "1",
  pages =        "3:1--3:??",
  month =        mar,
  year =         "2012",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/2133366.2133369",
  ISSN =         "2160-6455 (print), 2160-6463 (electronic)",
  bibdate =      "Fri Mar 16 12:34:07 MDT 2012",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tiis.bib",
  abstract =     "We present here the design and applications of an
                 arousal-based model controlling the behavior of a Sony
                 AIBO robot during the exploration of a novel
                 environment: a children's play mat. When the robot
                 experiences too many new perceptions, the increase of
                 arousal triggers calls for attention towards its human
                 caregiver. The caregiver can choose to either calm the
                 robot down by providing it with comfort, or to leave
                 the robot coping with the situation on its own. When
                 the arousal of the robot has decreased, the robot moves
                 on to further explore the play mat. We gathered results
                 from two experiments using this arousal-driven control
                 architecture. In the first setting, we show that such a
                 robotic architecture allows the human caregiver to
                 influence greatly the learning outcomes of the
                 exploration episode, with some similarities to a
                 primary caregiver during early childhood. In a second
                 experiment, we tested how human adults behaved in a
                 similar setup with two different robots: one `needy',
                 often demanding attention, and one more independent,
                 requesting far less care or assistance. Our results
                 show that human adults recognise each profile of the
                 robot for what it was designed to be, and behave
                 according to what would be expected, caring more for
                 the needy robot than for the other. Additionally, the
                 subjects exhibited a preference and more positive
                 affect whilst interacting with and rating the robot we
                 designed as needy. This experiment leads us to the
                 conclusion that our architecture and setup succeeded in
                 eliciting positive and caregiving behavior from adults
                 of different age groups and technological background.
                 Finally, the consistency and reactivity of the robot
                 during this dyadic interaction appeared crucial for the
                 enjoyment and engagement of the human partner.",
  acknowledgement = ack-nhfb,
  articleno =    "3",
  fjournal =     "ACM Transactions on Interactive Intelligent Systems
                 (TIIS)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}

@Article{Scherer:2012:SLN,
  author =       "Stefan Scherer and Michael Glodek and Friedhelm
                 Schwenker and Nick Campbell and G{\"u}nther Palm",
  title =        "Spotting laughter in natural multiparty conversations:
                 a comparison of automatic online and offline approaches
                 using audiovisual data",
  journal =      j-TIIS,
  volume =       "2",
  number =       "1",
  pages =        "4:1--4:??",
  month =        mar,
  year =         "2012",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/2133366.2133370",
  ISSN =         "2160-6455 (print), 2160-6463 (electronic)",
  bibdate =      "Fri Mar 16 12:34:07 MDT 2012",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tiis.bib",
  abstract =     "It is essential for the advancement of human-centered
                 multimodal interfaces to be able to infer the current
                 user's state or communication state. In order to enable
                 a system to do that, the recognition and interpretation
                 of multimodal social signals (i.e., paralinguistic and
                 nonverbal behavior) in real-time applications is
                 required. Since we believe that laughs are one of the
                 most important and widely understood social nonverbal
                 signals indicating affect and discourse quality, we
                 focus in this work on the detection of laughter in
                 natural multiparty discourses. The conversations are
                 recorded in a natural environment without any specific
                 constraint on the discourses using unobtrusive
                 recording devices. This setup ensures natural and
                 unbiased behavior, which is one of the main foci of
                 this work. To compare results, three methods, namely
                 Gaussian Mixture Model (GMM) supervectors as input to a
                 Support Vector Machine (SVM), so-called Echo State
                 Networks (ESN), and a Hidden Markov Model (HMM)
                 approach, are utilized in online and offline detection
                 experiments. The SVM approach proves very accurate in
                 the offline classification task, but is outperformed by
                 the ESN and HMM approaches in online detection (F$_1$
                 scores: GMM SVM 0.45, ESN 0.63, HMM 0.72). Further, we
                 were able to utilize the proposed HMM approach in a
                 cross-corpus experiment without any retraining with
                 respectable generalization capability (F$_1$ score:
                 0.49). The results and possible reasons for these
                 outcomes are shown and discussed in the article. The
                 proposed methods may be directly utilized in practical
                 tasks such as the labeling or the online detection of
                 laughter in conversational data and affect-aware
                 applications.",
  acknowledgement = ack-nhfb,
  articleno =    "4",
  fjournal =     "ACM Transactions on Interactive Intelligent Systems
                 (TIIS)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}

@Article{Song:2012:CBH,
  author =       "Yale Song and David Demirdjian and Randall Davis",
  title =        "Continuous body and hand gesture recognition for
                 natural human-computer interaction",
  journal =      j-TIIS,
  volume =       "2",
  number =       "1",
  pages =        "5:1--5:??",
  month =        mar,
  year =         "2012",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/2133366.2133371",
  ISSN =         "2160-6455 (print), 2160-6463 (electronic)",
  bibdate =      "Fri Mar 16 12:34:07 MDT 2012",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tiis.bib",
  abstract =     "Intelligent gesture recognition systems open a new era
                 of natural human-computer interaction: Gesturing is
                 instinctive and a skill we all have, so it requires
                 little or no thought, leaving the focus on the task
                 itself, as it should be, not on the interaction
                 modality. We present a new approach to gesture
                 recognition that attends to both body and hands, and
                 interprets gestures continuously from an unsegmented
                 and unbounded input stream. This article describes the
                 whole procedure of continuous body and hand gesture
                 recognition, from the signal acquisition to processing,
                 to the interpretation of the processed signals. Our
                 system takes a vision-based approach, tracking body and
                 hands using a single stereo camera. Body postures are
                 reconstructed in 3D space using a generative
                 model-based approach with a particle filter, combining
                 both static and dynamic attributes of motion as the
                 input feature to make tracking robust to
                 self-occlusion. The reconstructed body postures guide
                 searching for hands. Hand shapes are classified into
                 one of several canonical hand shapes using an
                 appearance-based approach with a multiclass support
                 vector machine. Finally, the extracted body and hand
                 features are combined and used as the input feature for
                 gesture recognition. We consider our task as an online
                 sequence labeling and segmentation problem. A
                 latent-dynamic conditional random field is used with a
                 temporal sliding window to perform the task
                 continuously. We augment this with a novel technique
                 called multilayered filtering, which performs filtering
                 both on the input layer and the prediction layer.
                 Filtering on the input layer allows capturing
                 long-range temporal dependencies and reducing input
                 signal noise; filtering on the prediction layer allows
                 taking weighted votes of multiple overlapping
                 prediction results as well as reducing estimation
                 noise. We tested our system in a scenario of real-world
                 gestural interaction using the NATOPS dataset, an
                 official vocabulary of aircraft handling gestures. Our
                 experimental results show that: (1) the use of both
                 static and dynamic attributes of motion in body
                 tracking allows statistically significant improvement
                 of the recognition performance over using static
                 attributes of motion alone; and (2) the multilayered
                 filtering statistically significantly improves
                 recognition performance over the nonfiltering method.
                 We also show that, on a set of twenty-four NATOPS
                 gestures, our system achieves a recognition accuracy of
                 75.37\%.",
  acknowledgement = ack-nhfb,
  articleno =    "5",
  fjournal =     "ACM Transactions on Interactive Intelligent Systems
                 (TIIS)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}

@Article{Eyben:2012:MAC,
  author =       "Florian Eyben and Martin W{\"o}llmer and Bj{\"o}rn
                 Schuller",
  title =        "A multitask approach to continuous five-dimensional
                 affect sensing in natural speech",
  journal =      j-TIIS,
  volume =       "2",
  number =       "1",
  pages =        "6:1--6:??",
  month =        mar,
  year =         "2012",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/2133366.2133372",
  ISSN =         "2160-6455 (print), 2160-6463 (electronic)",
  bibdate =      "Fri Mar 16 12:34:07 MDT 2012",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tiis.bib",
  abstract =     "Automatic affect recognition is important for the
                 ability of future technical systems to interact with us
                 socially in an intelligent way by understanding our
                 current affective state. In recent years there has been
                 a shift in the field of affect recognition from `in the
                 lab' experiments with acted data to `in the wild'
                 experiments with spontaneous and naturalistic data. Two
                 major issues thereby are the proper segmentation of the
                 input and adequate description and modeling of
                 affective states. The first issue is crucial for
                 responsive, real-time systems such as virtual agents
                 and robots, where the latency of the analysis must be
                 as small as possible. To address this issue we
                 introduce a novel method of incremental segmentation to
                 be used in combination with supra-segmental modeling.
                 For modeling of continuous affective states we use Long
                 Short-Term Memory Recurrent Neural Networks, with which
                 we can show an improvement in performance over standard
                 recurrent neural networks and feed-forward neural
                 networks as well as Support Vector Regression. For
                 experiments we use the SEMAINE database, which contains
                 recordings of spontaneous and natural
                 human-to-Wizard-of-Oz conversations. The recordings are
                 annotated continuously in time and magnitude with
                 FeelTrace for five affective dimensions, namely
                 activation, expectation, intensity, power/dominance,
                 and valence. To exploit dependencies between the five
                 affective dimensions we investigate multitask learning
                 of all five dimensions augmented with inter-rater
                 standard deviation. We can show improvements for
                 multitask over single-task modeling. Correlation
                 coefficients of up to 0.81 are obtained for the
                 activation dimension and up to 0.58 for the valence
                 dimension. The performance for the remaining dimensions
                 was found to be in between that for activation and
                 valence.",
  acknowledgement = ack-nhfb,
  articleno =    "6",
  fjournal =     "ACM Transactions on Interactive Intelligent Systems
                 (TIIS)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}

@Article{Yazdani:2012:ARB,
  author =       "Ashkan Yazdani and Jong-Seok Lee and Jean-Marc Vesin
                 and Touradj Ebrahimi",
  title =        "Affect recognition based on physiological changes
                 during the watching of music videos",
  journal =      j-TIIS,
  volume =       "2",
  number =       "1",
  pages =        "7:1--7:??",
  month =        mar,
  year =         "2012",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/2133366.2133373",
  ISSN =         "2160-6455 (print), 2160-6463 (electronic)",
  bibdate =      "Fri Mar 16 12:34:07 MDT 2012",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tiis.bib",
  abstract =     "Assessing emotional states of users evoked during
                 their multimedia consumption has received a great deal
                 of attention with recent advances in multimedia content
                 distribution technologies and increasing interest in
                 personalized content delivery. Physiological signals
                 such as the electroencephalogram (EEG) and peripheral
                 physiological signals have been less considered for
                 emotion recognition in comparison to other modalities
                 such as facial expression and speech, although they
                 have a potential interest as alternative or
                 supplementary channels. This article presents our work
                 on: (1) constructing a dataset containing EEG and
                 peripheral physiological signals acquired during
                 presentation of music video clips, which is made
                 publicly available, and (2) conducting binary
                 classification of induced positive/negative valence,
                 high/low arousal, and like/dislike by using the
                 aforementioned signals. The procedure for the dataset
                 acquisition, including stimuli selection, signal
                 acquisition, self-assessment, and signal processing is
                 described in detail. Especially, we propose a novel
                 asymmetry index based on relative wavelet entropy for
                 measuring the asymmetry in the energy distribution of
                 EEG signals, which is used for EEG feature extraction.
                 Then, the classification systems based on EEG and
                 peripheral physiological signals are presented.
                 Single-trial and single-run classification results
                 indicate that, on average, the performance of the
                 EEG-based classification outperforms that of the
                 peripheral physiological signals. However, the
                 peripheral physiological signals can be considered as a
                 good alternative to EEG signals in the case of
                 assessing a user's preference for a given music video
                 clip (like/dislike) since they have a comparable
                 performance to EEG signals while being more easily
                 measured.",
  acknowledgement = ack-nhfb,
  articleno =    "7",
  fjournal =     "ACM Transactions on Interactive Intelligent Systems
                 (TIIS)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}

@Article{Park:2012:CFM,
  author =       "Souneil Park and Seungwoo Kang and Sangyoung Chung and
                 Junehwa Song",
  title =        "A Computational Framework for Media Bias Mitigation",
  journal =      j-TIIS,
  volume =       "2",
  number =       "2",
  pages =        "8:1--8:??",
  month =        jun,
  year =         "2012",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/2209310.2209311",
  ISSN =         "2160-6455 (print), 2160-6463 (electronic)",
  bibdate =      "Tue Nov 6 19:14:39 MST 2012",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tiis.bib",
  abstract =     "Bias in the news media is an inherent flaw of the news
                 production process. The bias often causes a sharp
                 increase in political polarization and in the cost of
                 conflict on social issues such as the Iraq war. This
                 article presents NewsCube, a novel Internet news
                 service which aims to mitigate the effect of media
                 bias. NewsCube automatically creates and promptly
                 provides readers with multiple classified views on a
                 news event. As such, it helps readers understand the
                 event from a plurality of views and to formulate their
                 own, more balanced, viewpoints. The media bias problem
                 has been studied extensively in mass communications and
                 social science. This article reviews related mass
                 communication and journalism studies and provides a
                 structured view of the media bias problem and its
                 solution. We propose media bias mitigation as a
                 practical solution and demonstrate it through NewsCube.
                 We evaluate and discuss the effectiveness of NewsCube
                 through various performance studies.",
  acknowledgement = ack-nhfb,
  articleno =    "8",
  fjournal =     "ACM Transactions on Interactive Intelligent Systems
                 (TIIS)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}

@Article{Berkovsky:2012:IIF,
  author =       "Shlomo Berkovsky and Jill Freyne and Harri
                 Oinas-Kukkonen",
  title =        "Influencing Individually: Fusing Personalization and
                 Persuasion",
  journal =      j-TIIS,
  volume =       "2",
  number =       "2",
  pages =        "9:1--9:??",
  month =        jun,
  year =         "2012",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/2209310.2209312",
  ISSN =         "2160-6455 (print), 2160-6463 (electronic)",
  bibdate =      "Tue Nov 6 19:14:39 MST 2012",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tiis.bib",
  abstract =     "Personalized technologies aim to enhance user
                 experience by taking into account users' interests,
                 preferences, and other relevant information. Persuasive
                 technologies aim to modify user attitudes, intentions,
                 or behavior through computer-human dialogue and social
                 influence. While both personalized and persuasive
                 technologies influence user interaction and behavior,
                 we posit that this influence could be significantly
                 increased if the two technologies were combined to
                 create personalized and persuasive systems. For
                 example, the persuasive power of a one-size-fits-all
                 persuasive intervention could be enhanced by
                 considering the users being influenced and their
                 susceptibility to the persuasion being offered.
                 Likewise, personalized technologies could cash in on
                 increased success, in terms of user satisfaction,
                 revenue, and user experience, if their services used
                 persuasive techniques. Hence, the coupling of
                 personalization and persuasion has the potential to
                 enhance the impact of both technologies. This new,
                 developing area clearly offers mutual benefits to both
                 research areas, as we illustrate in this special
                 issue.",
  acknowledgement = ack-nhfb,
  articleno =    "9",
  fjournal =     "ACM Transactions on Interactive Intelligent Systems
                 (TIIS)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}

@Article{Kaptein:2012:APS,
  author =       "Maurits Kaptein and Boris {De Ruyter} and Panos
                 Markopoulos and Emile Aarts",
  title =        "Adaptive Persuasive Systems: a Study of Tailored
                 Persuasive Text Messages to Reduce Snacking",
  journal =      j-TIIS,
  volume =       "2",
  number =       "2",
  pages =        "10:1--10:??",
  month =        jun,
  year =         "2012",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/2209310.2209313",
  ISSN =         "2160-6455 (print), 2160-6463 (electronic)",
  bibdate =      "Tue Nov 6 19:14:39 MST 2012",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tiis.bib",
  abstract =     "This article describes the use of personalized short
                 text messages (SMS) to reduce snacking. First, we
                 describe the development and validation (N = 215) of a
                 questionnaire to measure individual susceptibility to
                 different social influence strategies. To evaluate the
                 external validity of this Susceptibility to Persuasion
                 Scale (STPS), we set up a two-week text-messaging
                 intervention that used text messages implementing
                 social influence strategies as prompts to reduce
                 snacking behavior. In this experiment (N = 73) we show
                 that messages that are personalized (tailored) to the
                 individual based on their scores on the STPS lead to a
                 higher decrease in snacking consumption than randomized
                 messages or messages that are not tailored
                 (contra-tailored) to the individual. We discuss the
                 importance of this finding for the design of persuasive
                 systems and detail how designers can use tailoring at
                 the level of social influence strategies to increase
                 the effects of their persuasive technologies.",
  acknowledgement = ack-nhfb,
  articleno =    "10",
  fjournal =     "ACM Transactions on Interactive Intelligent Systems
                 (TIIS)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}

@Article{Cremonesi:2012:IPP,
  author =       "Paolo Cremonesi and Franca Garzotto and Roberto
                 Turrin",
  title =        "Investigating the Persuasion Potential of Recommender
                 Systems from a Quality Perspective: an Empirical
                 Study",
  journal =      j-TIIS,
  volume =       "2",
  number =       "2",
  pages =        "11:1--11:??",
  month =        jun,
  year =         "2012",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/2209310.2209314",
  ISSN =         "2160-6455 (print), 2160-6463 (electronic)",
  bibdate =      "Tue Nov 6 19:14:39 MST 2012",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tiis.bib",
  abstract =     "Recommender Systems (RSs) help users search large
                 amounts of digital contents and services by allowing
                 them to identify the items that are likely to be more
                 attractive or useful. RSs play an important persuasion
                 role, as they can potentially augment the users' trust
                 towards in an application and orient their decisions or
                 actions towards specific directions. This article
                 explores the persuasiveness of RSs, presenting two vast
                 empirical studies that address a number of research
                 questions. First, we investigate if a design property
                 of RSs, defined by the statistically measured quality
                 of algorithms, is a reliable predictor of their
                 potential for persuasion. This factor is measured in
                 terms of perceived quality, defined by the overall
                 satisfaction, as well as by how users judge the
                 accuracy and novelty of recommendations. For our
                 purposes, we designed an empirical study involving 210
                 subjects and implemented seven full-sized versions of a
                 commercial RS, each one using the same interface and
                 dataset (a subset of Netflix), but each with a
                 different recommender algorithm. In each experimental
                 configuration we computed the statistical quality
                 (recall and F-measures) and collected data regarding
                 the quality perceived by 30 users. The results show us
                 that algorithmic attributes are less crucial than we
                 might expect in determining the user's perception of an
                 RS's quality, and suggest that the user's judgment and
                 attitude towards a recommender are likely to be more
                 affected by factors related to the user experience.
                 Second, we explore the persuasiveness of RSs in the
                 context of large interactive TV services. We report a
                 study aimed at assessing whether measurable persuasion
                 effects (e.g., changes of shopping behavior) can be
                 achieved through the introduction of a recommender. Our
                 data, collected for more than one year, allow us to
                 conclude that (1) the adoption of an RS can affect
                 both the lift factor and the conversion rate,
                 determining an increased volume of sales and
                 influencing the user's decision to actually buy one of
                 the recommended products, (2) the introduction of an RS
                 tends to diversify purchases and orient users towards
                 less obvious choices (the long tail), and (3) the
                 perceived novelty of recommendations is likely to be
                 more influential than their perceived accuracy.
                 Overall, the results of these studies improve our
                 understanding of the persuasion phenomena induced by
                 RSs, and have implications that can be of interest to
                 academic scholars, designers, and adopters of this
                 class of systems.",
  acknowledgement = ack-nhfb,
  articleno =    "11",
  fjournal =     "ACM Transactions on Interactive Intelligent Systems
                 (TIIS)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}

@Article{Andrews:2012:SPP,
  author =       "Pierre Y. Andrews",
  title =        "System Personality and Persuasion in Human-Computer
                 Dialogue",
  journal =      j-TIIS,
  volume =       "2",
  number =       "2",
  pages =        "12:1--12:??",
  month =        jun,
  year =         "2012",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/2209310.2209315",
  ISSN =         "2160-6455 (print), 2160-6463 (electronic)",
  bibdate =      "Tue Nov 6 19:14:39 MST 2012",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tiis.bib",
  abstract =     "The human-computer dialogue research field has been
                 studying interaction with computers since the early
                 stage of Artificial Intelligence; however, research has
                 often focused on very practical tasks to be completed
                 with the dialogues. A new trend in the field tries to
                 implement persuasive techniques with automated
                 interactive agents; unlike booking a train ticket, for
                 example, such dialogues require the system to show more
                 anthropomorphic qualities. The influence of such
                 qualities on the effectiveness of persuasive dialogue
                 is only starting to be studied. In this article we
                 focus on one important perceived trait of the system:
                 personality, and explore how it influences the
                 persuasiveness of a dialogue system. We introduce a new
                 persuasive dialogue system and combine it with a
                 state-of-the-art personality utterance generator. By doing
                 so, we can control the system's extraversion
                 personality trait and observe its influence on the
                 user's perception of the dialogue and its output. In
                 particular, we observe that the user's extraversion
                 influences their perception of the dialogue and its
                 persuasiveness, and that the perceived personality of
                 the system can affect its trustworthiness and
                 persuasiveness. We believe that these observations
                 will help to set up guidelines to tailor dialogue
                 systems to the user's interaction expectations and
                 improve the persuasive interventions.",
  acknowledgement = ack-nhfb,
  articleno =    "12",
  fjournal =     "ACM Transactions on Interactive Intelligent Systems
                 (TIIS)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}

@Article{Vig:2012:TGE,
  author =       "Jesse Vig and Shilad Sen and John Riedl",
  title =        "The Tag Genome: Encoding Community Knowledge to
                 Support Novel Interaction",
  journal =      j-TIIS,
  volume =       "2",
  number =       "3",
  pages =        "13:1--13:??",
  month =        sep,
  year =         "2012",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/2362394.2362395",
  ISSN =         "2160-6455 (print), 2160-6463 (electronic)",
  bibdate =      "Tue Nov 6 19:14:40 MST 2012",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tiis.bib",
  abstract =     "This article introduces the tag genome, a data
                 structure that extends the traditional tagging model to
                 provide enhanced forms of user interaction. Just as a
                 biological genome encodes an organism based on a
                 sequence of genes, the tag genome encodes an item in an
                 information space based on its relationship to a common
                 set of tags. We present a machine learning approach for
                 computing the tag genome, and we evaluate several
                 learning models on a ground truth dataset provided by
                 users. We describe an application of the tag genome
                 called Movie Tuner which enables users to navigate from
                 one item to nearby items along dimensions represented
                 by tags. We present the results of a 7-week field trial
                 of 2,531 users of Movie Tuner and a survey evaluating
                 users' subjective experience. Finally, we outline the
                 broader space of applications of the tag genome.",
  acknowledgement = ack-nhfb,
  articleno =    "13",
  fjournal =     "ACM Transactions on Interactive Intelligent Systems
                 (TIIS)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}

@Article{Lieberman:2012:ISI,
  author =       "Henry Lieberman and Catherine Havasi",
  title =        "Introduction to the {Special Issue on Common Sense for
                 Interactive Systems}",
  journal =      j-TIIS,
  volume =       "2",
  number =       "3",
  pages =        "14:1--14:??",
  month =        sep,
  year =         "2012",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/2362394.2362396",
  ISSN =         "2160-6455 (print), 2160-6463 (electronic)",
  bibdate =      "Tue Nov 6 19:14:40 MST 2012",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tiis.bib",
  abstract =     "This editorial introduction describes the aims and
                 scope of the special issue on Common Sense for
                 Interactive Systems of the ACM Transactions on
                 Interactive Intelligent Systems. It explains why the
                 common sense knowledge problem is crucial for both
                 artificial intelligence and human-computer interaction,
                 and it shows how the four articles selected for this
                 issue fit into the theme.",
  acknowledgement = ack-nhfb,
  articleno =    "14",
  fjournal =     "ACM Transactions on Interactive Intelligent Systems
                 (TIIS)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}

@Article{Gil:2012:CCK,
  author =       "Yolanda Gil and Varun Ratnakar and Timothy Chklovski
                 and Paul Groth and Denny Vrandecic",
  title =        "Capturing Common Knowledge about Tasks: Intelligent
                 Assistance for To-Do Lists",
  journal =      j-TIIS,
  volume =       "2",
  number =       "3",
  pages =        "15:1--15:??",
  month =        sep,
  year =         "2012",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/2362394.2362397",
  ISSN =         "2160-6455 (print), 2160-6463 (electronic)",
  bibdate =      "Tue Nov 6 19:14:40 MST 2012",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tiis.bib",
  abstract =     "Although to-do lists are a ubiquitous form of personal
                 task management, there has been no work on intelligent
                 assistance to automate, elaborate, or coordinate a
                 user's to-dos. Our research focuses on three aspects of
                 intelligent assistance for to-dos. We investigated the
                 use of intelligent agents to automate to-dos in an
                 office setting. We collected a large corpus from users
                 and developed a paraphrase-based approach to matching
                 agent capabilities with to-dos. We also investigated
                 to-dos for personal tasks and the kinds of assistance
                 that can be offered to users by elaborating on them on
                 the basis of substep knowledge extracted from the Web.
                 Finally, we explored coordination of user tasks with
                 other users through a to-do management application
                 deployed in a popular social networking site. We
                 discuss the emergence of Social Task Networks, which
                 link users' tasks to their social network as well as to
                 relevant resources on the Web. We show the benefits of
                 using common sense knowledge to interpret and elaborate
                 to-dos. Conversely, we also show that to-do lists are a
                 valuable way to create repositories of common sense
                 knowledge about tasks.",
  acknowledgement = ack-nhfb,
  articleno =    "15",
  fjournal =     "ACM Transactions on Interactive Intelligent Systems
                 (TIIS)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}

@Article{Swanson:2012:SAU,
  author =       "Reid Swanson and Andrew S. Gordon",
  title =        "Say Anything: Using Textual Case-Based Reasoning to
                 Enable Open-Domain Interactive Storytelling",
  journal =      j-TIIS,
  volume =       "2",
  number =       "3",
  pages =        "16:1--16:??",
  month =        sep,
  year =         "2012",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/2362394.2362398",
  ISSN =         "2160-6455 (print), 2160-6463 (electronic)",
  bibdate =      "Tue Nov 6 19:14:40 MST 2012",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tiis.bib",
  abstract =     "We describe Say Anything, a new interactive
                 storytelling system that collaboratively writes textual
                 narratives with human users. Unlike previous attempts,
                 this interactive storytelling system places no
                 restrictions on the content or direction of the user's
                 contribution to the emerging storyline. In response to
                 these contributions, the computer continues the
                 storyline with narration that is both coherent and
                 entertaining. This capacity for open-domain interactive
                 storytelling is enabled by an extremely large
                 repository of nonfiction personal stories, which is
                 used as a knowledge base in a case-based reasoning
                 architecture. In this article, we describe the three
                 main components of our case-based reasoning approach: a
                 million-item corpus of personal stories mined from
                 internet weblogs, a case retrieval strategy that is
                 optimized for narrative coherence, and an adaptation
                 strategy that ensures that repurposed sentences from
                 the case base are appropriate for the user's emerging
                 fiction. We describe a series of evaluations of the
                 system's ability to produce coherent and entertaining
                 stories, and we compare these narratives with
                 single-author stories posted to internet weblogs.",
  acknowledgement = ack-nhfb,
  articleno =    "16",
  fjournal =     "ACM Transactions on Interactive Intelligent Systems
                 (TIIS)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}

@Article{Kuo:2012:PRM,
  author =       "Yen-Ling Kuo and Jane Yung-Jen Hsu",
  title =        "Planning for Reasoning with Multiple Common Sense
                 Knowledge Bases",
  journal =      j-TIIS,
  volume =       "2",
  number =       "3",
  pages =        "17:1--17:??",
  month =        sep,
  year =         "2012",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/2362394.2362399",
  ISSN =         "2160-6455 (print), 2160-6463 (electronic)",
  bibdate =      "Tue Nov 6 19:14:40 MST 2012",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tiis.bib",
  abstract =     "Intelligent user interfaces require common sense
                 knowledge to bridge the gap between the functionality
                 of applications and the user's goals. While current
                 reasoning methods have been used to provide contextual
                 information for interface agents, the quality of their
                 reasoning results is limited by the coverage of their
                 underlying knowledge bases. This article presents
                 reasoning composition, a planning-based approach to
                 integrating reasoning methods from multiple common
                 sense knowledge bases to answer queries. The reasoning
                 results of one reasoning method are passed to other
                 reasoning methods to form a reasoning chain to the
                 target context of a query. By leveraging different weak
                 reasoning methods, we are able to find answers to
                 queries that cannot be directly answered by querying a
                 single common sense knowledge base. By conducting
                 experiments on ConceptNet and WordNet, we compare the
                 reasoning results of reasoning composition, directly
                 querying merged knowledge bases, and spreading
                 activation. The results show an 11.03\% improvement in
                 coverage over directly querying merged knowledge bases
                 and a 49.7\% improvement in accuracy over spreading
                 activation. Two case studies are presented, showing how
                 reasoning composition can improve performance of
                 retrieval in a video editing system and a dialogue
                 assistant.",
  acknowledgement = ack-nhfb,
  articleno =    "17",
  fjournal =     "ACM Transactions on Interactive Intelligent Systems
                 (TIIS)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}

@Article{Dinakar:2012:CSR,
  author =       "Karthik Dinakar and Birago Jones and Catherine Havasi
                 and Henry Lieberman and Rosalind Picard",
  title =        "Common Sense Reasoning for Detection, Prevention, and
                 Mitigation of Cyberbullying",
  journal =      j-TIIS,
  volume =       "2",
  number =       "3",
  pages =        "18:1--18:??",
  month =        sep,
  year =         "2012",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/2362394.2362400",
  ISSN =         "2160-6455 (print), 2160-6463 (electronic)",
  bibdate =      "Tue Nov 6 19:14:40 MST 2012",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tiis.bib",
  abstract =     "Cyberbullying (harassment on social networks) is
                 widely recognized as a serious social problem,
                 especially for adolescents. It is as much a threat to
                 the viability of online social networks for youth today
                 as spam once was to email in the early days of the
                 Internet. Current work to tackle this problem has
                 involved social and psychological studies on its
                 prevalence as well as its negative effects on
                 adolescents. While true solutions rest on teaching
                 youth to have healthy personal relationships, few have
                 considered innovative design of social network software
                 as a tool for mitigating this problem. Mitigating
                 cyberbullying involves two key components: robust
                 techniques for effective detection and reflective user
                 interfaces that encourage users to reflect upon their
                 behavior and their choices. Spam filters have been
                 successful by applying statistical approaches like
                 Bayesian networks and hidden Markov models. They can,
                 like Google's GMail, aggregate human spam judgments
                 because spam is sent nearly identically to many people.
                 Bullying is more personalized, varied, and contextual.
                 In this work, we present an approach for bullying
                 detection based on state-of-the-art natural language
                 processing and a common sense knowledge base, which
                 permits recognition over a broad spectrum of topics in
                 everyday life. We analyze a narrower range of
                 particular subject matter associated with bullying
                 (e.g., appearance, intelligence, racial and ethnic
                 slurs, social acceptance, and rejection), and construct
                 BullySpace, a common sense knowledge base that encodes
                 particular knowledge about bullying situations. We then
                 perform joint reasoning with common sense knowledge
                 about a wide range of everyday life topics. We analyze
                 messages using our novel AnalogySpace common sense
                 reasoning technique. We also take into account social
                 network analysis and other factors. We evaluate the
                 model on real-world instances that have been reported
                 by users on Formspring, a social networking website
                 that is popular with teenagers. On the intervention
                 side, we explore a set of reflective user-interaction
                 paradigms with the goal of promoting empathy among
                 social network participants. We propose an ``air
                 traffic control''-like dashboard, which alerts
                 moderators to large-scale outbreaks that appear to be
                 escalating or spreading and helps them prioritize the
                 current deluge of user complaints. For potential
                 victims, we provide educational material that informs
                 them about how to cope with the situation, and connects
                 them with emotional support from others. A user
                 evaluation shows that in-context, targeted, and dynamic
                 help during cyberbullying situations fosters end-user
                 reflection that promotes better coping strategies.",
  acknowledgement = ack-nhfb,
  articleno =    "18",
  fjournal =     "ACM Transactions on Interactive Intelligent Systems
                 (TIIS)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}

@Article{Jameson:2012:ISI,
  author =       "Anthony Jameson and John Riedl",
  title =        "Introduction to the special issue on highlights of the
                 decade in interactive intelligent systems",
  journal =      j-TIIS,
  volume =       "2",
  number =       "4",
  pages =        "19:1--19:??",
  month =        dec,
  year =         "2012",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/2395123.2395124",
  ISSN =         "2160-6455 (print), 2160-6463 (electronic)",
  bibdate =      "Tue Apr 30 18:37:15 MDT 2013",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tiis.bib",
  abstract =     "This editorial introduction explains the motivation
                 and origin of the TiiS special issue on Highlights of
                 the Decade in Interactive Intelligent Systems and shows
                 how its five articles exemplify the types of research
                 contribution that TiiS aims to encourage and publish.",
  acknowledgement = ack-nhfb,
  articleno =    "19",
  fjournal =     "ACM Transactions on Interactive Intelligent Systems
                 (TIIS)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}

@Article{Hoey:2012:PSD,
  author =       "Jesse Hoey and Craig Boutilier and Pascal Poupart and
                 Patrick Olivier and Andrew Monk and Alex Mihailidis",
  title =        "People, sensors, decisions: Customizable and adaptive
                 technologies for assistance in healthcare",
  journal =      j-TIIS,
  volume =       "2",
  number =       "4",
  pages =        "20:1--20:??",
  month =        dec,
  year =         "2012",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/2395123.2395125",
  ISSN =         "2160-6455 (print), 2160-6463 (electronic)",
  bibdate =      "Tue Apr 30 18:37:15 MDT 2013",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tiis.bib",
  abstract =     "The ratio of healthcare professionals to care
                 recipients is dropping at an alarming rate,
                 particularly for the older population. It is estimated
                 that the number of persons with Alzheimer's disease,
                 for example, will top 100 million worldwide by the year
                 2050 [Alzheimer's Disease International 2009]. It will
                 become harder and harder to provide needed health
                 services to this population of older adults. Further,
                 patients are becoming more aware and involved in their
                 own healthcare decisions. This is creating a void in
                 which technology has an increasingly important role to
                 play as a tool to connect providers with recipients.
                 Examples of interactive technologies range from
                 telecare for remote regions to computer games promoting
                 fitness in the home. Currently, such technologies are
                 developed for specific applications and are difficult
                 to modify to suit individual user needs. The future
                 potential economic and social impact of technology in
                 the healthcare field therefore lies in our ability to
                 make intelligent devices that are customizable by
                 healthcare professionals and their clients, that are
                 adaptive to users over time, and that generalize across
                 tasks and environments. A wide application area for
                 technology in healthcare is for assistance and
                 monitoring in the home. As the population ages, it
                 becomes increasingly dependent on chronic healthcare,
                 such as assistance for tasks of everyday life (washing,
                 cooking, dressing), medication taking, nutrition, and
                 fitness. This article will present a summary of work
                 over the past decade on the development of intelligent
                 systems that provide assistance to persons with
                 cognitive disabilities. These systems are unique in
                 that they are all built using a common framework, a
                 decision-theoretic model for general-purpose assistance
                 in the home. In this article, we will show how this
                 type of general model can be applied to a range of
                 assistance tasks, including prompting for activities of
                 daily living, assistance for art therapists, and stroke
                 rehabilitation. This model is a Partially Observable
                 Markov Decision Process (POMDP) that can be customized
                 by end-users, that can integrate complex sensor
                 information, and that can adapt over time. These three
                 characteristics of the POMDP model will allow for
                 increasing uptake and long-term efficiency and
                 robustness of technology for assistance.",
  acknowledgement = ack-nhfb,
  articleno =    "20",
  fjournal =     "ACM Transactions on Interactive Intelligent Systems
                 (TIIS)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}

@Article{Carberry:2012:AMA,
  author =       "Sandra Carberry and Stephanie Elzer Schwartz and
                 Kathleen McCoy and Seniz Demir and Peng Wu and Charles
                 Greenbacker and Daniel Chester and Edward Schwartz and
                 David Oliver and Priscilla Moraes",
  title =        "Access to multimodal articles for individuals with
                 sight impairments",
  journal =      j-TIIS,
  volume =       "2",
  number =       "4",
  pages =        "21:1--21:??",
  month =        dec,
  year =         "2012",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/2395123.2395126",
  ISSN =         "2160-6455 (print), 2160-6463 (electronic)",
  bibdate =      "Tue Apr 30 18:37:15 MDT 2013",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tiis.bib",
  abstract =     "Although intelligent interactive systems have been the
                 focus of many research efforts, very few have addressed
                 systems for individuals with disabilities. This article
                 presents our methodology for an intelligent interactive
                 system that provides individuals with sight impairments
                 with access to the content of information graphics
                 (such as bar charts and line graphs) in popular media.
                 The article describes the methodology underlying the
                 system's intelligent behavior, its interface for
                 interacting with users, examples processed by the
                 implemented system, and evaluation studies both of the
                 methodology and the effectiveness of the overall
                 system. This research advances universal access to
                 electronic documents.",
  acknowledgement = ack-nhfb,
  articleno =    "21",
  fjournal =     "ACM Transactions on Interactive Intelligent Systems
                 (TIIS)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}

@Article{Chen:2012:MBI,
  author =       "Fang Chen and Natalie Ruiz and Eric Choi and Julien
                 Epps and M. Asif Khawaja and Ronnie Taib and Bo Yin and
                 Yang Wang",
  title =        "Multimodal behavior and interaction as indicators of
                 cognitive load",
  journal =      j-TIIS,
  volume =       "2",
  number =       "4",
  pages =        "22:1--22:??",
  month =        dec,
  year =         "2012",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/2395123.2395127",
  ISSN =         "2160-6455 (print), 2160-6463 (electronic)",
  bibdate =      "Tue Apr 30 18:37:15 MDT 2013",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tiis.bib",
  abstract =     "High cognitive load arises from complex time and
                 safety-critical tasks, for example, mapping out flight
                 paths, monitoring traffic, or even managing nuclear
                 reactors, causing stress, errors, and lowered
                 performance. Over the last five years, our research has
                 focused on using the multimodal interaction paradigm to
                 detect fluctuations in cognitive load in user behavior
                 during system interaction. Cognitive load variations
                 have been found to impact interactive behavior: by
                 monitoring variations in specific modal input features
                 executed in tasks of varying complexity, we gain an
                 understanding of the communicative changes that occur
                 when cognitive load is high. So far, we have identified
                 specific changes in: speech, namely acoustic, prosodic,
                 and linguistic changes; interactive gesture; and
                 digital pen input, both interactive and freeform. As
                 ground-truth measurements, galvanic skin response,
                 subjective, and performance ratings have been used to
                 verify task complexity. The data suggest that it is
                 feasible to use features extracted from behavioral
                 changes in multiple modal inputs as indices of
                 cognitive load. The speech-based indicators of load,
                 based on data collected from user studies in a variety
                 of domains, have shown considerable promise. Scenarios
                 include single-user and team-based tasks; think-aloud
                 and interactive speech; and single-word, reading, and
                 conversational speech, among others. Pen-based
                 cognitive load indices have also been tested with some
                 success, specifically with pen-gesture, handwriting,
                 and freeform pen input, including diagramming. After
                 examining some of the properties of these measurements,
                 we present a multimodal fusion model, which is
                 illustrated with quantitative examples from a case
                 study. The feasibility of employing user input and
                 behavior patterns as indices of cognitive load is
                 supported by experimental evidence. Moreover,
                 symptomatic cues of cognitive load derived from user
                 behavior, such as acoustic speech signals, transcribed
                 text, and digital pen trajectories of handwriting and
                 pen shapes, can be supported by well-established
                 theoretical frameworks, including O'Donnell and
                 Eggemeier's workload measurement [1986], Sweller's
                 Cognitive Load Theory [Chandler and Sweller 1991], and
                 Baddeley's model of modal working memory [1992], as
                 well as McKinstry et al.'s [2008] and Rosenbaum's
                 [2005] action dynamics work. The benefit of using this
                 approach to determine the user's cognitive load in
                 real time is that the data can be collected
                 implicitly, that is, during day-to-day use of
                 intelligent interactive systems; this overcomes
                 problems of intrusiveness and increases applicability
                 in real-world environments, while adapting information
                 selection and presentation in a dynamic computer
                 interface with reference to load.",
  acknowledgement = ack-nhfb,
  articleno =    "22",
  fjournal =     "ACM Transactions on Interactive Intelligent Systems
                 (TIIS)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}

@Article{Dmello:2012:AAA,
  author =       "Sidney D'mello and Art Graesser",
  title =        "{AutoTutor} and {Affective Autotutor}: Learning by
                 talking with cognitively and emotionally intelligent
                 computers that talk back",
  journal =      j-TIIS,
  volume =       "2",
  number =       "4",
  pages =        "23:1--23:??",
  month =        dec,
  year =         "2012",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/2395123.2395128",
  ISSN =         "2160-6455 (print), 2160-6463 (electronic)",
  bibdate =      "Tue Apr 30 18:37:15 MDT 2013",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tiis.bib",
  abstract =     "We present AutoTutor and Affective AutoTutor as
                 examples of innovative 21$^{st}$ century interactive
                 intelligent systems that promote learning and
                 engagement. AutoTutor is an intelligent tutoring system
                 that helps students compose explanations of difficult
                 concepts in Newtonian physics and enhances computer
                 literacy and critical thinking by interacting with them
                 in natural language with adaptive dialog moves similar
                 to those of human tutors. AutoTutor constructs a
                 cognitive model of students' knowledge levels by
                 analyzing the text of their typed or spoken responses
                 to its questions. The model is used to dynamically
                 tailor the interaction toward individual students'
                 zones of proximal development. Affective AutoTutor
                 takes the individualized instruction and human-like
                 interactivity to a new level by automatically detecting
                 and responding to students' emotional states in
                 addition to their cognitive states. Over 20 controlled
                 experiments comparing AutoTutor with ecological and
                 experimental controls such as reading a textbook have
                 consistently yielded learning improvements of
                 approximately one letter grade after brief
                 30--60-minute interactions. Furthermore, Affective
                 AutoTutor shows even more dramatic improvements in
                 learning than the original AutoTutor system,
                 particularly for struggling students with low domain
                 knowledge. In addition to providing a detailed
                 description of the implementation and evaluation of
                 AutoTutor and Affective AutoTutor, we also discuss new
                 and exciting technologies motivated by AutoTutor such
                 as AutoTutor-Lite, Operation ARIES, GuruTutor,
                 DeepTutor, MetaTutor, and AutoMentor. We conclude this
                 article with our vision for future work on interactive
                 and engaging intelligent tutoring systems.",
  acknowledgement = ack-nhfb,
  articleno =    "23",
  fjournal =     "ACM Transactions on Interactive Intelligent Systems
                 (TIIS)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}

@Article{Kay:2012:CPS,
  author =       "Judy Kay and Bob Kummerfeld",
  title =        "Creating personalized systems that people can
                 scrutinize and control: Drivers, principles and
                 experience",
  journal =      j-TIIS,
  volume =       "2",
  number =       "4",
  pages =        "24:1--24:??",
  month =        dec,
  year =         "2012",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/2395123.2395129",
  ISSN =         "2160-6455 (print), 2160-6463 (electronic)",
  bibdate =      "Tue Apr 30 18:37:15 MDT 2013",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tiis.bib",
  abstract =     "Widespread personalized computing systems play an
                 already important and fast-growing role in diverse
                 contexts, such as location-based services,
                 recommenders, commercial Web-based services, and
                 teaching systems. The personalization in these systems
                 is driven by information about the user, a user model.
                 Moreover, as computers become both ubiquitous and
                 pervasive, personalization operates across the many
                 devices and information stores that constitute the
                 user's personal digital ecosystem. This enables
                 personalization, and the user models driving it, to
                 play an increasing role in people's everyday lives.
                 This makes it critical to establish ways to address key
                 problems of personalization related to privacy,
                 invisibility of personalization, errors in user models,
                 wasted user models, and the broad issue of enabling
                 people to control their user models and associated
                 personalization. We offer scrutable user models as a
                 foundation for tackling these problems. This article
                 argues the importance of scrutable user modeling and
                 personalization, illustrating key elements in case
                 studies from our work. We then identify the broad roles
                 for scrutable user models. The article describes how to
                 tackle the technical and interface challenges of
                 designing and building scrutable user modeling systems,
                 presenting design principles and showing how they were
                 established over our twenty years of work on the
                 Personis software framework. Our contributions are the
                 set of principles for scrutable personalization linked
                 to our experience from creating and evaluating
                 frameworks and associated applications built upon them.
                 These constitute a general approach to tackling
                 problems of personalization by enabling users to
                 scrutinize their user models as a basis for
                 understanding and controlling personalization.",
  acknowledgement = ack-nhfb,
  articleno =    "24",
  fjournal =     "ACM Transactions on Interactive Intelligent Systems
                 (TIIS)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}

@Article{Giunchiglia:2013:ISS,
  author =       "Fausto Giunchiglia and David Robertson",
  title =        "Introduction to the special section on
                 {Internet}-scale human problem solving",
  journal =      j-TIIS,
  volume =       "3",
  number =       "1",
  pages =        "1:1--1:??",
  month =        apr,
  year =         "2013",
  CODEN =        "????",
  ISSN =         "2160-6455 (print), 2160-6463 (electronic)",
  bibdate =      "Tue Apr 30 18:37:17 MDT 2013",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tiis.bib",
  abstract =     "This editorial introduction first outlines some of the
                 research challenges raised by the emerging forms of
                 internet-scale human problem solving. It then explains
                 how the two articles in this special section can serve
                 as illuminating complementary case studies, providing
                 concrete examples embedded in general conceptual
                 frameworks.",
  acknowledgement = ack-nhfb,
  articleno =    "1",
  fjournal =     "ACM Transactions on Interactive Intelligent Systems
                 (TIIS)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}

@Article{Yu:2013:ISI,
  author =       "Lixiu Yu and Jeffrey V. Nickerson",
  title =        "An {Internet}-scale idea generation system",
  journal =      j-TIIS,
  volume =       "3",
  number =       "1",
  pages =        "2:1--2:??",
  month =        apr,
  year =         "2013",
  CODEN =        "????",
  ISSN =         "2160-6455 (print), 2160-6463 (electronic)",
  bibdate =      "Tue Apr 30 18:37:17 MDT 2013",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tiis.bib",
  abstract =     "A method of organizing the crowd to generate ideas is
                 described. It integrates crowds using evolutionary
                 algorithms. The method increases the creativity of
                 ideas across generations, and it works better than
                 greenfield idea generation. Specifically, a design
                 space of internet-scale idea generation systems is
                 defined, and one instance is tested: a crowd idea
                 generation system that uses combination to improve
                 previous designs. The key process of the system is the
                 following: A crowd generates designs, then another
                 crowd combines the designs of the previous crowd. In an
                 experiment with 540 participants, the combined designs
                 were compared to the initial designs and to the designs
                 produced by a greenfield idea generation system. The
                 results show that the sequential combination system
                 produced more creative ideas in the last generation and
                 outperformed the greenfield idea generation system. The
                 design space of crowdsourced idea generation developed
                 here may be used to instantiate systems that can be
                 applied to a wide range of design problems. The work
                 has both pragmatic and theoretical implications: New
                 forms of coordination are now possible, and, using the
                 crowd, it is possible to test existing and emerging
                 theories of coordination and participatory design.
                 Moreover, it may be possible for human designers,
                 organized as a crowd, to codesign with each other and
                 with automated algorithms.",
  acknowledgement = ack-nhfb,
  articleno =    "2",
  fjournal =     "ACM Transactions on Interactive Intelligent Systems
                 (TIIS)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}

@Article{Poesio:2013:PDU,
  author =       "Massimo Poesio and Jon Chamberlain and Udo Kruschwitz
                 and Livio Robaldo and Luca Ducceschi",
  title =        "Phrase detectives: Utilizing collective intelligence
                 for {Internet}-scale language resource creation",
  journal =      j-TIIS,
  volume =       "3",
  number =       "1",
  pages =        "3:1--3:??",
  month =        apr,
  year =         "2013",
  CODEN =        "????",
  ISSN =         "2160-6455 (print), 2160-6463 (electronic)",
  bibdate =      "Tue Apr 30 18:37:17 MDT 2013",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tiis.bib",
  abstract =     "We are witnessing a paradigm shift in Human Language
                 Technology (HLT) that may well have an impact on the
                 field comparable to the statistical revolution:
                 acquiring large-scale resources by exploiting
                 collective intelligence. An illustration of this new
                 approach is Phrase Detectives, an interactive online
                 game with a purpose for creating anaphorically
                 annotated resources that makes use of a highly
                 distributed population of contributors with different
                 levels of expertise. The purpose of this article is to
                 first of all give an overview of all aspects of Phrase
                 Detectives, from the design of the game and the HLT
                 methods we used to the results we have obtained so far.
                 It furthermore summarizes the lessons that we have
                 learned in developing this game, which should help
                 other researchers to design and implement similar
                 games.",
  acknowledgement = ack-nhfb,
  articleno =    "3",
  fjournal =     "ACM Transactions on Interactive Intelligent Systems
                 (TIIS)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}

@Article{Console:2013:ISN,
  author =       "Luca Console and Fabrizio Antonelli and Giulia Biamino
                 and Francesca Carmagnola and Federica Cena and Elisa
                 Chiabrando and Vincenzo Cuciti and Matteo Demichelis
                 and Franco Fassio and Fabrizio Franceschi and Roberto
                 Furnari and Cristina Gena and Marina Geymonat and
                 Piercarlo Grimaldi and Pierluigi Grillo and Silvia
                 Likavec and Ilaria Lombardi and Dario Mana and
                 Alessandro Marcengo and Michele Mioli and Mario
                 Mirabelli and Monica Perrero and Claudia Picardi and
                 Federica Protti and Amon Rapp and Rossana Simeoni and
                 Daniele Theseider Dupr{\'e} and Ilaria Torre and Andrea
                 Toso and Fabio Torta and Fabiana Vernero",
  title =        "Interacting with social networks of intelligent things
                 and people in the world of gastronomy",
  journal =      j-TIIS,
  volume =       "3",
  number =       "1",
  pages =        "4:1--4:??",
  month =        apr,
  year =         "2013",
  CODEN =        "????",
  ISSN =         "2160-6455 (print), 2160-6463 (electronic)",
  bibdate =      "Tue Apr 30 18:37:17 MDT 2013",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tiis.bib",
  abstract =     "This article introduces a framework for creating rich
                 augmented environments based on a social web of
                 intelligent things and people. We target outdoor
                 environments, aiming to transform a region into a smart
                 environment that can share its cultural heritage with
                 people, promoting itself and its special qualities.
                 Using the applications developed in the framework,
                 people can interact with things, listen to the stories
                 that these things tell them, and make their own
                 contributions. The things are intelligent in the sense
                 that they aggregate information provided by users and
                 behave in a socially active way. They can autonomously
                 establish social relationships on the basis of their
                 properties and their interaction with users. Hence when
                 a user gets in touch with a thing, she is also
                 introduced to its social network consisting of other
                 things and of users; she can navigate this network to
                 discover and explore the world around the thing itself.
                 Thus the system supports serendipitous navigation in a
                 network of things and people that evolves according to
                 the behavior of users. An innovative interaction model
                 was defined that allows users to interact with objects
                 in a natural, playful way using smartphones without the
                 need for a specially created infrastructure. The
                 framework was instantiated into a suite of applications
                 called WantEat, in which objects from the domain of
                 tourism and gastronomy (such as cheese wheels or
                 bottles of wine) are taken as testimonials of the
                 cultural roots of a region. WantEat includes an
                 application that allows the definition and registration
                 of things, a mobile application that allows users to
                 interact with things, and an application that supports
                 stakeholders in getting feedback about the things that
                 they have registered in the system. WantEat was
                 developed and tested in a real-world context which
                 involved a region and gastronomy-related items from it
                 (such as products, shops, restaurants, and recipes),
                 through an early evaluation with stakeholders and a
                 final evaluation with hundreds of users.",
  acknowledgement = ack-nhfb,
  articleno =    "4",
  fjournal =     "ACM Transactions on Interactive Intelligent Systems
                 (TIIS)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}

@Article{Song:2013:PII,
  author =       "Wei Song and Andrew Finch and Kumiko Tanaka-Ishii and
                 Keiji Yasuda and Eiichiro Sumita",
  title =        "{picoTrans}: an intelligent icon-driven interface for
                 cross-lingual communication",
  journal =      j-TIIS,
  volume =       "3",
  number =       "1",
  pages =        "5:1--5:??",
  month =        apr,
  year =         "2013",
  CODEN =        "????",
  ISSN =         "2160-6455 (print), 2160-6463 (electronic)",
  bibdate =      "Tue Apr 30 18:37:17 MDT 2013",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tiis.bib",
  abstract =     "picoTrans is a prototype system that introduces a
                 novel icon-based paradigm for cross-lingual
                 communication on mobile devices. Our approach marries a
                 machine translation system with the popular picture
                 book. Users interact with picoTrans by pointing at
                 pictures as if it were a picture book; the system
                 generates natural language from these icons and the
                 user is able to interact with the icon sequence to
                 refine the meaning of the words that are generated.
                 When users are satisfied that the sentence generated
                 represents what they wish to express, they tap a
                 translate button and picoTrans displays the
                 translation. Structuring the process of communication
                 in this way has many advantages. First, tapping icons
                 is a very natural method of user input on mobile
                 devices; typing is cumbersome and speech input
                 errorful. Second, the sequence of icons which is
                 annotated both with pictures and bilingually with words
                 is meaningful to both users, and it opens up a second
                 channel of communication between them that conveys the
                 gist of what is being expressed. We performed a number
                 of evaluations of picoTrans to determine: its coverage
                 of a corpus of in-domain sentences; the input
                 efficiency in terms of the number of key presses
                 required relative to text entry; and users' overall
                 impressions of using the system compared to using a
                 picture book. Our results show that we are able to
                 cover 74\% of the expressions in our test corpus using
                 a 2000-icon set; we believe that this icon set size is
                 realistic for a mobile device. We also found that
                 picoTrans requires fewer key presses than typing the
                 input and that the system is able to predict the
                 correct, intended natural language sentence from the
                 icon sequence most of the time, making user interaction
                 with the icon sequence often unnecessary. In the user
                 evaluation, we found that in general users prefer using
                 picoTrans and are able to communicate more rapidly and
                 expressively. Furthermore, users had more confidence
                 that they were able to communicate effectively using
                 picoTrans.",
  acknowledgement = ack-nhfb,
  articleno =    "5",
  fjournal =     "ACM Transactions on Interactive Intelligent Systems
                 (TIIS)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}

@Article{Schreiber:2013:ISI,
  author =       "Daniel Schreiber and Kris Luyten and Max
                 M{\"u}hlh{\"a}user and Oliver Brdiczka and Melanie
                 Hartman",
  title =        "Introduction to the special issue on interaction with
                 smart objects",
  journal =      j-TIIS,
  volume =       "3",
  number =       "2",
  pages =        "6:1--6:??",
  month =        jul,
  year =         "2013",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/2499474.2499475",
  ISSN =         "2160-6455 (print), 2160-6463 (electronic)",
  bibdate =      "Thu Mar 13 06:46:45 MDT 2014",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tiis.bib",
  abstract =     "Smart objects can be smart because of the information
                 and communication technology that is added to
                 human-made artifacts. It is not, however, the
                 technology itself that makes them smart but rather the
                 way in which the technology is integrated, and their
                 smartness surfaces through how people are able to
                 interact with these objects. Hence, the key challenge
                 for making smart objects successful is to design usable
                 and useful interactions with them. We list five
                 features that can contribute to the smartness of an
                 object, and we discuss how smart objects can help
                 resolve the simplicity-featurism paradox. We conclude
                 by introducing the three articles in this special
                 issue, which dive into various aspects of smart object
                 interaction: augmenting objects with projection,
                 service-oriented interaction with smart objects via a
                 mobile portal, and an analysis of input-output
                 relations in interaction with tangible smart objects.",
  acknowledgement = ack-nhfb,
  articleno =    "6",
  fjournal =     "ACM Transactions on Interactive Intelligent Systems
                 (TIIS)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}

@Article{Molyneaux:2013:CAM,
  author =       "David Molyneaux and Hans Gellersen and Joe Finney",
  title =        "Cooperative augmentation of mobile smart objects with
                 projected displays",
  journal =      j-TIIS,
  volume =       "3",
  number =       "2",
  pages =        "7:1--7:??",
  month =        jul,
  year =         "2013",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/2499474.2499476",
  ISSN =         "2160-6455 (print), 2160-6463 (electronic)",
  bibdate =      "Thu Mar 13 06:46:45 MDT 2014",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tiis.bib",
  abstract =     "Sensors, processors, and radios can be integrated
                 invisibly into objects to make them smart and sensitive
                 to user interaction, but feedback is often limited to
                 beeps, blinks, or buzzes. We propose to redress this
                 input-output imbalance by augmentation of smart objects
                 with projected displays that, unlike physical
                 displays, allow seamless integration with the natural
                 appearance of an object. In this article, we
                 investigate how, in a ubiquitous computing world, smart
                 objects can acquire and control a projection. We
                 consider that projectors and cameras are ubiquitous in
                 the environment, and we develop a novel conception and
                 system that enables smart objects to spontaneously
                 associate with projector-camera systems for cooperative
                 augmentation. Projector-camera systems are conceived as
                 generic, supporting standard computer vision methods
                 for different appearance cues, and smart objects
                 provide a model of their appearance for method
                 selection at runtime, as well as sensor observations to
                 constrain the visual detection process. Cooperative
                 detection results in accurate location and pose of the
                 object, which is then tracked for visual augmentation
                 in response to display requests by the smart object. In
                 this article, we define the conceptual framework
                 underlying our approach; report on computer vision
                 experiments that give original insight into natural
                 appearance-based detection of everyday objects; show
                 how object sensing can be used to increase speed and
                 robustness of visual detection; describe and evaluate a
                 fully implemented system; and describe two smart object
                 applications to illustrate the system's cooperative
                 augmentation process and the embodied interactions it
                 enables with smart objects.",
  acknowledgement = ack-nhfb,
  articleno =    "7",
  fjournal =     "ACM Transactions on Interactive Intelligent Systems
                 (TIIS)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}

@Article{Thebault:2013:ESP,
  author =       "Pierrick Thebault and Dominique Decotter and Mathieu
                 Boussard and Monique Lu",
  title =        "Embodying services into physical places: Toward the
                 design of a mobile environment browser",
  journal =      j-TIIS,
  volume =       "3",
  number =       "2",
  pages =        "8:1--8:??",
  month =        jul,
  year =         "2013",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/2499474.2499477",
  ISSN =         "2160-6455 (print), 2160-6463 (electronic)",
  bibdate =      "Thu Mar 13 06:46:45 MDT 2014",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tiis.bib",
  abstract =     "The tremendous developments in mobile computing and
                 handheld devices have allowed for an increasing usage
                 of the resources of the World Wide Web. People today
                 consume information and services on the go, through
                 smart phones applications capable of exploiting their
                 location in order to adapt the content according to the
                 context of use. As location-based services gain
                 traction and reveal their limitations, we argue there
                 is a need for intelligent systems to be created to
                 better support people's activities in their experience
                 of the city, especially regarding their decision-making
                 processes. In this article, we explore the opportunity
                 to move closer to the realization of the ubiquitous
                 computing vision by turning physical places into smart
                 environments capable of cooperatively and autonomously
                 collecting, processing, and transporting information
                 about their characteristics (e.g., practical
                 information, presence of people, and ambience).
                 Following a multidisciplinary approach which leverages
                 psychology, design, and computer science, we propose to
                 investigate the potential of building communication and
                 interaction spaces, called information spheres, on top
                 of physical places such as businesses, homes, and
                 institutions. We argue that, if the latter are exposed
                 on the Web, they can act as a platform delivering
                 information and services and mediating interactions
                 with smart objects without requiring too much effort
                 for the deployment of the architecture. After
                 presenting the inherent challenges of our vision, we go
                 through the protocol of two preliminary experiments
                 that aim to evaluate users' perception of different
                 types of information (i.e., reviews, check-in
                 information, video streams, and real-time
                 representations) and their influence on the
                 decision-making process. Results of this study lead us
                 to elaborate the design considerations that must be
                 taken into account to ensure the intelligibility and
                 user acceptance of information spheres. We finally
                 describe a research prototype application called
                 Environment Browser (Env-B) and present the underlying
                 smart space middleware, before evaluating the user
                 experience with our system through quantitative and
                 qualitative methods.",
  acknowledgement = ack-nhfb,
  articleno =    "8",
  fjournal =     "ACM Transactions on Interactive Intelligent Systems
                 (TIIS)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}

@Article{vandeGarde-Perik:2013:AIO,
  author =       "Evelien van de Garde-Perik and Serge Offermans and
                 Koen van Boerdonk and Kars-Michiel Lenssen and Elise
                 van den Hoven",
  title =        "An analysis of input-output relations in interaction
                 with smart tangible objects",
  journal =      j-TIIS,
  volume =       "3",
  number =       "2",
  pages =        "9:1--9:??",
  month =        jul,
  year =         "2013",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/2499474.2499478",
  ISSN =         "2160-6455 (print), 2160-6463 (electronic)",
  bibdate =      "Thu Mar 13 06:46:45 MDT 2014",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tiis.bib",
  abstract =     "This article focuses on the conceptual relation
                 between the user's input and a system's output in
                 interaction with smart tangible objects. Understanding
                 this input-output relation (IO relation) is a
                 prerequisite for the design of meaningful interaction.
                 A meaningful IO relation allows the user to know what
                 to do with a system to achieve a certain goal and to
                 evaluate the outcome. The work discussed in this
                 article followed a design research process in which
                 four concepts were developed and prototyped. An
                 evaluation was performed using these prototypes to
                 investigate the effect of highly different IO relations
                 on the user's understanding of the interaction. The
                 evaluation revealed two types of IO relations differing
                 in functionality and the number of mappings between the
                 user and system actions. These two types of relations
                 are described by two IO models that provide an overview
                 of these mappings. Furthermore, they illustrate the
                 role of the user and the influence of the system in the
                 process of understanding the interaction. The analysis
                 of the two types of IO models illustrates the value of
                 understanding IO relations for the design of smart
                 tangible objects.",
  acknowledgement = ack-nhfb,
  articleno =    "9",
  fjournal =     "ACM Transactions on Interactive Intelligent Systems
                 (TIIS)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}

@Article{Andre:2013:ISS,
  author =       "Elisabeth Andr{\'e} and Joyce Chai",
  title =        "Introduction to the special section on eye gaze and
                 conversation",
  journal =      j-TIIS,
  volume =       "3",
  number =       "2",
  pages =        "10:1--10:??",
  month =        jul,
  year =         "2013",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/2499474.2499479",
  ISSN =         "2160-6455 (print), 2160-6463 (electronic)",
  bibdate =      "Thu Mar 13 06:46:45 MDT 2014",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tiis.bib",
  abstract =     "This editorial introduction first explains the origin
                 of this special section. It then outlines how each of
                 the two articles included sheds light on possibilities
                 for conversational dialog systems to use eye gaze as a
                 signal that reflects aspects of participation in the
                 dialog: degree of engagement and turn taking behavior,
                 respectively.",
  acknowledgement = ack-nhfb,
  articleno =    "10",
  fjournal =     "ACM Transactions on Interactive Intelligent Systems
                 (TIIS)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}

@Article{Ishii:2013:GAC,
  author =       "Ryo Ishii and Yukiko I. Nakano and Toyoaki Nishida",
  title =        "Gaze awareness in conversational agents: Estimating a
                 user's conversational engagement from eye gaze",
  journal =      j-TIIS,
  volume =       "3",
  number =       "2",
  pages =        "11:1--11:??",
  month =        jul,
  year =         "2013",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/2499474.2499480",
  ISSN =         "2160-6455 (print), 2160-6463 (electronic)",
  bibdate =      "Thu Mar 13 06:46:45 MDT 2014",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tiis.bib",
  abstract =     "In face-to-face conversations, speakers are
                 continuously checking whether the listener is engaged
                 in the conversation, and they change their
                 conversational strategy if the listener is not fully
                 engaged. With the goal of building a conversational
                 agent that can adaptively control conversations, in
                 this study we analyze listener gaze behaviors and
                 develop a method for estimating whether a listener is
                 engaged in the conversation on the basis of these
                 behaviors. First, we conduct a Wizard-of-Oz study to
                 collect information on a user's gaze behaviors. We then
                 investigate how conversational disengagement, as
                 annotated by human judges, correlates with gaze
                 transition, mutual gaze (eye contact) occurrence, gaze
                 duration, and eye movement distance. On the basis of
                 the results of these analyses, we identify useful
                 information for estimating a user's disengagement and
                 establish an engagement estimation method using a
                 decision tree technique. The results of these analyses
                 show that a model using the features of gaze
                 transition, mutual gaze occurrence, gaze duration, and
                 eye movement distance provides the best performance and
                 can estimate the user's conversational engagement
                 accurately. The estimation model is then implemented as
                 a real-time disengagement judgment mechanism and
                 incorporated into a multimodal dialog manager in an
                 animated conversational agent. This agent is designed
                 to estimate the user's conversational engagement and
                 generate probing questions when the user is distracted
                 from the conversation. Finally, we evaluate the
                 engagement-sensitive agent and find that asking probing
                 questions at the proper times has the expected effects
                 on the user's verbal/nonverbal behaviors during
                 communication with the agent. We also find that our
                 agent system improves the user's impression of the
                 agent in terms of its engagement awareness, behavior
                 appropriateness, conversation smoothness, favorability,
                 and intelligence.",
  acknowledgement = ack-nhfb,
  articleno =    "11",
  fjournal =     "ACM Transactions on Interactive Intelligent Systems
                 (TIIS)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}

@Article{Jokinen:2013:GTT,
  author =       "Kristiina Jokinen and Hirohisa Furukawa and Masafumi
                 Nishida and Seiichi Yamamoto",
  title =        "Gaze and turn-taking behavior in casual conversational
                 interactions",
  journal =      j-TIIS,
  volume =       "3",
  number =       "2",
  pages =        "12:1--12:??",
  month =        jul,
  year =         "2013",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/2499474.2499481",
  ISSN =         "2160-6455 (print), 2160-6463 (electronic)",
  bibdate =      "Thu Mar 13 06:46:45 MDT 2014",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tiis.bib",
  abstract =     "Eye gaze is an important means for controlling
                 interaction and coordinating the participants' turns
                 smoothly. We have studied how eye gaze correlates with
                 spoken interaction and especially focused on the
                 combined effect of the speech signal and gazing to
                 predict turn taking possibilities. It is well known
                 that mutual gaze is important in the coordination of
                 turn taking in two-party dialogs, and in this article,
                 we investigate whether this fact also holds for
                 three-party conversations. In group interactions, it
                 may be that different features are used for managing
                 turn taking than in two-party dialogs. We collected
                 casual conversational data and used an eye tracker to
                 systematically observe a participant's gaze in the
                 interactions. By studying the combined effect of speech
                 and gaze on turn taking, we aimed to answer our main
                 questions: How well can eye gaze help in predicting
                 turn taking? What is the role of eye gaze when the
                 speaker holds the turn? Is the role of eye gaze as
                 important in three-party dialogs as in two-party
                  dialogs? We used Support Vector Machines (SVMs) to
                 classify turn taking events with respect to speech and
                 gaze features, so as to estimate how well the features
                 signal a change of the speaker or a continuation of the
                 same speaker. The results confirm the earlier
                 hypothesis that eye gaze significantly helps in
                 predicting the partner's turn taking activity, and we
                 also get supporting evidence for our hypothesis that
                 the speaker is a prominent coordinator of the
                 interaction space. Such a turn taking model could be
                 used in interactive applications to improve the
                 system's conversational performance.",
  acknowledgement = ack-nhfb,
  articleno =    "12",
  fjournal =     "ACM Transactions on Interactive Intelligent Systems
                 (TIIS)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}

@Article{Jameson:2013:MJR,
  author =       "Anthony Jameson",
  title =        "In Memoriam: {John Riedl}",
  journal =      j-TIIS,
  volume =       "3",
  number =       "3",
  pages =        "13:1--13:??",
  month =        oct,
  year =         "2013",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/2533670.2533671",
  ISSN =         "2160-6455 (print), 2160-6463 (electronic)",
  bibdate =      "Thu Mar 13 06:46:47 MDT 2014",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tiis.bib",
  abstract =     "This recollection of John Riedl, founding
                 coeditor-in-chief of the ACM Transactions on
                  Interactive Intelligent Systems, presents a picture,
                  drawn by the journal's editors, of what it was like to
                  collaborate and interact with him.",
  acknowledgement = ack-nhfb,
  articleno =    "13",
  fjournal =     "ACM Transactions on Interactive Intelligent Systems
                 (TIIS)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}

@Article{Amershi:2013:LAW,
  author =       "Saleema Amershi and Jalal Mahmud and Jeffrey Nichols
                 and Tessa Lau and German Attanasio Ruiz",
  title =        "{LiveAction}: Automating {Web} Task Model Generation",
  journal =      j-TIIS,
  volume =       "3",
  number =       "3",
  pages =        "14:1--14:??",
  month =        oct,
  year =         "2013",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/2533670.2533672",
  ISSN =         "2160-6455 (print), 2160-6463 (electronic)",
  bibdate =      "Thu Mar 13 06:46:47 MDT 2014",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tiis.bib",
  abstract =     "Task automation systems promise to increase human
                 productivity by assisting us with our mundane and
                 difficult tasks. These systems often rely on people to
                 (1) identify the tasks they want automated and (2)
                 specify the procedural steps necessary to accomplish
                 those tasks (i.e., to create task models). However, our
                 interviews with users of a Web task automation system
                 reveal that people find it difficult to identify tasks
                 to automate and most do not even believe they perform
                 repetitive tasks worthy of automation. Furthermore,
                 even when automatable tasks are identified, the
                 well-recognized difficulties of specifying task steps
                 often prevent people from taking advantage of these
                 automation systems. In this research, we analyze real
                 Web usage data and find that people do in fact repeat
                 behaviors on the Web and that automating these
                 behaviors, regardless of their complexity, would reduce
                 the overall number of actions people need to perform
                 when completing their tasks, potentially saving time.
                 Motivated by these findings, we developed LiveAction, a
                 fully-automated approach to generating task models from
                 Web usage data. LiveAction models can be used to
                 populate the task model repositories required by many
                 automation systems, helping us take advantage of
                 automation in our everyday lives.",
  acknowledgement = ack-nhfb,
  articleno =    "14",
  fjournal =     "ACM Transactions on Interactive Intelligent Systems
                 (TIIS)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}

@Article{Wetzler:2013:CPM,
  author =       "Philipp Wetzler and Steven Bethard and Heather Leary
                 and Kirsten Butcher and Soheil Danesh Bahreini and Jin
                 Zhao and James H. Martin and Tamara Sumner",
  title =        "Characterizing and Predicting the Multifaceted Nature
                 of Quality in Educational {Web} Resources",
  journal =      j-TIIS,
  volume =       "3",
  number =       "3",
  pages =        "15:1--15:??",
  month =        oct,
  year =         "2013",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/2533670.2533673",
  ISSN =         "2160-6455 (print), 2160-6463 (electronic)",
  bibdate =      "Thu Mar 13 06:46:47 MDT 2014",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tiis.bib",
  abstract =     "Efficient learning from Web resources can depend on
                 accurately assessing the quality of each resource. We
                 present a methodology for developing computational
                 models of quality that can assist users in assessing
                 Web resources. The methodology consists of four steps:
                 (1) a meta-analysis of previous studies to decompose
                 quality into high-level dimensions and low-level
                 indicators, (2) an expert study to identify the key
                 low-level indicators of quality in the target domain,
                 (3) human annotation to provide a collection of example
                 resources where the presence or absence of quality
                 indicators has been tagged, and (4) training of a
                 machine learning model to predict quality indicators
                 based on content and link features of Web resources. We
                 find that quality is a multifaceted construct, with
                 different aspects that may be important to different
                 users at different times. We show that machine learning
                 models can predict this multifaceted nature of quality,
                 both in the context of aiding curators as they evaluate
                 resources submitted to digital libraries, and in the
                 context of aiding teachers as they develop online
                 educational resources. Finally, we demonstrate how
                 computational models of quality can be provided as a
                 service, and embedded into applications such as Web
                 search.",
  acknowledgement = ack-nhfb,
  articleno =    "15",
  fjournal =     "ACM Transactions on Interactive Intelligent Systems
                 (TIIS)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}

@Article{Amir:2013:PRV,
  author =       "Ofra Amir and Ya'akov (Kobi) Gal",
  title =        "Plan Recognition and Visualization in Exploratory
                 Learning Environments",
  journal =      j-TIIS,
  volume =       "3",
  number =       "3",
  pages =        "16:1--16:??",
  month =        oct,
  year =         "2013",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/2533670.2533674",
  ISSN =         "2160-6455 (print), 2160-6463 (electronic)",
  bibdate =      "Thu Mar 13 06:46:47 MDT 2014",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tiis.bib",
  abstract =     "Modern pedagogical software is open-ended and
                 flexible, allowing students to solve problems through
                 exploration and trial-and-error. Such exploratory
                  settings provide a rich educational environment for
                 students, but they challenge teachers to keep track of
                 students' progress and to assess their performance.
                 This article presents techniques for recognizing
                 students' activities in such pedagogical software and
                 visualizing these activities to teachers. It describes
                 a new plan recognition algorithm that uses a recursive
                 grammar that takes into account repetition and
                 interleaving of activities. This algorithm was
                 evaluated empirically using an exploratory environment
                 for teaching chemistry used by thousands of students in
                 several countries. It was always able to correctly
                 infer students' plans when the appropriate grammar was
                 available. We designed two methods for visualizing
                 students' activities for teachers: one that visualizes
                 students' inferred plans, and one that visualizes
                 students' interactions over a timeline. Both of these
                 visualization methods were preferred to and found more
                 helpful than a baseline method which showed a movie of
                 students' interactions. These results demonstrate the
                 benefit of combining novel AI techniques and
                 visualization methods for the purpose of designing
                 collaborative systems that support students in their
                 problem solving and teachers in their understanding of
                 students' performance.",
  acknowledgement = ack-nhfb,
  articleno =    "16",
  fjournal =     "ACM Transactions on Interactive Intelligent Systems
                 (TIIS)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}

@Article{Chen:2013:HDM,
  author =       "Li Chen and Marco de Gemmis and Alexander Felfernig
                 and Pasquale Lops and Francesco Ricci and Giovanni
                 Semeraro",
  title =        "Human Decision Making and Recommender Systems",
  journal =      j-TIIS,
  volume =       "3",
  number =       "3",
  pages =        "17:1--17:??",
  month =        oct,
  year =         "2013",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/2533670.2533675",
  ISSN =         "2160-6455 (print), 2160-6463 (electronic)",
  bibdate =      "Thu Mar 13 06:46:47 MDT 2014",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tiis.bib",
  abstract =     "Recommender systems have already proved to be valuable
                 for coping with the information overload problem in
                 several application domains. They provide people with
                 suggestions for items which are likely to be of
                  interest to them; hence, a primary function of
                 recommender systems is to help people make good choices
                 and decisions. However, most previous research has
                 focused on recommendation techniques and algorithms,
                 and less attention has been devoted to the decision
                 making processes adopted by the users and possibly
                 supported by the system. There is still a gap between
                 the importance that the community gives to the
                 assessment of recommendation algorithms and the current
                 range of ongoing research activities concerning human
                 decision making. Different decision-psychological
                 phenomena can influence the decision making of users of
                 recommender systems, and research along these lines is
                 becoming increasingly important and popular. This
                 special issue highlights how the coupling of
                 recommendation algorithms with the understanding of
                 human choice and decision making theory has the
                 potential to benefit research and practice on
                 recommender systems and to enable users to achieve a
                 good balance between decision accuracy and decision
                 effort.",
  acknowledgement = ack-nhfb,
  articleno =    "17",
  fjournal =     "ACM Transactions on Interactive Intelligent Systems
                 (TIIS)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}

@Article{Dodson:2013:ELA,
  author =       "Thomas Dodson and Nicholas Mattei and Joshua T. Guerin
                 and Judy Goldsmith",
  title =        "An {English}-Language Argumentation Interface for
                 Explanation Generation with {Markov} Decision Processes
                 in the Domain of Academic Advising",
  journal =      j-TIIS,
  volume =       "3",
  number =       "3",
  pages =        "18:1--18:??",
  month =        oct,
  year =         "2013",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/2513564",
  ISSN =         "2160-6455 (print), 2160-6463 (electronic)",
  bibdate =      "Thu Mar 13 06:46:47 MDT 2014",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tiis.bib",
  abstract =     "A Markov Decision Process (MDP) policy presents, for
                 each state, an action, which preferably maximizes the
                 expected utility accrual over time. In this article, we
                 present a novel explanation system for MDP policies.
                 The system interactively generates conversational
                 English-language explanations of the actions suggested
                 by an optimal policy, and does so in real time. We rely
                 on natural language explanations in order to build
                 trust between the user and the explanation system,
                 leveraging existing research in psychology in order to
                 generate salient explanations. Our explanation system
                 is designed for portability between domains and uses a
                 combination of domain-specific and domain-independent
                 techniques. The system automatically extracts implicit
                 knowledge from an MDP model and accompanying policy.
                 This MDP-based explanation system can be ported between
                 applications without additional effort by knowledge
                 engineers or model builders. Our system separates
                 domain-specific data from the explanation logic,
                 allowing for a robust system capable of incremental
                 upgrades. Domain-specific explanations are generated
                 through case-based explanation techniques specific to
                 the domain and a knowledge base of concept mappings
                 used to generate English-language explanations.",
  acknowledgement = ack-nhfb,
  articleno =    "18",
  fjournal =     "ACM Transactions on Interactive Intelligent Systems
                 (TIIS)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}

@Article{Freyne:2013:RBP,
  author =       "Jill Freyne and Shlomo Berkovsky and Gregory Smith",
  title =        "Rating Bias and Preference Acquisition",
  journal =      j-TIIS,
  volume =       "3",
  number =       "3",
  pages =        "19:1--19:??",
  month =        oct,
  year =         "2013",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/2499673",
  ISSN =         "2160-6455 (print), 2160-6463 (electronic)",
  bibdate =      "Thu Mar 13 06:46:47 MDT 2014",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tiis.bib",
  abstract =     "Personalized systems and recommender systems exploit
                 implicitly and explicitly provided user information to
                 address the needs and requirements of those using their
                 services. User preference information, often in the
                 form of interaction logs and ratings data, is used to
                 identify similar users, whose opinions are leveraged to
                 inform recommendations or to filter information. In
                 this work we explore a different dimension of
                  information: trends in user bias and reasoning learned
                 from ratings provided by users to a recommender system.
                 Our work examines the characteristics of a dataset of
                 100,000 user ratings on a corpus of recipes, which
                 illustrates stable user bias towards certain features
                 of the recipes (cuisine type, key ingredient, and
                 complexity). We exploit this knowledge to design and
                 evaluate a personalized rating acquisition tool based
                 on active learning, which leverages user biases in
                 order to obtain ratings bearing high-value information
                 and to reduce prediction errors with new users.",
  acknowledgement = ack-nhfb,
  articleno =    "19",
  fjournal =     "ACM Transactions on Interactive Intelligent Systems
                 (TIIS)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}

@Article{Knijnenburg:2013:MDA,
  author =       "Bart P. Knijnenburg and Alfred Kobsa",
  title =        "Making Decisions about Privacy: Information Disclosure
                 in Context-Aware Recommender Systems",
  journal =      j-TIIS,
  volume =       "3",
  number =       "3",
  pages =        "20:1--20:??",
  month =        oct,
  year =         "2013",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/2499670",
  ISSN =         "2160-6455 (print), 2160-6463 (electronic)",
  bibdate =      "Thu Mar 13 06:46:47 MDT 2014",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tiis.bib",
  abstract =     "Recommender systems increasingly use contextual and
                 demographical data as a basis for recommendations.
                 Users, however, often feel uncomfortable providing such
                 information. In a privacy-minded design of
                 recommenders, users are free to decide for themselves
                 what data they want to disclose about themselves. But
                 this decision is often complex and burdensome, because
                 the consequences of disclosing personal information are
                 uncertain or even unknown. Although a number of
                 researchers have tried to analyze and facilitate such
                 information disclosure decisions, their research
                 results are fragmented, and they often do not hold up
                 well across studies. This article describes a unified
                 approach to privacy decision research that describes
                 the cognitive processes involved in users' ``privacy
                 calculus'' in terms of system-related perceptions and
                 experiences that act as mediating factors to
                 information disclosure. The approach is applied in an
                 online experiment with 493 participants using a mock-up
                 of a context-aware recommender system. Analyzing the
                 results with a structural linear model, we demonstrate
                 that personal privacy concerns and disclosure
                 justification messages affect the perception of and
                 experience with a system, which in turn drive
                 information disclosure decisions. Overall, disclosure
                 justification messages do not increase disclosure.
                 Although they are perceived to be valuable, they
                 decrease users' trust and satisfaction. Another result
                 is that manipulating the order of the requests
                 increases the disclosure of items requested early but
                 decreases the disclosure of items requested later.",
  acknowledgement = ack-nhfb,
  articleno =    "20",
  fjournal =     "ACM Transactions on Interactive Intelligent Systems
                 (TIIS)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}

@Article{Apostolopoulos:2014:IOL,
  author =       "Ilias Apostolopoulos and Navid Fallah and Eelke Folmer
                 and Kostas E. Bekris",
  title =        "Integrated online localization and navigation for
                 people with visual impairments using smart phones",
  journal =      j-TIIS,
  volume =       "3",
  number =       "4",
  pages =        "21:1--21:??",
  month =        jan,
  year =         "2014",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/2499669",
  ISSN =         "2160-6455 (print), 2160-6463 (electronic)",
  bibdate =      "Thu Mar 13 06:46:49 MDT 2014",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tiis.bib",
  abstract =     "Indoor localization and navigation systems for
                 individuals with Visual Impairments (VIs) typically
                 rely upon extensive augmentation of the physical space,
                 significant computational resources, or heavy and
                 expensive sensors; thus, few systems have been
                 implemented on a large scale. This work describes a
                 system able to guide people with VIs through indoor
                 environments using inexpensive sensors, such as
                 accelerometers and compasses, which are available in
                 portable devices like smart phones. The method takes
                 advantage of feedback from the human user, who confirms
                 the presence of landmarks, something that users with
                 VIs already do when navigating in a building. The
                 system calculates the user's location in real time and
                 uses it to provide audio instructions on how to reach
                  the desired destination. Early experiments
                 suggested that the accuracy of the localization depends
                 on the type of directions and the availability of an
                 appropriate transition model for the user. A critical
                 parameter for the transition model is the user's step
                 length. Consequently, this work also investigates
                 different schemes for automatically computing the
                 user's step length and reducing the dependence of the
                 approach on the definition of an accurate transition
                 model. In this way, the direction provision method is
                 able to use the localization estimate and adapt to
                 failed executions of paths by the users. Experiments
                 are presented that evaluate the accuracy of the overall
                 integrated system, which is executed online on a smart
                 phone. Both people with VIs and blindfolded sighted
                 people participated in the experiments, which included
                 paths along multiple floors that required the use of
                 stairs and elevators.",
  acknowledgement = ack-nhfb,
  articleno =    "21",
  fjournal =     "ACM Transactions on Interactive Intelligent Systems
                 (TIIS)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}

@Article{Zamborlin:2014:FGI,
  author =       "Bruno Zamborlin and Frederic Bevilacqua and Marco
                  Gillies and Mark d'Inverno",
  title =        "Fluid gesture interaction design: Applications of
                 continuous recognition for the design of modern
                 gestural interfaces",
  journal =      j-TIIS,
  volume =       "3",
  number =       "4",
  pages =        "22:1--22:??",
  month =        jan,
  year =         "2014",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/2543921",
  ISSN =         "2160-6455 (print), 2160-6463 (electronic)",
  bibdate =      "Thu Mar 13 06:46:49 MDT 2014",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tiis.bib",
  abstract =     "This article presents Gesture Interaction DEsigner
                 (GIDE), an innovative application for gesture
                 recognition. Instead of recognizing gestures only after
                 they have been entirely completed, as happens in
                 classic gesture recognition systems, GIDE exploits the
                 full potential of gestural interaction by tracking
                 gestures continuously and synchronously, allowing users
                 to both control the target application moment to moment
                 and also receive immediate and synchronous feedback
                 about system recognition states. By this means, they
                 quickly learn how to interact with the system in order
                 to develop better performances. Furthermore, rather
                 than learning the predefined gestures of others, GIDE
                 allows users to design their own gestures, making
                 interaction more natural and also allowing the
                  applications to be tailored to users' specific needs.
                  We demonstrate these new qualities, which combine to
                  provide fluid gesture interaction design, through
                  evaluations with a range of
                 performers and artists.",
  acknowledgement = ack-nhfb,
  articleno =    "22",
  fjournal =     "ACM Transactions on Interactive Intelligent Systems
                 (TIIS)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}

@Article{Young:2014:DET,
  author =       "James E. Young and Takeo Igarashi and Ehud Sharlin and
                 Daisuke Sakamoto and Jeffrey Allen",
  title =        "Design and evaluation techniques for authoring
                 interactive and stylistic behaviors",
  journal =      j-TIIS,
  volume =       "3",
  number =       "4",
  pages =        "23:1--23:??",
  month =        jan,
  year =         "2014",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/2499671",
  ISSN =         "2160-6455 (print), 2160-6463 (electronic)",
  bibdate =      "Thu Mar 13 06:46:49 MDT 2014",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tiis.bib",
  abstract =     "We present a series of projects for end-user authoring
                 of interactive robotic behaviors, with a particular
                 focus on the style of those behaviors: we call this
                 approach Style-by-Demonstration (SBD). We provide an
                  overview of three different SBD platforms:
                 SBD for animated character interactive locomotion
                 paths, SBD for interactive robot locomotion paths, and
                 SBD for interactive robot dance. The primary
                 contribution of this article is a detailed
                 cross-project SBD analysis of the interaction designs
                 and evaluation approaches employed, with the goal of
                 providing general guidelines stemming from our
                 experiences, for both developing and evaluating SBD
                 systems. In addition, we provide the first full account
                 of our Puppet Master SBD algorithm, with an explanation
                 of how it evolved through the projects.",
  acknowledgement = ack-nhfb,
  articleno =    "23",
  fjournal =     "ACM Transactions on Interactive Intelligent Systems
                 (TIIS)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}

@Article{Kumar:2014:TES,
  author =       "Rohit Kumar and Carolyn P. Ros{\'e}",
  title =        "Triggering effective social support for online
                 groups",
  journal =      j-TIIS,
  volume =       "3",
  number =       "4",
  pages =        "24:1--24:??",
  month =        jan,
  year =         "2014",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/2499672",
  ISSN =         "2160-6455 (print), 2160-6463 (electronic)",
  bibdate =      "Thu Mar 13 06:46:49 MDT 2014",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tiis.bib",
  abstract =     "Conversational agent technology is an emerging
                 paradigm for creating a social environment in online
                 groups that is conducive to effective teamwork. Prior
                 work has demonstrated advantages in terms of learning
                 gains and satisfaction scores when groups learning
                 together online have been supported by conversational
                 agents that employ Balesian social strategies. This
                 prior work raises two important questions that are
                 addressed in this article. The first question is one of
                 generality. Specifically, are the positive effects of
                 the designed support specific to learning contexts? Or
                 are they in evidence in other collaborative task
                 domains as well? We present a study conducted within a
                 collaborative decision-making task where we see that
                 the positive effects of the Balesian social strategies
                 extend to this new context. The second question is
                 whether it is possible to increase the effectiveness of
                 the Balesian social strategies by increasing the
                 context sensitivity with which the social strategies
                 are triggered. To this end, we present technical work
                 that increases the sensitivity of the triggering. Next,
                 we present a user study that demonstrates an
                 improvement in performance of the support agent with
                 the new, more sensitive triggering policy over the
                 baseline approach from prior work. The technical
                 contribution of this article is that we extend prior
                 work where such support agents were modeled using a
                 composition of conversational behaviors integrated
                 within an event-driven framework. Within the present
                 approach, conversation is orchestrated through
                 context-sensitive triggering of the composed behaviors.
                 The core effort involved in applying this approach
                 involves building a set of triggering policies that
                 achieve this orchestration in a time-sensitive and
                 coherent manner. In line with recent developments in
                 data-driven approaches for building dialog systems, we
                 present a novel technique for learning
                 behavior-specific triggering policies, deploying it as
                 part of our efforts to improve a socially capable
                 conversational tutor agent that supports collaborative
                 learning.",
  acknowledgement = ack-nhfb,
  articleno =    "24",
  fjournal =     "ACM Transactions on Interactive Intelligent Systems
                 (TIIS)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}

@Article{Kritikos:2014:TMD,
  author =       "K. Kritikos and D. Plexousakis and F. Patern{\`o}",
  title =        "Task model-driven realization of interactive
                 application functionality through services",
  journal =      j-TIIS,
  volume =       "3",
  number =       "4",
  pages =        "25:1--25:??",
  month =        jan,
  year =         "2014",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/2559979",
  ISSN =         "2160-6455 (print), 2160-6463 (electronic)",
  bibdate =      "Thu Mar 13 06:46:49 MDT 2014",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tiis.bib",
  abstract =     "The Service-Oriented Computing (SOC) paradigm is
                 currently being adopted by many developers, as it
                 promises the construction of applications through reuse
                 of existing Web Services (WSs). However, current SOC
                 tools produce applications that interact with users in
                 a limited way. This limitation is overcome by
                 model-based Human-Computer Interaction (HCI) approaches
                 that support the development of applications whose
                 functionality is realized with WSs and whose User
                 Interface (UI) is adapted to the user's context.
                 Typically, such approaches do not consider various
                 functional issues, such as the applications' semantics
                 and their syntactic robustness in terms of the WSs
                 selected to implement their functionality and the
                 automation of the service discovery and selection
                 processes. To this end, we propose a model-driven
                 design method for interactive service-based
                 applications that is able to consider the functional
                 issues and their implications for the UI. This method
                 is realized by a semiautomatic environment that can be
                 integrated into current model-based HCI tools to
                 complete the development of interactive service
                 front-ends. The proposed method takes as input an HCI
                 task model, which includes the user's view of the
                 interactive system, and produces a concrete service
                 model that describes how existing services can be
                 combined to realize the application's functionality. To
                 achieve its goal, our method first transforms system
                 tasks into semantic service queries by mapping the task
                 objects onto domain ontology concepts; then it sends
                 each resulting query to a semantic service engine so as
                 to discover the corresponding services. In the end,
                 only one service from those associated with a system
                 task is selected, through the execution of a novel
                 service concretization algorithm that ensures message
                 compatibility between the selected services.",
  acknowledgement = ack-nhfb,
  articleno =    "25",
  fjournal =     "ACM Transactions on Interactive Intelligent Systems
                 (TIIS)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}

@Article{Rafailidis:2014:CBT,
  author =       "Dimitrios Rafailidis and Apostolos Axenopoulos and
                 Jonas Etzold and Stavroula Manolopoulou and Petros
                 Daras",
  title =        "Content-based tag propagation and tensor factorization
                 for personalized item recommendation based on social
                 tagging",
  journal =      j-TIIS,
  volume =       "3",
  number =       "4",
  pages =        "26:1--26:??",
  month =        jan,
  year =         "2014",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/2487164",
  ISSN =         "2160-6455 (print), 2160-6463 (electronic)",
  bibdate =      "Thu Mar 13 06:46:49 MDT 2014",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tiis.bib",
  abstract =     "In this article, a novel method for personalized item
                 recommendation based on social tagging is presented.
                 The proposed approach comprises a content-based tag
                 propagation method to address the sparsity and ``cold
                 start'' problems, which often occur in social tagging
                 systems and decrease the quality of recommendations.
                 The proposed method exploits (a) the content of items
                 and (b) users' tag assignments through a relevance
                 feedback mechanism in order to automatically identify
                 the optimal number of content-based and conceptually
                 similar items. The relevance degrees between users,
                 tags, and conceptually similar items are calculated in
                 order to ensure accurate tag propagation and
                 consequently to address the issue of ``learning tag
                 relevance.'' Moreover, the ternary relation among
                 users, tags, and items is preserved by performing tag
                 propagation in the form of triplets based on users'
                 personal preferences and ``cold start'' degree. The
                 latent associations among users, tags, and items are
                 revealed based on a tensor factorization model in order
                 to build personalized item recommendations. In our
                 experiments with real-world social data, we show the
                 superiority of the proposed approach over other
                  state-of-the-art methods, as it successfully tackles
                  several problems in social tagging systems.
                 Finally, we present the recommendation methodology in
                 the multimodal engine of I-SEARCH, where users'
                 interaction capabilities are demonstrated.",
  acknowledgement = ack-nhfb,
  articleno =    "26",
  fjournal =     "ACM Transactions on Interactive Intelligent Systems
                 (TIIS)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}

@Article{Callaway:2014:EMD,
  author =       "Charles Callaway and Oliviero Stock and Elyon
                 Dekoven",
  title =        "Experiments with Mobile Drama in an Instrumented
                 Museum for Inducing Conversation in Small Groups",
  journal =      j-TIIS,
  volume =       "4",
  number =       "1",
  pages =        "2:1--2:??",
  month =        apr,
  year =         "2014",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/2584250",
  ISSN =         "2160-6455 (print), 2160-6463 (electronic)",
  bibdate =      "Sat Apr 12 11:14:27 MDT 2014",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tiis.bib",
  abstract =     "Small groups can have a better museum visit when that
                 visit is both a social and an educational occasion. The
                 unmediated discussion that often ensues during a shared
                 cultural experience, especially when it is with a small
                 group whose members already know each other, has been
                 shown by ethnographers to be important for a more
                 enriching experience. We present DRAMATRIC, a mobile
                 presentation system that delivers hour-long dramas to
                 small groups of museum visitors. DRAMATRIC continuously
                 receives sensor data from the museum environment during
                 a museum visit and analyzes group behavior from that
                 data. On the basis of that analysis, DRAMATRIC delivers
                 a series of dynamically coordinated dramatic scenes
                 about exhibits that the group walks near, each designed
                 to stimulate group discussion. Each drama presentation
                 contains small, complementary differences in the
                 narrative content heard by the different members of the
                 group, leveraging the tension/release cycle of
                 narrative to naturally lead visitors to fill in missing
                 pieces in their own drama by interacting with their
                 fellow group members. Using four specific techniques to
                 produce these coordinated narrative variations, we
                 describe two experiments: one in a neutral, nonmobile
                 environment, and the other a controlled experiment with
                 a full-scale drama in an actual museum. The first
                 experiment tests the hypothesis that narrative
                 differences will lead to increased conversation
                 compared to hearing identical narratives, whereas the
                 second experiment tests whether switching from
                 presenting a drama using one technique to using another
                 technique for the subsequent drama will result in
                 increased conversation. The first experiment shows that
                 hearing coordinated narrative variations can in fact
                 lead to significantly increased conversation. The
                 second experiment also serves as a framework for future
                 studies that evaluate strategies for similar adaptive
                 systems.",
  acknowledgement = ack-nhfb,
  articleno =    "2",
  fjournal =     "ACM Transactions on Interactive Intelligent Systems
                 (TIIS)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}

@Article{Martens:2014:ISI,
  author =       "Jean-Bernard Martens",
  title =        "Interactive Statistics with {Illmo}",
  journal =      j-TIIS,
  volume =       "4",
  number =       "1",
  pages =        "4:1--4:??",
  month =        apr,
  year =         "2014",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/2509108",
  ISSN =         "2160-6455 (print), 2160-6463 (electronic)",
  bibdate =      "Sat Apr 12 11:14:27 MDT 2014",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tiis.bib",
  abstract =     "Progress in empirical research relies on adequate
                 statistical analysis and reporting. This article
                 proposes an alternative approach to statistical
                 modeling that is based on an old but mostly forgotten
                 idea, namely Thurstone modeling. Traditional
                 statistical methods assume that either the measured
                 data, in the case of parametric statistics, or the
                 rank-order transformed data, in the case of
                 nonparametric statistics, are samples from a specific
                 (usually Gaussian) distribution with unknown
                 parameters. Consequently, such methods should not be
                 applied when this assumption is not valid. Thurstone
                 modeling similarly assumes the existence of an
                 underlying process that obeys an a priori assumed
                 distribution with unknown parameters, but combines this
                 underlying process with a flexible response mechanism
                 that can be either continuous or discrete and either
                 linear or nonlinear. One important advantage of
                 Thurstone modeling is that traditional statistical
                 methods can still be applied on the underlying process,
                 irrespective of the nature of the measured data itself.
                 Another advantage is that Thurstone models can be
                 graphically represented, which helps to communicate
                 them to a broad audience. A new interactive statistical
                  package, Interactive Log Likelihood MOdeling (Illmo),
                 was specifically designed for estimating and rendering
                 Thurstone models and is intended to bring Thurstone
                 modeling within the reach of persons who are not
                 experts in statistics. Illmo is unique in the sense
                 that it provides not only extensive graphical
                 renderings of the data analysis results but also an
                 interface for navigating between different model
                 options. In this way, users can interactively explore
                 different models and decide on an adequate balance
                 between model complexity and agreement with the
                 experimental data. Hypothesis testing on model
                 parameters is also made intuitive and is supported by
                 both textual and graphical feedback. The flexibility
                  and ease of use of Illmo mean that it is also
                 potentially useful as a didactic tool for teaching
                 statistics.",
  acknowledgement = ack-nhfb,
  articleno =    "4",
  fjournal =     "ACM Transactions on Interactive Intelligent Systems
                 (TIIS)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}

@Article{Riveiro:2014:ENM,
  author =       "Maria Riveiro",
  title =        "Evaluation of Normal Model Visualization for Anomaly
                 Detection in Maritime Traffic",
  journal =      j-TIIS,
  volume =       "4",
  number =       "1",
  pages =        "5:1--5:??",
  month =        apr,
  year =         "2014",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/2591511",
  ISSN =         "2160-6455 (print), 2160-6463 (electronic)",
  bibdate =      "Sat Apr 12 11:14:27 MDT 2014",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tiis.bib",
  abstract =     "Monitoring dynamic objects in surveillance
                 applications is normally a demanding activity for
                 operators, not only because of the complexity and high
                 dimensionality of the data but also because of other
                 factors like time constraints and uncertainty. Timely
                 detection of anomalous objects or situations that need
                 further investigation may reduce operators' cognitive
                 load. Surveillance applications may include anomaly
                 detection capabilities, but their use is not
                 widespread, as they usually generate a high number of
                 false alarms, they do not provide appropriate cognitive
                 support for operators, and their outcomes can be
                 difficult to comprehend and trust. Visual analytics can
                 bridge the gap between computational and human
                 approaches to detecting anomalous behavior in traffic
                 data, making this process more transparent. As a step
                 toward this goal of transparency, this article presents
                 an evaluation that assesses whether visualizations of
                 normal behavioral models of vessel traffic support two
                 of the main analytical tasks specified during our field
                 work in maritime control centers. The evaluation
                 combines quantitative and qualitative usability
                 assessments. The quantitative evaluation, which was
                 carried out with a proof-of-concept prototype, reveals
                 that participants who used the visualization of normal
                 behavioral models outperformed the group that did not
                 do so. The qualitative assessment shows that domain
                 experts have a positive attitude toward the provision
                 of automatic support and the visualization of normal
                 behavioral models, as these aids may reduce reaction
                 time and increase trust in and comprehensibility of the
                 system.",
  acknowledgement = ack-nhfb,
  articleno =    "5",
  fjournal =     "ACM Transactions on Interactive Intelligent Systems
                 (TIIS)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}

@Article{Chen:2014:EPM,
  author =       "Yingjie Victor Chen and Zhenyu Cheryl Qian and Robert
                 Woodbury and John Dill and Chris D. Shaw",
  title =        "Employing a Parametric Model for Analytic Provenance",
  journal =      j-TIIS,
  volume =       "4",
  number =       "1",
  pages =        "6:1--6:??",
  month =        apr,
  year =         "2014",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/2591510",
  ISSN =         "2160-6455 (print), 2160-6463 (electronic)",
  bibdate =      "Sat Apr 12 11:14:27 MDT 2014",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tiis.bib",
  abstract =     "We introduce a propagation-based parametric symbolic
                 model approach to supporting analytic provenance. This
                 approach combines a script language to capture and
                 encode the analytic process and a parametrically
                 controlled symbolic model to represent and reuse the
                 logic of the analysis process. Our approach first
                 appeared in a visual analytics system called CZSaw.
                 Using a script to capture the analyst's interactions at
                 a meaningful system action level allows the creation of
                 a parametrically controlled symbolic model in the form
                  of a Directed Acyclic Graph (DAG). The DAG allows
                  changes to be propagated. Graph nodes correspond to
                 variables in CZSaw scripts, which are results (data and
                 data visualizations) generated from user interactions.
                 The user interacts with variables representing entities
                 or relations to create the next step's results. Graph
                 edges represent dependency relationships among nodes.
                 Any change to a variable triggers the propagation
                 mechanism to update downstream dependent variables and
                 in turn updates data views to reflect the change. The
                 analyst can reuse parts of the analysis process by
                 assigning new values to a node in the graph. We
                 evaluated this symbolic model approach by solving three
                 IEEE VAST Challenge contest problems (from IEEE VAST
                 2008, 2009, and 2010). In each of these challenges, the
                 analyst first created a symbolic model to explore,
                 understand, analyze, and solve a particular subproblem
                 and then reused the model via its dependency graph
                 propagation mechanism to solve similar subproblems.
                 With the script and model, CZSaw supports the analytic
                 provenance by capturing, encoding, and reusing the
                 analysis process. The analyst can recall the
                 chronological states of the analysis process with the
                 CZSaw script and may interpret the underlying rationale
                 of the analysis with the symbolic model.",
  acknowledgement = ack-nhfb,
  articleno =    "6",
  fjournal =     "ACM Transactions on Interactive Intelligent Systems
                 (TIIS)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}

@Article{Chan:2014:RCT,
  author =       "Yu-Hsuan Chan and Carlos D. Correa and Kwan-Liu Ma",
  title =        "{Regression Cube}: a Technique for Multidimensional
                 Visual Exploration and Interactive Pattern Finding",
  journal =      j-TIIS,
  volume =       "4",
  number =       "1",
  pages =        "7:1--7:??",
  month =        apr,
  year =         "2014",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/2590349",
  ISSN =         "2160-6455 (print), 2160-6463 (electronic)",
  bibdate =      "Sat Sep 13 13:17:36 MDT 2014",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tiis.bib",
  abstract =     "Scatterplots are commonly used to visualize
                 multidimensional data; however, 2D projections of data
                 offer limited understanding of the high-dimensional
                 interactions between data points. We introduce an
                 interactive 3D extension of scatterplots called the
                 Regression Cube (RC), which augments a 3D scatterplot
                  with three facets on which the correlations between
                  pairs of variables are revealed by sensitivity lines and
                 sensitivity streamlines. The sensitivity visualization
                 of local regression on the 2D projections provides
                 insights about the shape of the data through its
                 orientation and continuity cues. We also introduce a
                 series of visual operations such as clustering,
                 brushing, and selection supported in RC. By iteratively
                 refining the selection of data points of interest, RC
                 is able to reveal salient local correlation patterns
                 that may otherwise remain hidden with a global
                 analysis. We have demonstrated our system with two
                 examples and a user-oriented evaluation, and we show
                 how RCs enable interactive visual exploration of
                 multidimensional datasets via a variety of
                 classification and information retrieval tasks. A video
                 demo of RC is available.",
  acknowledgement = ack-nhfb,
  articleno =    "7",
  fjournal =     "ACM Transactions on Interactive Intelligent Systems
                 (TIIS)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}

@Article{Jawaheer:2014:MUP,
  author =       "Gawesh Jawaheer and Peter Weller and Patty Kostkova",
  title =        "Modeling User Preferences in Recommender Systems: a
                 Classification Framework for Explicit and Implicit User
                 Feedback",
  journal =      j-TIIS,
  volume =       "4",
  number =       "2",
  pages =        "8:1--8:??",
  month =        jul,
  year =         "2014",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/2512208",
  ISSN =         "2160-6455 (print), 2160-6463 (electronic)",
  bibdate =      "Sat Sep 13 13:15:34 MDT 2014",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tiis.bib",
  abstract =     "Recommender systems are firmly established as a
                 standard technology for assisting users with their
                 choices; however, little attention has been paid to the
                 application of the user model in recommender systems,
                 particularly the variability and noise that are an
                 intrinsic part of human behavior and activity. To
                 enable recommender systems to suggest items that are
                 useful to a particular user, it can be essential to
                 understand the user and his or her interactions with
                 the system. These interactions typically manifest
                 themselves as explicit and implicit user feedback that
                 provides the key indicators for modeling users'
                 preferences for items and essential information for
                 personalizing recommendations. In this article, we
                 propose a classification framework for the use of
                 explicit and implicit user feedback in recommender
                 systems based on a set of distinct properties that
                 include Cognitive Effort, User Model, Scale of
                 Measurement, and Domain Relevance. We develop a set of
                 comparison criteria for explicit and implicit user
                 feedback to emphasize the key properties. Using our
                 framework, we provide a classification of recommender
                 systems that have addressed questions about user
                 feedback, and we review state-of-the-art techniques to
                 improve such user feedback and thereby improve the
                 performance of the recommender system. Finally, we
                 formulate challenges for future research on improvement
                 of user feedback.",
  acknowledgement = ack-nhfb,
  articleno =    "8",
  fjournal =     "ACM Transactions on Interactive Intelligent Systems
                 (TIIS)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}

@Article{Fang:2014:CLM,
  author =       "Yi Fang and Ziad Al Bawab and Jean-Francois Crespo",
  title =        "Collaborative Language Models for Localized Query
                 Prediction",
  journal =      j-TIIS,
  volume =       "4",
  number =       "2",
  pages =        "9:1--9:??",
  month =        jul,
  year =         "2014",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/2622617",
  ISSN =         "2160-6455 (print), 2160-6463 (electronic)",
  bibdate =      "Sat Sep 13 13:15:34 MDT 2014",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tiis.bib",
  abstract =     "Localized query prediction (LQP) is the task of
                 estimating web query trends for a specific location.
                 This problem subsumes many interesting personalized web
                 applications, such as personalized buzz query
                 detection, query expansion, and query
                 recommendation. These personalized applications can
                 greatly enhance user interaction with web search
                 engines by providing more customized information
                 discovered from user input (i.e., queries), but the LQP
                 task has rarely been investigated in the literature.
                 Although abundant work on estimating global web
                 search trends does exist, it often encounters the big
                 challenge of data sparsity when personalization comes
                 into play. In this article, we tackle the LQP task by
                 proposing a series of collaborative language models
                 (CLMs). CLMs alleviate the data sparsity issue by
                 collaboratively collecting queries and trend
                 information from other locations. Traditional
                 statistical language models assume a fixed background
                 language model, which sacrifices
                 personalization. In contrast, CLMs are personalized
                 language models with flexible background language
                 models customized to various locations. The most
                 sophisticated CLM enables the collaboration to adapt to
                 specific query topics, which further advances the
                 personalization level. An extensive set of experiments
                 has been conducted on a large-scale web query log to
                 demonstrate the effectiveness of the proposed models.",
  acknowledgement = ack-nhfb,
  articleno =    "9",
  fjournal =     "ACM Transactions on Interactive Intelligent Systems
                 (TIIS)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}

@Article{Castellano:2014:CSA,
  author =       "Ginevra Castellano and Iolanda Leite and Andr{\'e}
                 Pereira and Carlos Martinho and Ana Paiva and Peter W.
                 Mcowan",
  title =        "Context-Sensitive Affect Recognition for a Robotic
                 Game Companion",
  journal =      j-TIIS,
  volume =       "4",
  number =       "2",
  pages =        "10:1--10:??",
  month =        jul,
  year =         "2014",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/2622615",
  ISSN =         "2160-6455 (print), 2160-6463 (electronic)",
  bibdate =      "Sat Sep 13 13:15:34 MDT 2014",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tiis.bib",
  abstract =     "Social perception abilities are among the most
                 important skills necessary for robots to engage humans
                 in natural forms of interaction. Affect-sensitive
                 robots are more likely to be able to establish and
                 maintain believable interactions over extended periods
                 of time. Nevertheless, the integration of affect
                 recognition frameworks in real-time human-robot
                 interaction scenarios is still underexplored. In this
                 article, we propose and evaluate a context-sensitive
                 affect recognition framework for a robotic game
                 companion for children. The robot can automatically
                 detect affective states experienced by children in an
                 interactive chess game scenario. The affect recognition
                 framework is based on the automatic extraction of task
                 features and social interaction-based features.
                 Vision-based indicators of the children's nonverbal
                 behaviour are merged with contextual features related
                 to the game and the interaction and given as input to
                 support vector machines to create a context-sensitive
                 multimodal system for affect recognition. The affect
                 recognition framework is fully integrated in an
                 architecture for adaptive human-robot interaction.
                 Experimental evaluation showed that children's affect
                 can be successfully predicted using a combination of
                 behavioural and contextual data related to the game and
                 the interaction with the robot. It was found that
                 contextual data alone can be used to successfully
                 predict a subset of affective dimensions, such as
                 interest toward the robot. Experiments also showed that
                 engagement with the robot can be predicted using
                 information about the user's valence, interest and
                 anticipatory behaviour. These results provide evidence
                 that social engagement can be modelled as a state
                 consisting of affect and attention components in the
                 context of the interaction.",
  acknowledgement = ack-nhfb,
  articleno =    "10",
  fjournal =     "ACM Transactions on Interactive Intelligent Systems
                 (TIIS)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}

@Article{Steichen:2014:IVT,
  author =       "Ben Steichen and Cristina Conati and Giuseppe
                 Carenini",
  title =        "Inferring Visualization Task Properties, User
                 Performance, and User Cognitive Abilities from Eye Gaze
                 Data",
  journal =      j-TIIS,
  volume =       "4",
  number =       "2",
  pages =        "11:1--11:??",
  month =        jul,
  year =         "2014",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/2633043",
  ISSN =         "2160-6455 (print), 2160-6463 (electronic)",
  bibdate =      "Sat Sep 13 13:15:34 MDT 2014",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tiis.bib",
  abstract =     "Information visualization systems have traditionally
                 followed a one-size-fits-all model, typically ignoring
                 an individual user's needs, abilities, and preferences.
                 However, recent research has indicated that
                 visualization performance could be improved by adapting
                 aspects of the visualization to the individual user. To
                 this end, this article presents research aimed at
                 supporting the design of novel user-adaptive
                 visualization systems. In particular, we discuss
                 results on using information on user eye gaze patterns
                 while interacting with a given visualization to predict
                 properties of the user's visualization task; the user's
                 performance (in terms of predicted task completion
                 time); and the user's individual cognitive abilities,
                 such as perceptual speed, visual working memory, and
                 verbal working memory. We provide a detailed analysis
                 of different eye gaze feature sets, as well as
                 over-time accuracies. We show that these predictions
                 are significantly better than a baseline classifier
                 even during the early stages of visualization usage.
                 These findings are then discussed with a view to
                 designing visualization systems that can adapt to the
                 individual user in real time.",
  acknowledgement = ack-nhfb,
  articleno =    "11",
  fjournal =     "ACM Transactions on Interactive Intelligent Systems
                 (TIIS)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}

@Article{Cuayahuitl:2014:ISI,
  author =       "Heriberto Cuay{\'a}huitl and Lutz Frommberger and Nina
                 Dethlefs and Antoine Raux and Mathew Marge and Hendrik
                 Zender",
  title =        "Introduction to the Special Issue on Machine Learning
                 for Multiple Modalities in Interactive Systems and
                 Robots",
  journal =      j-TIIS,
  volume =       "4",
  number =       "3",
  pages =        "12e:1--12e:??",
  month =        oct,
  year =         "2014",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/2670539",
  ISSN =         "2160-6455 (print), 2160-6463 (electronic)",
  bibdate =      "Tue Oct 14 17:38:05 MDT 2014",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tiis.bib",
  abstract =     "This special issue highlights research articles that
                 apply machine learning to robots and other systems that
                 interact with users through more than one modality,
                 such as speech, gestures, and vision. For example, a
                 robot may coordinate its speech with its actions,
                 taking into account (audio-)visual feedback during
                 their execution. Machine learning provides interactive
                 systems with opportunities to improve performance not
                 only of individual components but also of the system as
                 a whole. However, machine learning methods that
                 encompass multiple modalities of an interactive system
                 are still relatively hard to find. The articles in this
                 special issue represent examples that contribute to
                 filling this gap.",
  acknowledgement = ack-nhfb,
  articleno =    "12e",
  fjournal =     "ACM Transactions on Interactive Intelligent Systems
                 (TIIS)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}

@Article{Ngo:2014:EIM,
  author =       "Hung Ngo and Matthew Luciw and Jawas Nagi and
                 Alexander Forster and J{\"u}rgen Schmidhuber and Ngo
                 Anh Vien",
  title =        "Efficient Interactive Multiclass Learning from Binary
                 Feedback",
  journal =      j-TIIS,
  volume =       "4",
  number =       "3",
  pages =        "12:1--12:??",
  month =        aug,
  year =         "2014",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/2629631",
  ISSN =         "2160-6455 (print), 2160-6463 (electronic)",
  bibdate =      "Sat Sep 13 13:15:36 MDT 2014",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tiis.bib",
  abstract =     "We introduce a novel algorithm called upper confidence
                 --- weighted learning (UCWL) for online multiclass
                 learning from binary feedback (e.g., feedback that
                 indicates whether the prediction was right or wrong).
                 UCWL combines the upper confidence bound (UCB)
                 framework with the soft confidence-weighted (SCW)
                 online learning scheme. In UCB, each instance is
                 classified using both score and uncertainty. For a
                 given instance in the sequence, the algorithm might
                 guess its class label primarily to reduce the class
                 uncertainty. This is a form of informed exploration,
                 which enables the performance to improve with lower
                 sample complexity compared to the case without
                 exploration. Combining UCB with SCW leads to the
                 ability to deal well with noisy and nonseparable data,
                 and state-of-the-art performance is achieved without
                 increasing the computational cost. A potential
                 application setting is human-robot interaction (HRI),
                 where the robot is learning to classify some set of
                 inputs while the human teaches it by providing only
                 binary feedback, or sometimes even the wrong answer
                 entirely. Experimental results in the HRI setting and
                 with two benchmark datasets from other settings show
                 that UCWL outperforms other state-of-the-art algorithms
                 in the online binary feedback setting, and surprisingly
                 even sometimes outperforms state-of-the-art algorithms
                 that get full feedback (e.g., the true class label),
                 whereas UCWL gets only binary feedback on the same data
                 sequence.",
  acknowledgement = ack-nhfb,
  articleno =    "12",
  fjournal =     "ACM Transactions on Interactive Intelligent Systems
                 (TIIS)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}

@Article{Benotti:2014:INL,
  author =       "Luciana Benotti and Tessa Lau and Mart{\'\i}n
                 Villalba",
  title =        "Interpreting Natural Language Instructions Using
                 Language, Vision, and Behavior",
  journal =      j-TIIS,
  volume =       "4",
  number =       "3",
  pages =        "13:1--13:??",
  month =        aug,
  year =         "2014",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/2629632",
  ISSN =         "2160-6455 (print), 2160-6463 (electronic)",
  bibdate =      "Sat Sep 13 13:15:36 MDT 2014",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tiis.bib",
  abstract =     "We define the problem of automatic instruction
                 interpretation as follows. Given a natural language
                 instruction, can we automatically predict what an
                 instruction follower, such as a robot, should do in the
                 environment to follow that instruction? Previous
                 approaches to automatic instruction interpretation have
                 required either extensive domain-dependent rule writing
                 or extensive manually annotated corpora. This article
                 presents a novel approach that leverages a large amount
                 of unannotated, easy-to-collect data from humans
                 interacting in a game-like environment. Our approach
                 uses an automatic annotation phase based on artificial
                 intelligence planning, for which two different
                 annotation strategies are compared: one based on
                 behavioral information and the other based on
                 visibility information. The resulting annotations are
                 used as training data for different automatic
                 classifiers. Our approach is based on the intuition
                 that the problem of interpreting a situated instruction
                 can be cast as a classification problem of choosing
                 among the actions that are possible in the situation.
                 Classification is done by combining language, vision,
                 and behavior information. Our empirical analysis shows
                 that machine learning classifiers achieve 77\% accuracy
                 on this task on available English corpora and 74\% on
                 similar German corpora. Finally, the inclusion of human
                 feedback in the interpretation process is shown to
                 boost performance to 92\% for the English corpus and
                 90\% for the German corpus.",
  acknowledgement = ack-nhfb,
  articleno =    "13",
  fjournal =     "ACM Transactions on Interactive Intelligent Systems
                 (TIIS)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}

@Article{Keizer:2014:MLS,
  author =       "Simon Keizer and Mary Ellen Foster and Zhuoran Wang
                 and Oliver Lemon",
  title =        "Machine Learning for Social Multiparty Human--Robot
                 Interaction",
  journal =      j-TIIS,
  volume =       "4",
  number =       "3",
  pages =        "14:1--14:??",
  month =        oct,
  year =         "2014",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/2600021",
  ISSN =         "2160-6455 (print), 2160-6463 (electronic)",
  bibdate =      "Tue Oct 14 17:38:05 MDT 2014",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tiis.bib",
  abstract =     "We describe a variety of machine-learning techniques
                 that are being applied to social multiuser human--robot
                 interaction using a robot bartender in our scenario. We
                 first present a data-driven approach to social state
                 recognition based on supervised learning. We then
                 describe an approach to social skills execution (that
                 is, action selection for generating socially
                 appropriate robot behavior), which is based on
                 reinforcement learning, using a data-driven simulation
                 of multiple users to train execution policies for
                 social skills. Next, we describe how these components
                 for social state recognition and skills execution have
                 been integrated into an end-to-end robot bartender
                 system, and we discuss the results of a user
                 evaluation. Finally, we present an alternative
                 unsupervised learning framework that combines social
                 state recognition and social skills execution based on
                 hierarchical Dirichlet processes and an infinite POMDP
                 interaction manager. The models make use of data from
                 both human--human interactions collected in a number of
                 German bars and human--robot interactions recorded in
                 the evaluation of an initial version of the system.",
  acknowledgement = ack-nhfb,
  articleno =    "14",
  fjournal =     "ACM Transactions on Interactive Intelligent Systems
                 (TIIS)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}

@Article{Cuayahuitl:2014:NHR,
  author =       "Heriberto Cuay{\'a}huitl and Ivana
                 Kruijff-Korbayov{\'a} and Nina Dethlefs",
  title =        "Nonstrict Hierarchical Reinforcement Learning for
                 Interactive Systems and Robots",
  journal =      j-TIIS,
  volume =       "4",
  number =       "3",
  pages =        "15:1--15:??",
  month =        oct,
  year =         "2014",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/2659003",
  ISSN =         "2160-6455 (print), 2160-6463 (electronic)",
  bibdate =      "Tue Oct 14 17:38:05 MDT 2014",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tiis.bib",
  abstract =     "Conversational systems and robots that use
                 reinforcement learning for policy optimization in large
                 domains often face the problem of limited scalability.
                 This problem has been addressed either by using
                 function approximation techniques that approximate the
                 true value function of a policy or by using
                 a hierarchical decomposition of a learning task into
                 subtasks. We present a novel approach for dialogue
                 policy optimization that combines the benefits of both
                 hierarchical control and function approximation and
                 that allows flexible transitions between dialogue
                 subtasks to give human users more control over the
                 dialogue. To this end, each reinforcement learning
                 agent in the hierarchy is extended with a subtask
                 transition function and a dynamic state space to allow
                 flexible switching between subdialogues. In addition,
                 the subtask policies are represented with linear
                 function approximation in order to generalize the
                 decision making to situations unseen in training. Our
                 proposed approach is evaluated in an interactive
                 conversational robot that learns to play quiz games.
                 Experimental results, using simulation and real users,
                 provide evidence that our proposed approach can lead to
                 more flexible (natural) interactions than strict
                 hierarchical control and that it is preferred by human
                 users.",
  acknowledgement = ack-nhfb,
  articleno =    "15",
  fjournal =     "ACM Transactions on Interactive Intelligent Systems
                 (TIIS)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}

@Article{Bulling:2015:ISI,
  author =       "Andreas Bulling and Ulf Blanke and Desney Tan and Jun
                 Rekimoto and Gregory Abowd",
  title =        "Introduction to the Special Issue on Activity
                 Recognition for Interaction",
  journal =      j-TIIS,
  volume =       "4",
  number =       "4",
  pages =        "16:1--16:??",
  month =        jan,
  year =         "2015",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/2694858",
  ISSN =         "2160-6455 (print), 2160-6463 (electronic)",
  bibdate =      "Thu Jan 29 10:52:31 MST 2015",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tiis.bib",
  abstract =     "This editorial introduction describes the aims and
                 scope of the ACM Transactions on Interactive
                 Intelligent Systems special issue on Activity
                 Recognition for Interaction. It explains why activity
                 recognition is becoming crucial as part of the cycle of
                 interaction between users and computing systems, and it
                 shows how the five articles selected for this special
                 issue reflect this theme.",
  acknowledgement = ack-nhfb,
  articleno =    "16e",
  fjournal =     "ACM Transactions on Interactive Intelligent Systems
                 (TIIS)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}

@Article{Ye:2015:UUS,
  author =       "Juan Ye and Graeme Stevenson and Simon Dobson",
  title =        "{USMART}: an Unsupervised Semantic Mining Activity
                 Recognition Technique",
  journal =      j-TIIS,
  volume =       "4",
  number =       "4",
  pages =        "16:1--16:??",
  month =        jan,
  year =         "2015",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/2662870",
  ISSN =         "2160-6455 (print), 2160-6463 (electronic)",
  bibdate =      "Thu Jan 29 10:52:31 MST 2015",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tiis.bib",
  abstract =     "Recognising high-level human activities from low-level
                 sensor data is a crucial driver for pervasive systems
                 that wish to provide seamless and distraction-free
                 support for users engaged in normal activities.
                 Research in this area has grown alongside advances in
                 sensing and communications, and experiments have
                 yielded sensor traces coupled with ground truth
                 annotations about the underlying environmental
                 conditions and user actions. Traditional machine
                 learning has had some success in recognising human
                 activities; but the need for large volumes of annotated
                 data and the danger of overfitting to specific
                 conditions represent challenges in connection with the
                 building of models applicable to a wide range of users,
                 activities, and environments. We present USMART, a
                 novel unsupervised technique that combines data- and
                 knowledge-driven techniques. USMART uses a general
                 ontology model to represent domain knowledge that can
                 be reused across different environments and users, and
                 we augment a range of learning techniques with
                 ontological semantics to facilitate the unsupervised
                 discovery of patterns in how each user performs daily
                 activities. We evaluate our approach against four
                 real-world third-party datasets featuring different
                 user populations and sensor configurations, and we find
                 that USMART achieves up to 97.5\% accuracy in
                 recognising daily activities.",
  acknowledgement = ack-nhfb,
  articleno =    "16",
  fjournal =     "ACM Transactions on Interactive Intelligent Systems
                 (TIIS)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}

@Article{Dim:2015:ADS,
  author =       "Eyal Dim and Tsvi Kuflik",
  title =        "Automatic Detection of Social Behavior of Museum
                 Visitor Pairs",
  journal =      j-TIIS,
  volume =       "4",
  number =       "4",
  pages =        "17:1--17:??",
  month =        jan,
  year =         "2015",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/2662869",
  ISSN =         "2160-6455 (print), 2160-6463 (electronic)",
  bibdate =      "Thu Jan 29 10:52:31 MST 2015",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tiis.bib",
  abstract =     "In many cases, visitors come to a museum in small
                 groups. In such cases, the visitors' social context has
                 an impact on their museum visit experience. Knowing the
                 social context may allow a system to provide socially
                 aware services to the visitors. Evidence of the social
                 context can be gained from observing/monitoring the
                 visitors' social behavior. However, automatic
                 identification of a social context requires, on the one
                 hand, identifying typical social behavior patterns and,
                 on the other, using relevant sensors that measure
                 various signals and reason about them to detect the
                 visitors' social behavior. We present such typical
                 social behavior patterns of visitor pairs, identified
                 by observations, and then the instrumentation,
                 detection process, reasoning, and analysis of measured
                 signals that enable us to detect the visitors' social
                 behavior. Simple sensor data, such as proximity to
                 other visitors, proximity to museum points of interest,
                 and visitor orientation, are used to detect social
                 synchronization, attention to the social companion, and
                 interest in museum exhibits. The presented approach may
                 allow future research to offer adaptive services to
                 museum visitors, based on their social context, to
                 better support their group visit experience.
  acknowledgement = ack-nhfb,
  articleno =    "17",
  fjournal =     "ACM Transactions on Interactive Intelligent Systems
                 (TIIS)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}

@Article{Caramiaux:2015:AGR,
  author =       "Baptiste Caramiaux and Nicola Montecchio and Atau
                 Tanaka and Fr{\'e}d{\'e}ric Bevilacqua",
  title =        "Adaptive Gesture Recognition with Variation Estimation
                 for Interactive Systems",
  journal =      j-TIIS,
  volume =       "4",
  number =       "4",
  pages =        "18:1--18:??",
  month =        jan,
  year =         "2015",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/2643204",
  ISSN =         "2160-6455 (print), 2160-6463 (electronic)",
  bibdate =      "Thu Jan 29 10:52:31 MST 2015",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tiis.bib",
  abstract =     "This article presents a gesture recognition/adaptation
                 system for human--computer interaction applications
                 that goes beyond activity classification and that, as a
                 complement to gesture labeling, characterizes the
                 movement execution. We describe a template-based
                 recognition method that simultaneously aligns the input
                 gesture to the templates using a Sequential Monte Carlo
                 inference technique. Contrary to standard
                 template-based methods based on dynamic programming,
                 such as Dynamic Time Warping, the algorithm has an
                 adaptation process that tracks gesture variation in
                 real time. The method continuously updates, during
                 execution of the gesture, the estimated parameters and
                 recognition results, which offers key advantages for
                 continuous human--machine interaction. The technique is
                 evaluated in several different ways: Recognition and
                 early recognition are evaluated on 2D onscreen pen
                 gestures; adaptation is assessed on synthetic data; and
                 both early recognition and adaptation are evaluated in
                 a user study involving 3D free-space gestures. The
                 method is robust to noise, and successfully adapts to
                 parameter variation. Moreover, it performs recognition
                 as well as or better than nonadapting offline
                 template-based methods.",
  acknowledgement = ack-nhfb,
  articleno =    "18",
  fjournal =     "ACM Transactions on Interactive Intelligent Systems
                 (TIIS)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}

@Article{Cooney:2015:AIS,
  author =       "Martin Cooney and Shuichi Nishio and Hiroshi
                 Ishiguro",
  title =        "Affectionate Interaction with a Small Humanoid Robot
                 Capable of Recognizing Social Touch Behavior",
  journal =      j-TIIS,
  volume =       "4",
  number =       "4",
  pages =        "19:1--19:??",
  month =        jan,
  year =         "2015",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/2685395",
  ISSN =         "2160-6455 (print), 2160-6463 (electronic)",
  bibdate =      "Thu Jan 29 10:52:31 MST 2015",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tiis.bib",
  abstract =     "Activity recognition, involving a capability to
                 recognize people's behavior and its underlying
                 significance, will play a crucial role in facilitating
                 the integration of interactive robotic artifacts into
                 everyday human environments. In particular, social
                 intelligence in recognizing affectionate behavior will
                 offer value by allowing companion robots to bond
                 meaningfully with interacting persons. The current
                 article addresses the issue of designing an
                 affectionate haptic interaction between a person and a
                 companion robot by exploring how a small humanoid robot
                 can behave to elicit affection while recognizing
                 touches. We report on an experiment conducted to gain
                 insight into how people perceive three fundamental
                 interactive strategies in which a robot is either
                 always highly affectionate, appropriately affectionate,
                 or superficially unaffectionate (emphasizing
                 positivity, contingency, and challenge, respectively).
                 Results provide insight into the structure of
                 affectionate interaction between humans and humanoid
                 robots (underlining the importance of an interaction
                 design expressing sincere liking, stability, and
                 variation) and suggest the usefulness of novel
                 modalities such as warmth and cold.",
  acknowledgement = ack-nhfb,
  articleno =    "19",
  fjournal =     "ACM Transactions on Interactive Intelligent Systems
                 (TIIS)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}

@Article{DeCarolis:2015:ILD,
  author =       "Berardina {De Carolis} and Stefano Ferilli and
                 Domenico Redavid",
  title =        "Incremental Learning of Daily Routines as Workflows in
                 a {Smart} Home Environment",
  journal =      j-TIIS,
  volume =       "4",
  number =       "4",
  pages =        "20:1--20:??",
  month =        jan,
  year =         "2015",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/2675063",
  ISSN =         "2160-6455 (print), 2160-6463 (electronic)",
  bibdate =      "Thu Jan 29 10:52:31 MST 2015",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tiis.bib",
  abstract =     "Smart home environments should proactively support
                 users in their activities, anticipating their needs
                 according to their preferences. Understanding what the
                 user is doing in the environment is important for
                 adapting the environment's behavior, as well as for
                 identifying situations that could be problematic for
                 the user. Enabling the environment to exploit models of
                 the user's most common behaviors is an important step
                 toward this objective. In particular, models of the
                 daily routines of a user can be exploited not only for
                 predicting his/her needs, but also for comparing the
                 actual situation at a given moment with the expected
                 one, in order to detect anomalies in his/her behavior.
                 While manually setting up process models in business
                 and factory environments may be cost-effective,
                 building models of the processes involved in people's
                 everyday life is infeasible. This fact fully justifies
                 the interest of the Ambient Intelligence community in
                 automatically learning such models from examples of
                 actual behavior. Incremental adaptation of the models
                 and the ability to express/learn complex conditions on
                 the involved tasks are also desirable. This article
                 describes how process mining can be used for learning
                 users' daily routines from a dataset of annotated
                 sensor data. The solution that we propose relies on a
                 First-Order Logic learning approach. Indeed,
                 First-Order Logic provides a single, comprehensive and
                 powerful framework for supporting all the previously
                 mentioned features. Our experiments, performed both on
                 a proprietary toy dataset and on publicly available
                 real-world ones, indicate that this approach is
                 efficient and effective for learning and modeling daily
                 routines in Smart Home Environments.",
  acknowledgement = ack-nhfb,
  articleno =    "20",
  fjournal =     "ACM Transactions on Interactive Intelligent Systems
                 (TIIS)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}

@Article{Gianni:2015:SRF,
  author =       "Mario Gianni and Geert-Jan M. Kruijff and Fiora
                 Pirri",
  title =        "A Stimulus-Response Framework for Robot Control",
  journal =      j-TIIS,
  volume =       "4",
  number =       "4",
  pages =        "21:1--21:??",
  month =        jan,
  year =         "2015",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/2677198",
  ISSN =         "2160-6455 (print), 2160-6463 (electronic)",
  bibdate =      "Thu Jan 29 10:52:31 MST 2015",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tiis.bib",
  abstract =     "We propose in this article a new approach to robot
                 cognitive control based on a stimulus-response
                 framework that models both a robot's stimuli and the
                 robot's decision to switch tasks in response to or
                 inhibit the stimuli. In an autonomous system, we expect
                 a robot to be able to deal with the whole system of
                 stimuli and to use them to regulate its behavior in
                 real-world applications. The proposed framework
                 contributes to the state of the art of robot planning
                 and high-level control in that it provides a novel
                 perspective on the interaction between robot and
                 environment. Our approach is inspired by Gibson's
                 constructive view of the concept of a stimulus and by
                 the cognitive control paradigm of task switching. We
                 model the robot's response to a stimulus in three
                 stages. We start by defining the stimuli as perceptual
                 functions yielded by the active robot processes and
                 learned via an informed logistic regression. Then we
                 model the stimulus-response relationship by estimating
                 a score matrix that leads to the selection of a single
                 response task for each stimulus, basing the estimation
                 on low-rank matrix factorization. The decision about
                 switching takes into account both an interference cost
                 and a reconfiguration cost. The interference cost
                 weighs the effort of discontinuing the current robot
                 mental state to switch to a new state, whereas the
                 reconfiguration cost weighs the effort of activating
                 the response task. A choice is finally made based on
                 the payoff of switching. Because processes play such a
                 crucial role both in the stimulus model and in the
                 stimulus-response model, and because processes are
                 activated by actions, we also address the process
                 model, which is built on a theory of action. The
                 framework is validated by several experiments that
                 exploit a full implementation on an advanced robotic
                 platform and is compared with two known approaches to
                 replanning. Results demonstrate the practical value of
                 the system in terms of robot autonomy, flexibility, and
                 usability.",
  acknowledgement = ack-nhfb,
  articleno =    "21",
  fjournal =     "ACM Transactions on Interactive Intelligent Systems
                 (TIIS)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}

@Article{Taranta:2015:EBC,
  author =       "Eugene M. {Taranta II} and Thaddeus K. Simons and
                 Rahul Sukthankar and Joseph J. {LaViola, Jr.}",
  title =        "Exploring the Benefits of Context in {$3$D} Gesture
                 Recognition for Game-Based Virtual Environments",
  journal =      j-TIIS,
  volume =       "5",
  number =       "1",
  pages =        "1:1--1:??",
  month =        mar,
  year =         "2015",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/2656345",
  ISSN =         "2160-6455 (print), 2160-6463 (electronic)",
  bibdate =      "Thu Mar 26 05:43:35 MDT 2015",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tiis.bib",
  abstract =     "We present a systematic exploration of how to utilize
                 video game context (e.g., player and environmental
                 state) to modify and augment existing 3D gesture
                 recognizers to improve accuracy for large gesture sets.
                 Specifically, our work develops and evaluates three
                 strategies for incorporating context into 3D gesture
                 recognizers. These strategies include modifying the
                 well-known Rubine linear classifier to handle
                 unsegmented input streams and per-frame retraining
                 using contextual information (CA-Linear); a GPU
                 implementation of dynamic time warping (DTW) that
                 reduces the overhead of traditional DTW by utilizing
                 context to evaluate only relevant time sequences inside
                 of a multithreaded kernel (CA-DTW); and a multiclass
                 SVM with per-class probability estimation that is
                 combined with a contextually based prior probability
                 distribution (CA-SVM). We evaluate each strategy using
                 a Kinect-based third-person perspective VE game
                 prototype that combines parkour-style navigation with
                 hand-to-hand combat. Using a simple gesture collection
                 application to collect a set of 57 gestures and the
                 game prototype that implements 37 of these gestures, we
                 conduct three experiments. In the first experiment, we
                 evaluate the effectiveness of several established
                 classifiers on our gesture set and demonstrate
                 state-of-the-art results using our proposed method. In
                 our second experiment, we generate 500 random scenarios
                 having between 5 and 19 of the 57 gestures in context.
                 We show that the contextually aware classifiers
                 CA-Linear, CA-DTW, and CA-SVM significantly outperform
                 their non-contextually aware counterparts by 37.74\%,
                 36.04\%, and 20.81\%, respectively. On the basis of the
                 results of the second experiment, we derive upper-bound
                 expectations for in-game performance for the three CA
                 classifiers: 96.61\%, 86.79\%, and 96.86\%,
                 respectively. Finally, our third experiment is an
                 in-game evaluation of the three CA classifiers with and
                 without context. Our results show that through the use
                 of context, we are able to achieve an average in-game
                 recognition accuracy of 89.67\% with CA-Linear compared
                 to 65.10\% without context, 79.04\% for CA-DTW compared
                 to 58.1\% without context, and 90.85\% with CA-SVM
                 compared to 75.2\% without context.",
  acknowledgement = ack-nhfb,
  articleno =    "1",
  fjournal =     "ACM Transactions on Interactive Intelligent Systems
                 (TIIS)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}

@Article{Gil:2015:HTI,
  author =       "Yolanda Gil",
  title =        "Human Tutorial Instruction in the Raw",
  journal =      j-TIIS,
  volume =       "5",
  number =       "1",
  pages =        "2:1--2:??",
  month =        mar,
  year =         "2015",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/2531920",
  ISSN =         "2160-6455 (print), 2160-6463 (electronic)",
  bibdate =      "Thu Mar 26 05:43:35 MDT 2015",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tiis.bib",
  abstract =     "Humans learn procedures from one another through a
                 variety of methods, such as observing someone do the
                 task, practicing by themselves, reading manuals or
                 textbooks, or getting instruction from a teacher. Some
                 of these methods generate examples that require the
                 learner to generalize appropriately. When procedures
                 are complex, however, it becomes unmanageable to induce
                 the procedures from examples alone. An alternative and
                 very common method for teaching procedures is tutorial
                 instruction, where a teacher describes in general terms
                 what actions to perform and possibly includes
                 explanations of the rationale for the actions. This
                 article provides an overview of the challenges in using
                 human tutorial instruction for teaching procedures to
                 computers. First, procedures can be very complex and
                 can involve many different types of interrelated
                 information, including (1) situating the instruction in
                 the context of relevant objects and their properties,
                 (2) describing the steps involved, (3) specifying the
                 organization of the procedure in terms of relationships
                 among steps and substeps, and (4) conveying control
                 structures. Second, human tutorial instruction is
                 naturally plagued with omissions, oversights,
                 unintentional inconsistencies, errors, and simply poor
                 design. The article presents a survey of work from the
                 literature that highlights the nature of these
                 challenges and illustrates them with numerous examples
                 of instruction in many domains. Major research
                 challenges in this area are highlighted, including the
                 difficulty of the learning task when procedures are
                 complex, the need to overcome omissions and errors in
                 the instruction, the design of a natural user interface
                 to specify procedures, the management of the
                 interaction of a human with a learning system, and the
                 combination of tutorial instruction with other teaching
                 modalities.",
  acknowledgement = ack-nhfb,
  articleno =    "2",
  fjournal =     "ACM Transactions on Interactive Intelligent Systems
                 (TIIS)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}

@Article{Pejsa:2015:GAM,
  author =       "Tomislav Pejsa and Sean Andrist and Michael Gleicher
                 and Bilge Mutlu",
  title =        "Gaze and Attention Management for Embodied
                 Conversational Agents",
  journal =      j-TIIS,
  volume =       "5",
  number =       "1",
  pages =        "3:1--3:??",
  month =        mar,
  year =         "2015",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/2724731",
  ISSN =         "2160-6455 (print), 2160-6463 (electronic)",
  bibdate =      "Thu Mar 26 05:43:35 MDT 2015",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tiis.bib",
  abstract =     "To facilitate natural interactions between humans and
                 embodied conversational agents (ECAs), we need to endow
                 the latter with the same nonverbal cues that humans use
                 to communicate. Gaze cues in particular are integral in
                 mechanisms for communication and management of
                 attention in social interactions, which can trigger
                 important social and cognitive processes, such as
                 establishment of affiliation between people or learning
                 new information. The fundamental building blocks of
                 gaze behaviors are gaze shifts: coordinated movements
                 of the eyes, head, and body toward objects and
                 information in the environment. In this article, we
                 present a novel computational model for gaze shift
                 synthesis for ECAs that supports parametric control
                 over coordinated eye, head, and upper body movements.
                 We employed the model in three studies with human
                 participants. In the first study, we validated the
                 model by showing that participants are able to
                 interpret the agent's gaze direction accurately. In the
                 second and third studies, we showed that by adjusting
                 the participation of the head and upper body in gaze
                 shifts, we can control the strength of the attention
                 signals conveyed, thereby strengthening or weakening
                 their social and cognitive effects. The second study
                 shows that manipulation of eye--head coordination in
                 gaze enables an agent to convey more information or
                 establish stronger affiliation with participants in a
                 teaching task, while the third study demonstrates how
                 manipulation of upper body coordination enables the
                 agent to communicate increased interest in objects in
                 the environment.",
  acknowledgement = ack-nhfb,
  articleno =    "3",
  fjournal =     "ACM Transactions on Interactive Intelligent Systems
                 (TIIS)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}

@Article{Deng:2015:ESA,
  author =       "James J. Deng and Clement H. C. Leung and Alfredo
                 Milani and Li Chen",
  title =        "Emotional States Associated with Music:
                 Classification, Prediction of Changes, and
                 Consideration in Recommendation",
  journal =      j-TIIS,
  volume =       "5",
  number =       "1",
  pages =        "4:1--4:??",
  month =        mar,
  year =         "2015",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/2723575",
  ISSN =         "2160-6455 (print), 2160-6463 (electronic)",
  bibdate =      "Thu Mar 26 05:43:35 MDT 2015",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tiis.bib",
  abstract =     "We present several interrelated technical and
                 empirical contributions to the problem of emotion-based
                 music recommendation and show how they can be applied
                 in a possible usage scenario. The contributions are (1)
                 a new three-dimensional resonance-arousal-valence model
                 for the representation of emotion expressed in music,
                 together with methods for automatically classifying a
                 piece of music in terms of this model, using robust
                 regression methods applied to musical/acoustic
                 features; (2) methods for predicting a listener's
                 emotional state on the assumption that the emotional
                 state has been determined entirely by a sequence of
                 pieces of music recently listened to, using conditional
                 random fields and taking into account the decay of
                 emotion intensity over time; and (3) a method for
                 selecting a ranked list of pieces of music that match a
                 particular emotional state, using a minimization
                 iteration method. A series of experiments yield
                 information about the validity of our
                 operationalizations of these contributions. Throughout
                 the article, we refer to an illustrative usage scenario
                 in which all of these contributions can be exploited,
                 where it is assumed that (1) a listener's emotional
                 state is being determined entirely by the music that he
                 or she has been listening to and (2) the listener wants
                 to hear additional music that matches his or her
                 current emotional state. The contributions are intended
                 to be useful in a variety of other scenarios as well.",
  acknowledgement = ack-nhfb,
  articleno =    "4",
  fjournal =     "ACM Transactions on Interactive Intelligent Systems
                 (TIIS)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}

@Article{Mazilu:2015:WAG,
  author =       "Sinziana Mazilu and Ulf Blanke and Moran Dorfman and
                 Eran Gazit and Anat Mirelman and Jeffrey M. Hausdorff
                 and Gerhard Tr{\"o}ster",
  title =        "A Wearable Assistant for Gait Training for
                 {Parkinson}'s Disease with Freezing of Gait in
                 Out-of-the-Lab Environments",
  journal =      j-TIIS,
  volume =       "5",
  number =       "1",
  pages =        "5:1--5:??",
  month =        mar,
  year =         "2015",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/2701431",
  ISSN =         "2160-6455 (print), 2160-6463 (electronic)",
  bibdate =      "Thu Mar 26 05:43:35 MDT 2015",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tiis.bib",
  abstract =     "People with Parkinson's disease (PD) suffer from
                 declining mobility capabilities, which cause a
                 prevalent risk of falling. Commonly, short periods of
                 motor blocks occur during walking, known as freezing of
                 gait (FoG). To slow the progressive decline of motor
                 abilities, people with PD usually undertake stationary
                 motor-training exercises in the clinics or supervised
                 by physiotherapists. We present a wearable system for
                 the support of people with PD and FoG. The system is
                 designed for independent use. It enables motor training
                 and gait assistance at home and other unsupervised
                 environments. The system consists of three components.
                 First, FoG episodes are detected in real time using
                 wearable inertial sensors and a smartphone as the
                 processing unit. Second, a feedback mechanism triggers
                 a rhythmic auditory signal to the user to alleviate
                 freeze episodes in an assistive mode. Third, the
                 smartphone-based application features support for
                 training exercises. Moreover, the system allows
                 unobtrusive and long-term monitoring of the user's
                 clinical condition by transmitting sensing data and
                 statistics to a telemedicine service. We investigate
                 the at-home acceptance of the wearable system in a
                 study with nine PD subjects. Participants deployed and
                 used the system on their own, without any clinical
                 support, at their homes during three protocol sessions
                 in 1 week. Users' feedback suggests an overall positive
                 attitude toward adopting and using the system in their
                 daily life, indicating that the system supports them in
                 improving their gait. Further, in a data-driven
                 analysis with sensing data from five participants, we
                 study whether there is an observable effect on the gait
                 during use of the system. In three out of five
                 subjects, we observed a decrease in FoG duration
                 distributions over the protocol days during
                 gait-training exercises. Moreover, sensing data-driven
                 analysis shows a decrease in FoG duration and FoG
                 number in four out of five participants when they use
                 the system as a gait-assistive tool during normal daily
                 life activities at home.",
  acknowledgement = ack-nhfb,
  articleno =    "5",
  fjournal =     "ACM Transactions on Interactive Intelligent Systems
                 (TIIS)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}
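
%%% The real-time FoG detection step summarized in Mazilu:2015:WAG
%%% above is often prototyped with the spectral ``freeze index'' of
%%% Moore and colleagues, i.e., the ratio of accelerometer power in
%%% the 3--8 Hz freeze band to the 0.5--3 Hz locomotion band. The
%%% Python sketch below shows that common baseline, not the authors'
%%% trained detector; the sampling rate, window length, hop size, and
%%% threshold are illustrative assumptions only.
%%%
%%%     import numpy as np
%%%
%%%     FS = 64                  # assumed sampling rate (Hz)
%%%     WINDOW = 4 * FS          # 4-second analysis window
%%%
%%%     def freeze_index(acc_window, fs=FS):
%%%         # Power ratio of the 3--8 Hz band to the 0.5--3 Hz band
%%%         # for one window of (vertical) acceleration samples.
%%%         spectrum = np.abs(np.fft.rfft(acc_window - acc_window.mean())) ** 2
%%%         freqs = np.fft.rfftfreq(len(acc_window), d=1.0 / fs)
%%%         freeze = spectrum[(freqs >= 3) & (freqs < 8)].sum()
%%%         locomotion = spectrum[(freqs >= 0.5) & (freqs < 3)].sum()
%%%         return freeze / max(locomotion, 1e-9)
%%%
%%%     def detect_fog(acc, threshold=2.0):
%%%         # Flag half-overlapping windows whose index exceeds the
%%%         # (illustrative) threshold; a real system would smooth
%%%         # these flags before triggering auditory cueing.
%%%         return [freeze_index(acc[s:s + WINDOW]) > threshold
%%%                 for s in range(0, len(acc) - WINDOW + 1, WINDOW // 2)]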

@Article{Salah:2015:BIS,
  author =       "Albert Ali Salah and Hayley Hung and Oya Aran and
                 Hatice Gunes and Matthew Turk",
  title =        "Brief Introduction to the Special Issue on Behavior
                 Understanding for Arts and Entertainment",
  journal =      j-TIIS,
  volume =       "5",
  number =       "2",
  pages =        "6:1--6:??",
  month =        jul,
  year =         "2015",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/2786762",
  ISSN =         "2160-6455 (print), 2160-6463 (electronic)",
  bibdate =      "Fri Aug 7 09:18:56 MDT 2015",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tiis.bib",
  abstract =     "This editorial introduction describes the aims and
                 scope of the special issue of the ACM Transactions on
                 Interactive Intelligent Systems on Behavior
                 Understanding for Arts and Entertainment, which is
                 being published in issues 2 and 3 of volume 5 of the
                 journal. Here we offer a brief introduction to the use
                 of behavior analysis for interactive systems that
                 involve creativity in either the creator or the
                 consumer of a work of art. We then characterize each of
                 the five articles included in this first part of the
                 special issue, which span a wide range of
                 applications.",
  acknowledgement = ack-nhfb,
  articleno =    "6",
  fjournal =     "ACM Transactions on Interactive Intelligent Systems
                 (TIIS)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}

@Article{Grenader:2015:VIA,
  author =       "Emily Grenader and Danilo Gasques Rodrigues and
                 Fernando Nos and Nadir Weibel",
  title =        "The {VideoMob} Interactive Art Installation Connecting
                 Strangers through Inclusive Digital Crowds",
  journal =      j-TIIS,
  volume =       "5",
  number =       "2",
  pages =        "7:1--7:??",
  month =        jul,
  year =         "2015",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/2768208",
  ISSN =         "2160-6455 (print), 2160-6463 (electronic)",
  bibdate =      "Fri Aug 7 09:18:56 MDT 2015",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tiis.bib",
  abstract =     "VideoMob is an interactive video platform and an
                 artwork that enables strangers visiting different
                 installation locations to interact across time and
                 space through a computer interface that detects their
                 presence, video-records their actions while
                 automatically removing the video background through
                 computer vision, and co-situates visitors as part of
                 the same digital environment. Through the combination
                 of individual user videos to form a digital crowd,
                 strangers are connected through the graphic display.
                 Our work is inspired by the way distant people can
                 interact with each other through technology and
                 influenced by artists working in the realm of
                 interactive art. We deployed VideoMob in a variety of
                 settings, locations, and contexts to observe hundreds
                 of visitors' reactions. By analyzing behavioral data
                 collected through depth cameras from our 1,068
                 recordings across eight venues, we studied how
                 participants behave when given the opportunity to
                 record their own video portrait into the artwork. We
                 report the specific activity performed in front of the
                 camera and the influences that existing crowds impose
                 on new participants. Our analysis informs the
                 integration of a series of possible novel interaction
                 paradigms based on real-time analysis of the visitors'
                 behavior through specific computer vision and machine
                 learning techniques that have the potential to increase
                 the engagement of the artwork's visitors and to impact
                 user experience.",
  acknowledgement = ack-nhfb,
  articleno =    "7",
  fjournal =     "ACM Transactions on Interactive Intelligent Systems
                 (TIIS)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}

@Article{Sartori:2015:AAP,
  author =       "Andreza Sartori and Victoria Yanulevskaya and Almila
                 Akdag Salah and Jasper Uijlings and Elia Bruni and Nicu
                 Sebe",
  title =        "Affective Analysis of Professional and Amateur
                 Abstract Paintings Using Statistical Analysis and Art
                 Theory",
  journal =      j-TIIS,
  volume =       "5",
  number =       "2",
  pages =        "8:1--8:??",
  month =        jul,
  year =         "2015",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/2768209",
  ISSN =         "2160-6455 (print), 2160-6463 (electronic)",
  bibdate =      "Fri Aug 7 09:18:56 MDT 2015",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tiis.bib",
  abstract =     "When artists express their feelings through the
                 artworks they create, it is believed that the resulting
                 works transform into objects with ``emotions'' capable
                 of conveying the artists' mood to the audience. There
                 is little to no dispute about this belief: Regardless
                 of the artwork, genre, time, and origin of creation,
                 people from different backgrounds are able to read the
                 emotional messages. This holds true even for the most
                 abstract paintings. Could this idea be applied to
                 machines as well? Can machines learn what makes a work
                 of art ``emotional''? In this work, we employ a
                 state-of-the-art recognition system to learn which
                 statistical patterns are associated with positive and
                 negative emotions on two different datasets that
                 comprise professional and amateur abstract artworks.
                 Moreover, we analyze and compare two different
                 annotation methods in order to establish the ground
                 truth of positive and negative emotions in abstract
                 art. Additionally, we use computer vision techniques to
                 quantify which parts of a painting evoke positive and
                 negative emotions. We also demonstrate how the
                 quantification of evidence for positive and negative
                 emotions can be used to predict which parts of a
                 painting people prefer to focus on. This method opens
                  new opportunities for research on why a specific
                 painting is perceived as emotional at global and local
                 scales.",
  acknowledgement = ack-nhfb,
  articleno =    "8",
  fjournal =     "ACM Transactions on Interactive Intelligent Systems
                 (TIIS)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}

@Article{Sanchez-Cortes:2015:MVM,
  author =       "Dairazalia Sanchez-Cortes and Shiro Kumano and
                 Kazuhiro Otsuka and Daniel Gatica-Perez",
  title =        "In the Mood for Vlog: Multimodal Inference in
                 Conversational Social Video",
  journal =      j-TIIS,
  volume =       "5",
  number =       "2",
  pages =        "9:1--9:??",
  month =        jul,
  year =         "2015",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/2641577",
  ISSN =         "2160-6455 (print), 2160-6463 (electronic)",
  bibdate =      "Fri Aug 7 09:18:56 MDT 2015",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tiis.bib",
  abstract =     "The prevalent ``share what's on your mind'' paradigm
                 of social media can be examined from the perspective of
                 mood: short-term affective states revealed by the
                 shared data. This view takes on new relevance given the
                 emergence of conversational social video as a popular
                 genre among viewers looking for entertainment and among
                 video contributors as a channel for debate, expertise
                 sharing, and artistic expression. From the perspective
                 of human behavior understanding, in conversational
                 social video both verbal and nonverbal information is
                 conveyed by speakers and decoded by viewers. We present
                 a systematic study of classification and ranking of
                 mood impressions in social video, using vlogs from
                 YouTube. Our approach considers eleven natural mood
                 categories labeled through crowdsourcing by external
                 observers on a diverse set of conversational vlogs. We
                 extract a comprehensive number of nonverbal and verbal
                 behavioral cues from the audio and video channels to
                 characterize the mood of vloggers. Then we implement
                 and validate vlog classification and vlog ranking tasks
                 using supervised learning methods. Following a
                 reliability and correlation analysis of the mood
                 impression data, our study demonstrates that, while the
                 problem is challenging, several mood categories can be
                 inferred with promising performance. Furthermore,
                 multimodal features perform consistently better than
                 single-channel features. Finally, we show that
                 addressing mood as a ranking problem is a promising
                 practical direction for several of the mood categories
                 studied.",
  acknowledgement = ack-nhfb,
  articleno =    "9",
  fjournal =     "ACM Transactions on Interactive Intelligent Systems
                 (TIIS)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}

@Article{Vezzani:2015:GPS,
  author =       "Roberto Vezzani and Martino Lombardi and Augusto
                 Pieracci and Paolo Santinelli and Rita Cucchiara",
  title =        "A General-Purpose Sensing Floor Architecture for
                 Human-Environment Interaction",
  journal =      j-TIIS,
  volume =       "5",
  number =       "2",
  pages =        "10:1--10:??",
  month =        jul,
  year =         "2015",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/2751566",
  ISSN =         "2160-6455 (print), 2160-6463 (electronic)",
  bibdate =      "Fri Aug 7 09:18:56 MDT 2015",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tiis.bib",
  abstract =     "Smart environments are now designed as natural
                 interfaces to capture and understand human behavior
                 without a need for explicit human-computer interaction.
                 In this article, we present a general-purpose
                 architecture that acquires and understands human
                 behaviors through a sensing floor. The pressure field
                 generated by moving people is captured and analyzed.
                 Specific actions and events are then detected by a
                 low-level processing engine and sent to high-level
                 interfaces providing different functions. The proposed
                 architecture and sensors are modular, general-purpose,
                 cheap, and suitable for both small- and large-area
                 coverage. Some sample entertainment and virtual reality
                 applications that we developed to test the platform are
                 presented.",
  acknowledgement = ack-nhfb,
  articleno =    "10",
  fjournal =     "ACM Transactions on Interactive Intelligent Systems
                 (TIIS)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}

@Article{Baur:2015:CAA,
  author =       "Tobias Baur and Gregor Mehlmann and Ionut Damian and
                 Florian Lingenfelser and Johannes Wagner and Birgit
                 Lugrin and Elisabeth Andr{\'e} and Patrick Gebhard",
  title =        "Context-Aware Automated Analysis and Annotation of
                 Social Human--Agent Interactions",
  journal =      j-TIIS,
  volume =       "5",
  number =       "2",
  pages =        "11:1--11:??",
  month =        jul,
  year =         "2015",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/2764921",
  ISSN =         "2160-6455 (print), 2160-6463 (electronic)",
  bibdate =      "Fri Aug 7 09:18:56 MDT 2015",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tiis.bib",
  abstract =     "The outcome of interpersonal interactions depends not
                 only on the contents that we communicate verbally, but
                 also on nonverbal social signals. Because a lack of
                 social skills is a common problem for a significant
                 number of people, serious games and other training
                 environments have recently become the focus of
                  research. In this work, we present NovA (NOnVerbal
                  behavior Analyzer), a system that analyzes and
                 facilitates the interpretation of social signals
                 automatically in a bidirectional interaction with a
                 conversational agent. It records data of interactions,
                 detects relevant social cues, and creates descriptive
                 statistics for the recorded data with respect to the
                 agent's behavior and the context of the situation. This
                 enhances the possibilities for researchers to
                 automatically label corpora of human--agent
                 interactions and to give users feedback on strengths
                 and weaknesses of their social behavior.",
  acknowledgement = ack-nhfb,
  articleno =    "11",
  fjournal =     "ACM Transactions on Interactive Intelligent Systems
                 (TIIS)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}

@Article{Salah:2015:BUA,
  author =       "Albert Ali Salah and Hayley Hung and Oya Aran and
                 Hatice Gunes and Matthew Turk",
  title =        "Behavior Understanding for Arts and Entertainment",
  journal =      j-TIIS,
  volume =       "5",
  number =       "3",
  pages =        "12:1--12:??",
  month =        oct,
  year =         "2015",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/2817208",
  ISSN =         "2160-6455 (print), 2160-6463 (electronic)",
  bibdate =      "Sat Oct 17 18:18:51 MDT 2015",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tiis.bib",
  abstract =     "This editorial introduction complements the shorter
                 introduction to the first part of the two-part special
                 issue on Behavior Understanding for Arts and
                 Entertainment. It offers a more expansive discussion of
                 the use of behavior analysis for interactive systems
                 that involve creativity, either for the producer or the
                 consumer of such a system. We first summarise the two
                 articles that appear in this second part of the special
                 issue. We then discuss general questions and challenges
                 in this domain that were suggested by the entire set of
                 seven articles of the special issue and by the comments
                 of the reviewers of these articles.",
  acknowledgement = ack-nhfb,
  articleno =    "12",
  fjournal =     "ACM Transactions on Interactive Intelligent Systems
                 (TIIS)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}

@Article{Alaoui:2015:IVM,
  author =       "Sarah Fdili Alaoui and Frederic Bevilacqua and
                 Christian Jacquemin",
  title =        "Interactive Visuals as Metaphors for Dance Movement
                 Qualities",
  journal =      j-TIIS,
  volume =       "5",
  number =       "3",
  pages =        "13:1--13:??",
  month =        oct,
  year =         "2015",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/2738219",
  ISSN =         "2160-6455 (print), 2160-6463 (electronic)",
  bibdate =      "Sat Oct 17 18:18:51 MDT 2015",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tiis.bib",
  abstract =     "The notion of ``movement qualities'' is central in
                 contemporary dance; it describes the manner in which a
                 movement is executed. Movement qualities convey
                 information revealing movement expressiveness; their
                 use has strong potential for movement-based interaction
                 with applications in arts, entertainment, education, or
                 rehabilitation. The purpose of our research is to
                 design and evaluate interactive reflexive visuals for
                 movement qualities. The theoretical basis for this
                 research is drawn from a collaboration with the members
                 of the international dance company Emio Greco|PC to
                 study their formalization of movement qualities. We
                 designed a pedagogical interactive installation called
                 Double Skin/Double Mind (DS/DM) for the analysis and
                 visualization of movement qualities through physical
                 model-based interactive renderings. In this article, we
                 first evaluate dancers' perception of the visuals as
                 metaphors for movement qualities. This evaluation shows
                 that, depending on the physical model parameterization,
                 the visuals are capable of generating dynamic behaviors
                 that the dancers associate with DS/DM movement
                 qualities. Moreover, we evaluate dance students' and
                 professionals' experience of the interactive visuals in
                 the context of a dance pedagogical workshop and a
                 professional dance training. The results of these
                 evaluations show that the dancers consider the
                 interactive visuals to be a reflexive system that
                 encourages them to perform, improves their experience,
                 and contributes to a better understanding of movement
                 qualities. Our findings support research on interactive
                 systems for real-time analysis and visualization of
                 movement qualities, which open new perspectives in
                 movement-based interaction design.",
  acknowledgement = ack-nhfb,
  articleno =    "13",
  fjournal =     "ACM Transactions on Interactive Intelligent Systems
                 (TIIS)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}

@Article{Yang:2015:QSM,
  author =       "Yi-Hsuan Yang and Yuan-Ching Teng",
  title =        "Quantitative Study of Music Listening Behavior in a
                 {Smartphone} Context",
  journal =      j-TIIS,
  volume =       "5",
  number =       "3",
  pages =        "14:1--14:??",
  month =        oct,
  year =         "2015",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/2738220",
  ISSN =         "2160-6455 (print), 2160-6463 (electronic)",
  bibdate =      "Sat Oct 17 18:18:51 MDT 2015",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tiis.bib",
  abstract =     "Context-based services have attracted increasing
                 attention because of the prevalence of sensor-rich
                 mobile devices such as smartphones. The idea is to
                 recommend information that a user would be interested
                 in according to the user's surrounding context.
                 Although remarkable progress has been made to
                 contextualize music playback, relatively little
                 research has been made using a large collection of
                 real-life listening records collected in situ. In light
                 of this fact, we present in this article a quantitative
                 study of the personal, situational, and musical factors
                 of musical preference in a smartphone context, using a
                 new dataset comprising the listening records and
                 self-report context annotation of 48 participants
                  collected over 3 weeks via an Android app. Although the
                 number of participants is limited and the population is
                 biased towards students, the dataset is unique in that
                 it is collected in a daily context, with sensor data
                 and music listening profiles recorded at the same time.
                 We investigate 3 core research questions evaluating the
                 strength of a rich set of low-level and high-level
                 audio features for music usage auto-tagging (i.e.,
                 music preference in different user activities), the
                 strength of time-domain and frequency-domain sensor
                 features for user activity classification, and how user
                 factors such as personality traits are correlated with
                 the predictability of music usage and user activity,
                 using a closed set of 8 activity classes. We provide an
                 in-depth discussion of the main findings of this study
                 and their implications for the development of
                 context-based music services for smartphones.",
  acknowledgement = ack-nhfb,
  articleno =    "14",
  fjournal =     "ACM Transactions on Interactive Intelligent Systems
                 (TIIS)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}
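
%%% Yang:2015:QSM above evaluates time-domain and frequency-domain
%%% sensor features for user-activity classification alongside audio
%%% features for music usage auto-tagging. A minimal sketch of the
%%% sensor side follows; the feature set, window shape, and the
%%% Random Forest choice are assumptions for illustration, not the
%%% authors' exact pipeline (NumPy and scikit-learn assumed).
%%%
%%%     import numpy as np
%%%     from sklearn.ensemble import RandomForestClassifier
%%%
%%%     def sensor_features(window):
%%%         # window: (n_samples, 3) accelerometer block.
%%%         mag = np.linalg.norm(window, axis=1)
%%%         spectrum = np.abs(np.fft.rfft(mag - mag.mean()))
%%%         return np.concatenate([
%%%             window.mean(axis=0), window.std(axis=0),  # time domain
%%%             [mag.mean(), mag.std()],
%%%             spectrum[:8],                    # low-frequency energy
%%%         ])
%%%
%%%     def train_activity_classifier(windows, labels):
%%%         # windows: list of (n_samples, 3) arrays; labels drawn
%%%         # from the study's closed set of 8 activity classes.
%%%         X = np.stack([sensor_features(w) for w in windows])
%%%         return RandomForestClassifier(n_estimators=200).fit(X, labels)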

@Article{Bott:2015:WRW,
  author =       "Jared N. Bott and Joseph J. {LaViola Jr.}",
  title =        "The {WOZ Recognizer}: a {Wizard of Oz} Sketch
                 Recognition System",
  journal =      j-TIIS,
  volume =       "5",
  number =       "3",
  pages =        "15:1--15:??",
  month =        oct,
  year =         "2015",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/2743029",
  ISSN =         "2160-6455 (print), 2160-6463 (electronic)",
  bibdate =      "Sat Oct 17 18:18:51 MDT 2015",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tiis.bib",
  abstract =     "Sketch recognition has the potential to be an
                 important input method for computers in the coming
                 years, particularly for STEM (science, technology,
                 engineering, and math) education. However, designing
                 and building an accurate and sophisticated sketch
                 recognition system is a time-consuming and daunting
                 task. Since sketch recognition mistakes are still
                 common, it is important to understand how users
                 perceive and tolerate recognition errors and other user
                 interface elements with these imperfect systems. In
                 order to solve this problem, we developed a Wizard of
                 Oz sketch recognition tool, the WOZ Recognizer, that
                 supports controlled recognition accuracy, multiple
                 recognition modes, and multiple sketching domains for
                 performing controlled experiments. We present the
                 design of the WOZ Recognizer and our process for
                 representing recognition domains using graphs and
                 symbol alphabets. In addition, we discuss how sketches
                 are altered, how to control the WOZ Recognizer, and how
                 users interact with it. Finally, we present an expert
                 user case study that examines the WOZ Recognizer's
                 usability.",
  acknowledgement = ack-nhfb,
  articleno =    "15",
  fjournal =     "ACM Transactions on Interactive Intelligent Systems
                 (TIIS)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}

@Article{Soto:2015:EVA,
  author =       "Axel J. Soto and Ryan Kiros and Vlado Keselj and
                 Evangelos Milios",
  title =        "Exploratory Visual Analysis and Interactive Pattern
                 Extraction from Semi-Structured Data",
  journal =      j-TIIS,
  volume =       "5",
  number =       "3",
  pages =        "16:1--16:??",
  month =        oct,
  year =         "2015",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/2812115",
  ISSN =         "2160-6455 (print), 2160-6463 (electronic)",
  bibdate =      "Sat Oct 17 18:18:51 MDT 2015",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tiis.bib",
  abstract =     "Semi-structured documents are a common type of data
                 containing free text in natural language (unstructured
                 data) as well as additional information about the
                 document, or meta-data, typically following a schema or
                 controlled vocabulary (structured data). Simultaneous
                 analysis of unstructured and structured data enables
                 the discovery of hidden relationships that cannot be
                 identified from either of these sources when analyzed
                 independently of each other. In this work, we present a
                 visual text analytics tool for semi-structured
                  documents (ViTA-SSD) that aims to support the user in
                  exploring and finding insightful patterns in a
                 visual and interactive manner in a semi-structured
                 collection of documents. It achieves this goal by
                 presenting to the user a set of coordinated
                 visualizations that allows the linking of the metadata
                 with interactively generated clusters of documents in
                 such a way that relevant patterns can be easily
                 spotted. The system contains two novel approaches in
                 its back end: a feature-learning method to learn a
                 compact representation of the corpus and a
                 fast-clustering approach that has been redesigned to
                 allow user supervision. These novel contributions make
                 it possible for the user to interact with a large and
                 dynamic document collection and to perform several text
                 analytical tasks more efficiently. Finally, we present
                 two use cases that illustrate the suitability of the
                 system for in-depth interactive exploration of
                 semi-structured document collections, two user studies,
                 and results of several evaluations of our text-mining
                 components.",
  acknowledgement = ack-nhfb,
  articleno =    "16",
  fjournal =     "ACM Transactions on Interactive Intelligent Systems
                 (TIIS)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}

@Article{Meignan:2015:RTI,
  author =       "David Meignan and Sigrid Knust and Jean-Marc Frayret
                 and Gilles Pesant and Nicolas Gaud",
  title =        "A Review and Taxonomy of Interactive Optimization
                 Methods in Operations Research",
  journal =      j-TIIS,
  volume =       "5",
  number =       "3",
  pages =        "17:1--17:??",
  month =        oct,
  year =         "2015",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/2808234",
  ISSN =         "2160-6455 (print), 2160-6463 (electronic)",
  bibdate =      "Sat Oct 17 18:18:51 MDT 2015",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tiis.bib",
  abstract =     "This article presents a review and a classification of
                 interactive optimization methods. These interactive
                 methods are used for solving optimization problems. The
                 interaction with an end user or decision maker aims at
                 improving the efficiency of the optimization procedure,
                 enriching the optimization model, or informing the user
                 regarding the solutions proposed by the optimization
                 system. First, we present the challenges of using
                 optimization methods as a tool for supporting decision
                 making, and we justify the integration of the user in
                 the optimization process. This integration is generally
                 achieved via a dynamic interaction between the user and
                 the system. Next, the different classes of interactive
                 optimization approaches are presented. This detailed
                 review includes trial and error, interactive
                 reoptimization, interactive multiobjective
                 optimization, interactive evolutionary algorithms,
                 human-guided search, and other approaches that are less
                 well covered in the research literature. On the basis
                 of this review, we propose a classification that aims
                 to better describe and compare interaction mechanisms.
                 This classification offers two complementary views on
                 interactive optimization methods. The first perspective
                 focuses on the user's contribution to the optimization
                 process, and the second concerns the components of
                 interactive optimization systems. Finally, on the basis
                 of this review and classification, we identify some
                 open issues and potential perspectives for interactive
                 optimization methods.",
  acknowledgement = ack-nhfb,
  articleno =    "17",
  fjournal =     "ACM Transactions on Interactive Intelligent Systems
                 (TIIS)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}

@Article{Wang:2016:ART,
  author =       "Weiyi Wang and Valentin Enescu and Hichem Sahli",
  title =        "Adaptive Real-Time Emotion Recognition from Body
                 Movements",
  journal =      j-TIIS,
  volume =       "5",
  number =       "4",
  pages =        "18:1--18:??",
  month =        jan,
  year =         "2016",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/2738221",
  ISSN =         "2160-6455 (print), 2160-6463 (electronic)",
  bibdate =      "Thu Jan 7 16:06:24 MST 2016",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tiis.bib",
  abstract =     "We propose a real-time system that continuously
                 recognizes emotions from body movements. The combined
                 low-level 3D postural features and high-level kinematic
                 and geometrical features are fed to a Random Forests
                 classifier through summarization (statistical values)
                 or aggregation (bag of features). In order to improve
                 the generalization capability and the robustness of the
                 system, a novel semisupervised adaptive algorithm is
                 built on top of the conventional Random Forests
                 classifier. The MoCap UCLIC affective gesture database
                 (labeled with four emotions) was used to train the
                 Random Forests classifier, which led to an overall
                  recognition rate of 78\% using 10-fold
                  cross-validation. Subsequently, the trained classifier
                 was used in a stream-based semisupervised Adaptive
                 Random Forests method for continuous unlabeled Kinect
                 data classification. The very low update cost of our
                 adaptive classifier makes it highly suitable for data
                 stream applications. Tests performed on the publicly
                 available emotion datasets (body gestures and facial
                 expressions) indicate that our new classifier
                 outperforms existing algorithms for data streams in
                 terms of accuracy and computational costs.",
  acknowledgement = ack-nhfb,
  articleno =    "18",
  fjournal =     "ACM Transactions on Interactive Intelligent Systems
                 (TIIS)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}
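
%%% The summarization path described in Wang:2016:ART above
%%% (statistical values over postural and kinematic features, fed to
%%% a Random Forests classifier) can be sketched as below. The joint
%%% layout and feature choices are assumptions, and the semisupervised
%%% Adaptive Random Forests extension is not reproduced here.
%%%
%%%     import numpy as np
%%%     from sklearn.ensemble import RandomForestClassifier
%%%
%%%     def summarize(joints):
%%%         # joints: (n_frames, n_joints, 3) 3D positions per clip.
%%%         velocity = np.diff(joints, axis=0)        # kinematic cue
%%%         speed = np.linalg.norm(velocity, axis=2)  # (frames-1, joints)
%%%         parts = [joints.mean(axis=(0, 1)), joints.std(axis=(0, 1)),
%%%                  speed.mean(axis=0), speed.max(axis=0)]
%%%         return np.concatenate([np.ravel(p) for p in parts])
%%%
%%%     def train_emotion_model(clips, labels):
%%%         # clips: list of (n_frames, n_joints, 3) arrays with a
%%%         # fixed skeleton; labels: one of the four emotions.
%%%         X = np.stack([summarize(c) for c in clips])
%%%         return RandomForestClassifier(n_estimators=300).fit(X, labels)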

@Article{Harper:2016:MDH,
  author =       "F. Maxwell Harper and Joseph A. Konstan",
  title =        "The {MovieLens} Datasets: History and Context",
  journal =      j-TIIS,
  volume =       "5",
  number =       "4",
  pages =        "19:1--19:??",
  month =        jan,
  year =         "2016",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/2827872",
  ISSN =         "2160-6455 (print), 2160-6463 (electronic)",
  bibdate =      "Thu Jan 7 16:06:24 MST 2016",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tiis.bib",
  abstract =     "The MovieLens datasets are widely used in education,
                 research, and industry. They are downloaded hundreds of
                 thousands of times each year, reflecting their use in
                 popular press programming books, traditional and online
                 courses, and software. These datasets are a product of
                 member activity in the MovieLens movie recommendation
                 system, an active research platform that has hosted
                 many experiments since its launch in 1997. This article
                 documents the history of MovieLens and the MovieLens
                 datasets. We include a discussion of lessons learned
                 from running a long-standing, live research platform
                 from the perspective of a research organization. We
                 document best practices and limitations of using the
                 MovieLens datasets in new research.",
  acknowledgement = ack-nhfb,
  articleno =    "19",
  fjournal =     "ACM Transactions on Interactive Intelligent Systems
                 (TIIS)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}
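
%%% The MovieLens datasets discussed in Harper:2016:MDH above ship as
%%% plain CSV files; in the ml-latest releases, ratings.csv has the
%%% columns userId, movieId, rating, and timestamp. A typical first
%%% step in work built on the data looks like the following (the local
%%% path is assumed; pandas assumed available):
%%%
%%%     import pandas as pd
%%%
%%%     ratings = pd.read_csv("ml-latest-small/ratings.csv")
%%%     # Per-user mean ratings, a common normalization baseline.
%%%     print(ratings.groupby("userId")["rating"].mean().head())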

@Article{Yordanova:2016:PSD,
  author =       "Kristina Yordanova and Thomas Kirste",
  title =        "A Process for Systematic Development of Symbolic
                 Models for Activity Recognition",
  journal =      j-TIIS,
  volume =       "5",
  number =       "4",
  pages =        "20:1--20:??",
  month =        jan,
  year =         "2016",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/2806893",
  ISSN =         "2160-6455 (print), 2160-6463 (electronic)",
  bibdate =      "Thu Jan 7 16:06:24 MST 2016",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tiis.bib",
  abstract =     "Several emerging approaches to activity recognition
                 (AR) combine symbolic representation of user actions
                 with probabilistic elements for reasoning under
                 uncertainty. These approaches provide promising results
                  in terms of recognition performance, coping with the
                  uncertainty of observations, and avoiding model size
                  explosion when complex problems are modelled. But
                  experience has
                 shown that it is not always intuitive to model even
                 seemingly simple problems. To date, there are no
                 guidelines for developing such models. To address this
                 problem, in this work we present a development process
                 for building symbolic models that is based on
                 experience acquired so far as well as on existing
                 engineering and data analysis workflows. The proposed
                 process is a first attempt at providing structured
                 guidelines and practices for designing, modelling, and
                 evaluating human behaviour in the form of symbolic
                 models for AR. As an illustration of the process, a
                 simple example from the office domain was developed.
                 The process was evaluated in a comparative study of an
                 intuitive process and the proposed process. The results
                 showed a significant improvement over the intuitive
                 process. Furthermore, the study participants reported
                 greater ease of use and perceived effectiveness when
                 following the proposed process. To evaluate the
                 applicability of the process to more complex AR
                 problems, it was applied to a problem from the kitchen
                 domain. The results showed that following the proposed
                 process yielded an average accuracy of 78\%. The
                 developed model outperformed state-of-the-art methods
                 applied to the same dataset in previous work, and it
                 performed comparably to a symbolic model developed by a
                 model expert without following the proposed development
                 process.",
  acknowledgement = ack-nhfb,
  articleno =    "20",
  fjournal =     "ACM Transactions on Interactive Intelligent Systems
                 (TIIS)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}

@Article{Yamazaki:2016:ITN,
  author =       "Keiichi Yamazaki and Akiko Yamazaki and Keiko Ikeda
                 and Chen Liu and Mihoko Fukushima and Yoshinori
                 Kobayashi and Yoshinori Kuno",
  title =        "{``I'll Be There Next''}: a Multiplex Care Robot
                 System that Conveys Service Order Using Gaze Gestures",
  journal =      j-TIIS,
  volume =       "5",
  number =       "4",
  pages =        "21:1--21:??",
  month =        jan,
  year =         "2016",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/2844542",
  ISSN =         "2160-6455 (print), 2160-6463 (electronic)",
  bibdate =      "Thu Jan 7 16:06:24 MST 2016",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tiis.bib",
  abstract =     "In this article, we discuss our findings from an
                 ethnographic study at an elderly care center where we
                 observed the utilization of two different functions of
                 human gaze to convey service order (i.e., ``who is
                 served first and who is served next''). In one case,
                 when an elderly person requested assistance, the gaze
                 of the care worker communicated that he/she would serve
                 that client next in turn. In the other case, the gaze
                 conveyed a request to the service seeker to wait until
                 the care worker finished attending the current client.
                 Each gaze function depended on the care worker's
                 current engagement and other behaviors. We sought to
                 integrate these findings into the development of a
                 robot that might function more effectively in multiple
                 human-robot party settings. We focused on the multiple
                 functions of gaze and bodily actions, implementing
                 those functions into our robot. We conducted three
                 experiments to gauge a combination of gestures and
                 gazes performed by our robot. This article demonstrates
                 that the employment of gaze is an important
                 consideration when developing robots that can interact
                 effectively in multiple human-robot party settings.",
  acknowledgement = ack-nhfb,
  articleno =    "21",
  fjournal =     "ACM Transactions on Interactive Intelligent Systems
                 (TIIS)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}

@Article{Nakano:2016:GRG,
  author =       "Yukiko I. Nakano and Takashi Yoshino and Misato
                 Yatsushiro and Yutaka Takase",
  title =        "Generating Robot Gaze on the Basis of Participation
                 Roles and Dominance Estimation in Multiparty
                 Interaction",
  journal =      j-TIIS,
  volume =       "5",
  number =       "4",
  pages =        "22:1--22:??",
  month =        jan,
  year =         "2016",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/2743028",
  ISSN =         "2160-6455 (print), 2160-6463 (electronic)",
  bibdate =      "Thu Jan 7 16:06:24 MST 2016",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tiis.bib",
  abstract =     "Gaze is an important nonverbal feedback signal in
                 multiparty face-to-face conversations. It is well known
                 that gaze behaviors differ depending on participation
                 role: speaker, addressee, or side participant. In this
                 study, we focus on dominance as another factor that
                 affects gaze. First, we conducted an empirical study
                 and analyzed its results that showed how gaze behaviors
                 are affected by both dominance and participation roles.
                 Then, using speech and gaze information that was
                 statistically significant for distinguishing the more
                 dominant and less dominant person in an empirical
                 study, we established a regression-based model for
                 estimating conversational dominance. On the basis of
                 the model, we implemented a dominance estimation
                 mechanism that processes online speech and head
                 direction data. Then we applied our findings to
                 human-robot interaction. To design robot gaze
                 behaviors, we analyzed gaze transitions with respect to
                 participation roles and dominance and implemented
                 gaze-transition models as robot gaze behavior
                 generation rules. Finally, we evaluated a humanoid
                 robot that has dominance estimation functionality and
                 determines its gaze based on the gaze models, and we
                 found that dominant participants had a better
                 impression of less dominant robot gaze behaviors. This
                 suggests that a robot using our gaze models was
                 preferred to a robot that was simply looking at the
                 speaker. We have demonstrated the importance of
                 considering dominance in human-robot multiparty
                 interaction.",
  acknowledgement = ack-nhfb,
  articleno =    "22",
  fjournal =     "ACM Transactions on Interactive Intelligent Systems
                 (TIIS)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}
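
%%% Nakano:2016:GRG above estimates conversational dominance with a
%%% regression-based model over speech and gaze statistics. The
%%% sketch below illustrates the idea with hypothetical
%%% per-participant features and toy labels; the authors' actual
%%% feature set and fitted coefficients are not reproduced.
%%%
%%%     import numpy as np
%%%     from sklearn.linear_model import LogisticRegression
%%%
%%%     # One row per participant: [fraction of time speaking, turns
%%%     # taken, fraction of gaze received, fraction of gaze given
%%%     # while speaking] -- hypothetical features.
%%%     X = np.array([[0.62, 14, 0.55, 0.31],
%%%                   [0.21,  6, 0.18, 0.12]])
%%%     y = np.array([1, 0])  # 1 = rated more dominant (toy labels)
%%%
%%%     model = LogisticRegression().fit(X, y)
%%%     dominance = model.predict_proba(X)[:, 1]  # graded estimate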

@Article{Nakano:2016:ISI,
  author =       "Yukiko I. Nakano and Roman Bednarik and Hung-Hsuan
                 Huang and Kristiina Jokinen",
  title =        "Introduction to the Special Issue on New Directions in
                 Eye Gaze for Interactive Intelligent Systems",
  journal =      j-TIIS,
  volume =       "6",
  number =       "1",
  pages =        "1:1--1:??",
  month =        may,
  year =         "2016",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/2893485",
  ISSN =         "2160-6455 (print), 2160-6463 (electronic)",
  bibdate =      "Sat May 21 08:06:01 MDT 2016",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tiis.bib",
  abstract =     "Eye gaze has been used broadly in interactive
                 intelligent systems. The research area has grown in
                 recent years to cover emerging topics that go beyond
                 the traditional focus on interaction between a single
                 user and an interactive system. This special issue
                 presents five articles that explore new directions of
                 gaze-based interactive intelligent systems, ranging
                 from communication robots in dyadic and multiparty
                 conversations to a driving simulator that uses eye gaze
                 evidence to critique learners' behavior.",
  acknowledgement = ack-nhfb,
  articleno =    "1",
  fjournal =     "ACM Transactions on Interactive Intelligent Systems
                 (TIIS)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}

@Article{Xu:2016:SYS,
  author =       "Tian (Linger) Xu and Hui Zhang and Chen Yu",
  title =        "See You See Me: The Role of Eye Contact in Multimodal
                 Human-Robot Interaction",
  journal =      j-TIIS,
  volume =       "6",
  number =       "1",
  pages =        "2:1--2:??",
  month =        may,
  year =         "2016",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/2882970",
  ISSN =         "2160-6455 (print), 2160-6463 (electronic)",
  bibdate =      "Sat May 21 08:06:01 MDT 2016",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tiis.bib",
  abstract =     "We focus on a fundamental looking behavior in
                  human-robot interactions: gazing at each other's face.
                 Eye contact and mutual gaze between two social partners
                 are critical in smooth human-human interactions.
                 Therefore, investigating at what moments and in what
                 ways a robot should look at a human user's face as a
                 response to the human's gaze behavior is an important
                 topic. Toward this goal, we developed a gaze-contingent
                 human-robot interaction system, which relied on
                 momentary gaze behaviors from a human user to control
                 an interacting robot in real time. Using this system,
                 we conducted an experiment in which human participants
                 interacted with the robot in a joint-attention task. In
                 the experiment, we systematically manipulated the
                 robot's gaze toward the human partner's face in real
                 time and then analyzed the human's gaze behavior as a
                 response to the robot's gaze behavior. We found that
                 more face looks from the robot led to more look-backs
                 (to the robot's face) from human participants, and
                 consequently, created more mutual gaze and eye contact
                 between the two. Moreover, participants demonstrated
                 more coordinated and synchronized multimodal behaviors
                 between speech and gaze when more eye contact was
                 successfully established and maintained.",
  acknowledgement = ack-nhfb,
  articleno =    "2",
  fjournal =     "ACM Transactions on Interactive Intelligent Systems
                 (TIIS)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}

@Article{Wade:2016:GCA,
  author =       "Joshua Wade and Lian Zhang and Dayi Bian and Jing Fan
                 and Amy Swanson and Amy Weitlauf and Medha Sarkar and
                 Zachary Warren and Nilanjan Sarkar",
  title =        "A Gaze-Contingent Adaptive Virtual Reality Driving
                 Environment for Intervention in Individuals with Autism
                 Spectrum Disorders",
  journal =      j-TIIS,
  volume =       "6",
  number =       "1",
  pages =        "3:1--3:??",
  month =        may,
  year =         "2016",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/2892636",
  ISSN =         "2160-6455 (print), 2160-6463 (electronic)",
  bibdate =      "Sat May 21 08:06:01 MDT 2016",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tiis.bib",
  abstract =     "In addition to social and behavioral deficits,
                 individuals with Autism Spectrum Disorder (ASD) often
                 struggle to develop the adaptive skills necessary to
                 achieve independence. Driving intervention in
                 individuals with ASD is a growing area of study, but it
                  is still largely under-researched. We present the
                 development and preliminary assessment of a
                 gaze-contingent adaptive virtual reality driving
                 simulator that uses real-time gaze information to adapt
                 the driving environment with the aim of providing a
                 more individualized method of driving intervention. We
                 conducted a small pilot study of 20 adolescents with
                 ASD using our system: 10 with the adaptive
                 gaze-contingent version of the system and 10 in a
                 purely performance-based version. Preliminary results
                 suggest that the novel intervention system may be
                 beneficial in teaching driving skills to individuals
                 with ASD.",
  acknowledgement = ack-nhfb,
  articleno =    "3",
  fjournal =     "ACM Transactions on Interactive Intelligent Systems
                 (TIIS)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}

@Article{Ishii:2016:PWW,
  author =       "Ryo Ishii and Kazuhiro Otsuka and Shiro Kumano and
                 Junji Yamato",
  title =        "Prediction of Who Will Be the Next Speaker and When
                 Using Gaze Behavior in Multiparty Meetings",
  journal =      j-TIIS,
  volume =       "6",
  number =       "1",
  pages =        "4:1--4:??",
  month =        may,
  year =         "2016",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/2757284",
  ISSN =         "2160-6455 (print), 2160-6463 (electronic)",
  bibdate =      "Sat May 21 08:06:01 MDT 2016",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tiis.bib",
  abstract =     "In multiparty meetings, participants need to predict
                 the end of the speaker's utterance and who will start
                 speaking next, as well as consider a strategy for good
                 timing to speak next. Gaze behavior plays an important
                 role in smooth turn-changing. This article proposes a
                 prediction model that features three processing steps
                 to predict (I) whether turn-changing or turn-keeping
                 will occur, (II) who will be the next speaker in
                 turn-changing, and (III) the timing of the start of the
                 next speaker's utterance. For the feature values of the
                 model, we focused on gaze transition patterns and the
                 timing structure of eye contact between a speaker and a
                 listener near the end of the speaker's utterance. Gaze
                 transition patterns provide information about the order
                 in which gaze behavior changes. The timing structure of
                 eye contact is defined as who looks at whom and who
                 looks away first, the speaker or listener, when eye
                 contact between the speaker and a listener occurs. We
                 collected corpus data of multiparty meetings, using the
                 data to demonstrate relationships between gaze
                 transition patterns and timing structure and situations
                 (I), (II), and (III). The results of our analyses
                 indicate that the gaze transition pattern of the
                 speaker and listener and the timing structure of eye
                 contact have a strong association with turn-changing,
                 the next speaker in turn-changing, and the start time
                 of the next utterance. On the basis of the results, we
                 constructed prediction models using the gaze transition
                 patterns and timing structure. The gaze transition
                 patterns were found to be useful in predicting
                 turn-changing, the next speaker in turn-changing, and
                 the start time of the next utterance. Contrary to
                 expectations, we did not find that the timing structure
                 is useful for predicting the next speaker and the start
                 time. This study opens up new possibilities for
                 predicting the next speaker and the timing of the next
                 utterance using gaze transition patterns in multiparty
                 meetings.",
  acknowledgement = ack-nhfb,
  articleno =    "4",
  fjournal =     "ACM Transactions on Interactive Intelligent Systems
                 (TIIS)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}

@Article{Dardard:2016:ACL,
  author =       "Floriane Dardard and Giorgio Gnecco and Donald
                 Glowinski",
  title =        "Automatic Classification of Leading Interactions in a
                 String Quartet",
  journal =      j-TIIS,
  volume =       "6",
  number =       "1",
  pages =        "5:1--5:??",
  month =        may,
  year =         "2016",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/2818739",
  ISSN =         "2160-6455 (print), 2160-6463 (electronic)",
  bibdate =      "Sat May 21 08:06:01 MDT 2016",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tiis.bib",
  abstract =     "The aim of the present work is to analyze
                 automatically the leading interactions between the
                 musicians of a string quartet, using machine-learning
                 techniques applied to nonverbal features of the
                  musicians' behavior, which are detected with the
                  help of a motion-capture system. We represent these
                 interactions by a graph of ``influence'' of the
                 musicians, which displays the relations ``is
                 following'' and ``is not following'' with weighted
                 directed arcs. The goal of the machine-learning problem
                 investigated is to assign weights to these arcs in an
                 optimal way. Since only a subset of the available
                  training examples is labeled, a semisupervised support
                 vector machine is used, which is based on a linear
                 kernel to limit its model complexity. Specific
                 potential applications within the field of
                 human-computer interaction are also discussed, such as
                 e-learning, networked music performance, and social
                 active listening.",
  acknowledgement = ack-nhfb,
  articleno =    "5",
  fjournal =     "ACM Transactions on Interactive Intelligent Systems
                 (TIIS)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}
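
%%% Dardard:2016:ACL above trains a semisupervised SVM with a linear
%%% kernel on partially labeled examples. scikit-learn offers no
%%% S3VM solver, so the sketch below approximates the setting with
%%% self-training around a probabilistic linear SVC; the data shapes
%%% are placeholders, and -1 marks unlabeled examples.
%%%
%%%     import numpy as np
%%%     from sklearn.svm import SVC
%%%     from sklearn.semi_supervised import SelfTrainingClassifier
%%%
%%%     rng = np.random.default_rng(0)
%%%     X = rng.random((100, 8))         # nonverbal features per arc
%%%     y = np.full(100, -1)             # mostly unlabeled
%%%     y[:10] = rng.integers(0, 2, 10)  # 1 = "is following", 0 = not
%%%
%%%     base = SVC(kernel="linear", probability=True)
%%%     model = SelfTrainingClassifier(base).fit(X, y)
%%%     arc_weights = model.predict_proba(X)[:, 1]  # weighted arcs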

@Article{Piana:2016:ABG,
  author =       "Stefano Piana and Alessandra Staglian{\`o} and
                 Francesca Odone and Antonio Camurri",
  title =        "Adaptive Body Gesture Representation for Automatic
                 Emotion Recognition",
  journal =      j-TIIS,
  volume =       "6",
  number =       "1",
  pages =        "6:1--6:??",
  month =        may,
  year =         "2016",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/2818740",
  ISSN =         "2160-6455 (print), 2160-6463 (electronic)",
  bibdate =      "Sat May 21 08:06:01 MDT 2016",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tiis.bib",
  abstract =     "We present a computational model and a system for the
                 automated recognition of emotions starting from
                 full-body movement. Three-dimensional motion data of
                 full-body movements are obtained either from
                 professional optical motion-capture systems (Qualisys)
                 or from low-cost RGB-D sensors (Kinect and Kinect2). A
                 number of features are then automatically extracted at
                 different levels, from kinematics of a single joint to
                 more global expressive features inspired by psychology
                 and humanistic theories (e.g., contraction index,
                 fluidity, and impulsiveness). An abstraction layer
                 based on dictionary learning further processes these
                 movement features to increase the model generality and
                 to deal with intraclass variability, noise, and
                 incomplete information characterizing emotion
                 expression in human movement. The resulting feature
                 vector is the input for a classifier performing
                 real-time automatic emotion recognition based on linear
                 support vector machines. The recognition performance of
                 the proposed model is presented and discussed,
                  including the tradeoff between the precision of the
                  tracking measures (we compare the Kinect RGB-D sensor
                  and the Qualisys motion-capture system) and the size
                  of the training dataset. The resulting model
                 and system have been successfully applied in the
                 development of serious games for helping autistic
                 children learn to recognize and express emotions by
                 means of their full-body movement.",
  acknowledgement = ack-nhfb,
  articleno =    "6",
  fjournal =     "ACM Transactions on Interactive Intelligent Systems
                 (TIIS)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}
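
%%% A minimal sketch of the abstraction layer described above, assuming
%%% precomputed per-clip expressive features in hypothetical .npy files:
%%% dictionary learning produces sparse movement codes that feed a linear
%%% SVM, as in the abstract; parameters are illustrative, not the paper's.
%%%
%%%   import numpy as np
%%%   from sklearn.decomposition import DictionaryLearning
%%%   from sklearn.svm import LinearSVC
%%%
%%%   F = np.load("expressive_features.npy")  # contraction index, fluidity, ...
%%%   y = np.load("emotion_labels.npy")
%%%   dico = DictionaryLearning(n_components=32)  # abstraction layer
%%%   codes = dico.fit_transform(F)               # sparse movement codes
%%%   clf = LinearSVC().fit(codes, y)             # real-time linear classifier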

@Article{Hoque:2016:ITM,
  author =       "Enamul Hoque and Giuseppe Carenini",
  title =        "Interactive Topic Modeling for Exploring Asynchronous
                 Online Conversations: Design and Evaluation of
                 {ConVisIT}",
  journal =      j-TIIS,
  volume =       "6",
  number =       "1",
  pages =        "7:1--7:??",
  month =        may,
  year =         "2016",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/2854158",
  ISSN =         "2160-6455 (print), 2160-6463 (electronic)",
  bibdate =      "Sat May 21 08:06:01 MDT 2016",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tiis.bib",
  abstract =     "Since the mid-2000s, there has been exponential growth
                 of asynchronous online conversations, thanks to the
                 rise of social media. Analyzing and gaining insights
                 from such conversations can be quite challenging for a
                 user, especially when the discussion becomes very long.
                 A promising solution to this problem is topic modeling,
                 since it may help the user to understand quickly what
                 was discussed in a long conversation and to explore the
                 comments of interest. However, the results of topic
                 modeling can be noisy, and they may not match the
                 user's current information needs. To address this
                 problem, we propose a novel topic modeling system for
                 asynchronous conversations that revises the model on
                 the fly on the basis of users' feedback. We then
                 integrate this system with interactive visualization
                 techniques to support the user in exploring long
                 conversations, as well as in revising the topic model
                 when the current results are not adequate to fulfill
                 the user's information needs. Finally, we report on an
                 evaluation with real users that compared the resulting
                 system with both a traditional interface and an
                 interactive visual interface that does not support
                 human-in-the-loop topic modeling. Both the quantitative
                 results and the subjective feedback from the
                 participants illustrate the potential benefits of our
                 interactive topic modeling approach for exploring
                 conversations, relative to its counterparts.",
  acknowledgement = ack-nhfb,
  articleno =    "7",
  fjournal =     "ACM Transactions on Interactive Intelligent Systems
                 (TIIS)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}

@Article{Jannach:2016:SDM,
  author =       "Dietmar Jannach and Michael Jugovac and Lukas Lerche",
  title =        "Supporting the Design of Machine Learning Workflows
                 with a Recommendation System",
  journal =      j-TIIS,
  volume =       "6",
  number =       "1",
  pages =        "8:1--8:??",
  month =        may,
  year =         "2016",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/2852082",
  ISSN =         "2160-6455 (print), 2160-6463 (electronic)",
  bibdate =      "Sat May 21 08:06:01 MDT 2016",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tiis.bib",
  abstract =     "Machine learning and data analytics tasks in practice
                 require several consecutive processing steps.
                 RapidMiner is a widely used software tool for the
                 development and execution of such analytics workflows.
                 Unlike many other algorithm toolkits, it comprises a
                 visual editor that allows the user to design processes
                 on a conceptual level. This conceptual and visual
                 approach helps the user to abstract from the technical
                 details during the development phase and to retain a
                 focus on the core modeling task. The large set of
                 preimplemented data analysis and machine learning
                 operations available in the tool, as well as their
                  logical dependencies, can, however, be overwhelming,
                  particularly for novice users. In this work, we present
                 an add-on to the RapidMiner framework that supports the
                 user during the modeling phase by recommending
                 additional operations to insert into the currently
                 developed machine learning workflow. First, we propose
                 different recommendation techniques and evaluate them
                 in an offline setting using a pool of several thousand
                 existing workflows. Second, we present the results of a
                 laboratory study, which show that our tool helps users
                 to significantly increase the efficiency of the
                 modeling process. Finally, we report on analyses using
                 data that were collected during the real-world
                 deployment of the plug-in component and compare the
                 results of the live deployment of the tool with the
                 results obtained through an offline analysis and a
                 replay simulation.",
  acknowledgement = ack-nhfb,
  articleno =    "8",
  fjournal =     "ACM Transactions on Interactive Intelligent Systems
                 (TIIS)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}
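
%%% One simple baseline for recommending the next operator from a pool of
%%% existing workflows is a first-order co-occurrence model; a sketch with
%%% made-up workflow data, not the plug-in's actual recommendation
%%% techniques.
%%%
%%%   from collections import Counter, defaultdict
%%%
%%%   workflows = [  # hypothetical past workflows (ordered operator names)
%%%       ["Read CSV", "Normalize", "SVM", "Apply Model"],
%%%       ["Read CSV", "Normalize", "Decision Tree"],
%%%   ]
%%%   follows = defaultdict(Counter)  # follows[a][b]: times b followed a
%%%   for wf in workflows:
%%%       for a, b in zip(wf, wf[1:]):
%%%           follows[a][b] += 1
%%%
%%%   def recommend(op, k=3):
%%%       # operators most often inserted right after `op`
%%%       return [b for b, _ in follows[op].most_common(k)]
%%%
%%%   print(recommend("Normalize"))  # ['SVM', 'Decision Tree']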

@Article{Malik:2016:HVH,
  author =       "Sana Malik and Ben Shneiderman and Fan Du and
                 Catherine Plaisant and Margret Bjarnadottir",
  title =        "High-Volume Hypothesis Testing: Systematic Exploration
                 of Event Sequence Comparisons",
  journal =      j-TIIS,
  volume =       "6",
  number =       "1",
  pages =        "9:1--9:??",
  month =        may,
  year =         "2016",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/2890478",
  ISSN =         "2160-6455 (print), 2160-6463 (electronic)",
  bibdate =      "Sat May 21 08:06:01 MDT 2016",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tiis.bib",
  abstract =     "Cohort comparison studies have traditionally been
                 hypothesis driven and conducted in carefully controlled
                 environments (such as clinical trials). Given two
                 groups of event sequence data, researchers test a
                 single hypothesis (e.g., does the group taking
                 Medication A exhibit more deaths than the group taking
                 Medication B?). Recently, however, researchers have
                 been moving toward more exploratory methods of
                 retrospective analysis with existing data. In this
                 article, we begin by showing that the task of cohort
                 comparison is specific enough to support automatic
                 computation against a bounded set of potential
                 questions and objectives, a method that we refer to as
                 High-Volume Hypothesis Testing (HVHT). From this
                 starting point, we demonstrate that the diversity of
                 these objectives, both across and within different
                 domains, as well as the inherent complexities of
                 real-world datasets, still requires human involvement
                 to determine meaningful insights. We explore how
                 visualization and interaction better support the task
                 of exploratory data analysis and the understanding of
                 HVHT results (how significant they are, why they are
                 meaningful, and whether the entire dataset has been
                 exhaustively explored). Through interviews and case
                 studies with domain experts, we iteratively design and
                 implement visualization and interaction techniques in a
                 visual analytics tool, CoCo. As a result of our
                 evaluation, we propose six design guidelines for
                 enabling users to explore large result sets of HVHT
                 systematically and flexibly in order to glean
                 meaningful insights more quickly. Finally, we
                 illustrate the utility of this method with three case
                 studies in the medical domain.",
  acknowledgement = ack-nhfb,
  articleno =    "9",
  fjournal =     "ACM Transactions on Interactive Intelligent Systems
                 (TIIS)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}

@Article{Pan:2016:TLS,
  author =       "Weike Pan and Qiang Yang and Yuchao Duan and Zhong
                 Ming",
  title =        "Transfer Learning for Semisupervised Collaborative
                 Recommendation",
  journal =      j-TIIS,
  volume =       "6",
  number =       "2",
  pages =        "10:1--10:??",
  month =        aug,
  year =         "2016",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/2835497",
  ISSN =         "2160-6455 (print), 2160-6463 (electronic)",
  bibdate =      "Tue Oct 18 11:51:13 MDT 2016",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tiis.bib",
  abstract =     "Users' online behaviors such as ratings and
                  examination of items are recognized as among the most
                 valuable sources of information for learning users'
                 preferences in order to make personalized
                 recommendations. But most previous works focus on
                 modeling only one type of users' behaviors such as
                 numerical ratings or browsing records, which are
                 referred to as explicit feedback and implicit feedback,
                 respectively. In this article, we study a
                 Semisupervised Collaborative Recommendation (SSCR)
                 problem with labeled feedback (for explicit feedback)
                 and unlabeled feedback (for implicit feedback), in
                 analogy to the well-known Semisupervised Learning (SSL)
                 setting with labeled instances and unlabeled instances.
                 SSCR is associated with two fundamental challenges,
                 that is, heterogeneity of two types of users' feedback
                 and uncertainty of the unlabeled feedback. As a
                 response, we design a novel Self-Transfer Learning
                 (sTL) algorithm to iteratively identify and integrate
                 likely positive unlabeled feedback, which is inspired
                 by the general forward/backward process in machine
                 learning. The merit of sTL is its ability to learn
                 users' preferences from heterogeneous behaviors in a
                 joint and selective manner. We conduct extensive
                 empirical studies of sTL and several very competitive
                 baselines on three large datasets. The experimental
                 results show that our sTL is significantly better than
                 the state-of-the-art methods.",
  acknowledgement = ack-nhfb,
  articleno =    "10",
  fjournal =     "ACM Transactions on Interactive Intelligent Systems
                 (TIIS)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}
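
%%% The iterative identify-and-integrate step of sTL can be illustrated
%%% with a generic self-training loop; a plain logistic model stands in
%%% for the authors' learner, the input files are hypothetical, and the
%%% 0.9 threshold is an arbitrary choice.
%%%
%%%   import numpy as np
%%%   from sklearn.linear_model import LogisticRegression
%%%
%%%   Xl = np.load("explicit_X.npy"); yl = np.load("explicit_y.npy")
%%%   Xu = np.load("implicit_X.npy")  # unlabeled (implicit) feedback
%%%   clf = LogisticRegression(max_iter=1000)
%%%   for _ in range(5):              # forward process: grow the labeled set
%%%       clf.fit(Xl, yl)
%%%       p = clf.predict_proba(Xu)[:, 1]
%%%       keep = p > 0.9              # likely-positive unlabeled feedback
%%%       if not keep.any():
%%%           break
%%%       Xl = np.vstack([Xl, Xu[keep]])
%%%       yl = np.concatenate([yl, np.ones(keep.sum())])
%%%       Xu = Xu[~keep]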

@Article{Verbert:2016:AVU,
  author =       "Katrien Verbert and Denis Parra and Peter
                 Brusilovsky",
  title =        "Agents Vs. Users: Visual Recommendation of Research
                 Talks with Multiple Dimension of Relevance",
  journal =      j-TIIS,
  volume =       "6",
  number =       "2",
  pages =        "11:1--11:??",
  month =        aug,
  year =         "2016",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/2946794",
  ISSN =         "2160-6455 (print), 2160-6463 (electronic)",
  bibdate =      "Tue Oct 18 11:51:13 MDT 2016",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tiis.bib",
  abstract =     "Several approaches have been researched to help people
                  deal with the abundance of information. An important
                 feature pioneered by social tagging systems and later
                 used in other kinds of social systems is the ability to
                 explore different community relevance prospects by
                 examining items bookmarked by a specific user or items
                 associated by various users with a specific tag. A
                 ranked list of recommended items offered by a specific
                 recommender engine can be considered as another
                 relevance prospect. The problem that we address is that
                 existing personalized social systems do not allow their
                 users to explore and combine multiple relevance
                 prospects. Only one prospect can be explored at any
                  given time: a list of recommended items, a list of items
                 bookmarked by a specific user, or a list of items
                 marked with a specific tag. In this article, we explore
                 the notion of combining multiple relevance prospects as
                 a way to increase effectiveness and trust. We used a
                 visual approach to recommend articles at a conference
                 by explicitly presenting multiple dimensions of
                 relevance. Suggestions offered by different
                 recommendation techniques were embodied as recommender
                 agents to put them on the same ground as users and
                 tags. The results of two user studies performed at
                 academic conferences allowed us to obtain interesting
                 insights to enhance user interfaces of personalized
                 social systems. More specifically, effectiveness and
                 probability of item selection increase when users are
                  able to explore and interrelate prospects of item
                  relevance, that is, items bookmarked by users,
                  recommendations, and tags. Nevertheless, a
                 less-technical audience may require guidance to
                 understand the rationale of such intersections.",
  acknowledgement = ack-nhfb,
  articleno =    "11",
  fjournal =     "ACM Transactions on Interactive Intelligent Systems
                 (TIIS)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}

@Article{Cafaro:2016:EIA,
  author =       "Angelo Cafaro and Brian Ravenet and Magalie Ochs and
                 Hannes H{\"o}gni Vilhj{\'a}lmsson and Catherine
                 Pelachaud",
  title =        "The Effects of Interpersonal Attitude of a Group of
                 Agents on User's Presence and Proxemics Behavior",
  journal =      j-TIIS,
  volume =       "6",
  number =       "2",
  pages =        "12:1--12:??",
  month =        aug,
  year =         "2016",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/2914796",
  ISSN =         "2160-6455 (print), 2160-6463 (electronic)",
  bibdate =      "Tue Oct 18 11:51:13 MDT 2016",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tiis.bib",
  abstract =     "In the everyday world people form small conversing
                 groups where social interaction takes place, and much
                 of the social behavior takes place through managing
                 interpersonal space (i.e., proxemics) and group
                 formation, signaling their attention to others (i.e.,
                 through gaze behavior), and expressing certain
                 attitudes, for example, friendliness, by smiling,
                 getting close through increased engagement and
                 intimacy, and welcoming newcomers. Many real-time
                 interactive systems feature virtual anthropomorphic
                 characters in order to simulate conversing groups and
                 add plausibility and believability to the simulated
                 environments. However, only a few have dealt with
                 autonomous behavior generation, and in those cases, the
                 agents' exhibited behavior should be evaluated by users
                 in terms of appropriateness, believability, and
                 conveyed meaning (e.g., attitudes). In this article we
                 present an integrated intelligent interactive system
                 for generating believable nonverbal behavior exhibited
                 by virtual agents in small simulated group
                 conversations. The produced behavior supports group
                 formation management and the expression of
                 interpersonal attitudes (friendly vs. unfriendly) both
                 among the agents in the group (i.e., in-group attitude)
                 and towards an approaching user in an avatar-based
                 interaction (out-group attitude). A user study
                 investigating the effects of these attitudes on users'
                 social presence evaluation and proxemics behavior (with
                 their avatar) in a three-dimensional virtual city
                 environment is presented. We divided the study into two
                 trials according to the task assigned to users, that
                 is, joining a conversing group and reaching a target
                 destination behind the group. Results showed that the
                 out-group attitude had a major impact on social
                 presence evaluations in both trials, whereby friendly
                 groups were perceived as more socially rich. The user's
                 proxemics behavior depended on both out-group and
                 in-group attitudes expressed by the agents.
                 Implications of these results for the design and
                 implementation of similar intelligent interactive
                 systems for the autonomous generation of agents'
                 multimodal behavior are briefly discussed.",
  acknowledgement = ack-nhfb,
  articleno =    "12",
  fjournal =     "ACM Transactions on Interactive Intelligent Systems
                 (TIIS)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}

@Article{Taranta:2016:DPB,
  author =       "Eugene M. {Taranta II} and Andr{\'e}s N. Vargas and
                 Spencer P. Compton and Joseph J. {Laviola, Jr.}",
  title =        "A Dynamic Pen-Based Interface for Writing and Editing
                 Complex Mathematical Expressions With Math Boxes",
  journal =      j-TIIS,
  volume =       "6",
  number =       "2",
  pages =        "13:1--13:??",
  month =        aug,
  year =         "2016",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/2946795",
  ISSN =         "2160-6455 (print), 2160-6463 (electronic)",
  bibdate =      "Tue Oct 18 11:51:13 MDT 2016",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tiis.bib",
  abstract =     "Math boxes is a recently introduced pen-based user
                  interface for simplifying the task of handwriting
                 difficult mathematical expressions. Visible bounding
                 boxes around subexpressions are automatically generated
                 as the system detects relevant spatial relationships
                 between symbols including superscripts, subscripts, and
                 fractions. Subexpressions contained in a math box can
                 then be extended by adding new terms directly into its
                 given bounds. When new characters are accepted, box
                 boundaries are dynamically resized and neighboring
                 terms are translated to make room for the larger box.
                 Feedback on structural recognition is given via the
                 boxes themselves. In this work, we extend the math
                 boxes interface to include support for subexpression
                 modifications via a new set of pen-based interactions.
                 Specifically, techniques to expand and rearrange terms
                 in a given expression are introduced. To evaluate the
                 usefulness of our proposed methods, we first conducted
                 a user study in which participants wrote a variety of
                 equations ranging in complexity from a simple
                 polynomial to the more difficult expected value of the
                 logistic distribution. The math boxes interface is
                 compared against the commonly used offset typeset
                 (small) method, where recognized expressions are
                 typeset in a system font near the user's unmodified
                 ink. In this initial study, we find that the fluidness
                 of the offset method is preferred for simple
                 expressions but that, as difficulty increases, our math
                 boxes method is overwhelmingly preferred. We then
                 conducted a second user study that focused only on
                 modifying various mathematical expressions. In general,
                 participants worked faster with the math boxes
                 interface, and most new techniques were well received.
                 On the basis of the two user studies, we discuss the
                 implications of the math boxes interface and identify
                 areas where improvements are possible.",
  acknowledgement = ack-nhfb,
  articleno =    "13",
  fjournal =     "ACM Transactions on Interactive Intelligent Systems
                 (TIIS)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}

@Article{Yang:2016:SUS,
  author =       "Yi Yang and Shimei Pan and Jie Lu and Mercan Topkara
                 and Yangqiu Song",
  title =        "The Stability and Usability of Statistical Topic
                 Models",
  journal =      j-TIIS,
  volume =       "6",
  number =       "2",
  pages =        "14:1--14:??",
  month =        aug,
  year =         "2016",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/2954002",
  ISSN =         "2160-6455 (print), 2160-6463 (electronic)",
  bibdate =      "Tue Oct 18 11:51:13 MDT 2016",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tiis.bib",
  abstract =     "Statistical topic models have become a useful and
                 ubiquitous tool for analyzing large text corpora. One
                 common application of statistical topic models is to
                 support topic-centric navigation and exploration of
                 document collections. Existing work on topic modeling
                 focuses on the inference of model parameters so the
                 resulting model fits the input data. Since the exact
                 inference is intractable, statistical inference
                 methods, such as Gibbs Sampling, are commonly used to
                 solve the problem. However, most of the existing work
                 ignores an important aspect that is closely related to
                 the end user experience: topic model stability. When
                 the model is either re-trained with the same input data
                 or updated with new documents, the topic previously
                 assigned to a document may change under the new model,
                 which may result in a disruption of end users' mental
                 maps about the relations between documents and topics,
                 thus undermining the usability of the applications. In
                 this article, we propose a novel user-directed
                 non-disruptive topic model update method that balances
                 the tradeoff between finding the model that fits the
                 data and maintaining the stability of the model from
                 end users' perspective. It employs a novel constrained
                 LDA algorithm to incorporate pairwise document
                 constraints, which are converted from user feedback
                 about topics, to achieve topic model stability.
                 Evaluation results demonstrate the advantages of our
                 approach over previous methods.",
  acknowledgement = ack-nhfb,
  articleno =    "14",
  fjournal =     "ACM Transactions on Interactive Intelligent Systems
                 (TIIS)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}
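
%%% The stability notion used above can be quantified very simply as the
%%% share of documents whose dominant topic survives a model update; a toy
%%% metric sketch, not the constrained LDA algorithm itself.
%%%
%%%   import numpy as np
%%%
%%%   def topic_stability(old_topics, new_topics):
%%%       # fraction of documents keeping their dominant topic
%%%       old, new = np.asarray(old_topics), np.asarray(new_topics)
%%%       return float(np.mean(old == new))
%%%
%%%   print(topic_stability([0, 2, 1, 1], [0, 2, 2, 1]))  # 0.75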

@Article{Kveton:2016:MIC,
  author =       "Branislav Kveton and Shlomo Berkovsky",
  title =        "Minimal Interaction Content Discovery in Recommender
                 Systems",
  journal =      j-TIIS,
  volume =       "6",
  number =       "2",
  pages =        "15:1--15:??",
  month =        aug,
  year =         "2016",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/2845090",
  ISSN =         "2160-6455 (print), 2160-6463 (electronic)",
  bibdate =      "Tue Oct 18 11:51:13 MDT 2016",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tiis.bib",
  abstract =     "Many prior works in recommender systems focus on
                 improving the accuracy of item rating predictions. In
                 comparison, the areas of recommendation interfaces and
                 user-recommender interaction remain underexplored. In
                 this work, we look into the interaction of users with
                 the recommendation list, aiming to devise a method that
                 simplifies content discovery and minimizes the cost of
                 reaching an item of interest. We quantify this cost by
                 the number of user interactions (clicks and scrolls)
                 with the recommendation list. To this end, we propose
                 generalized linear search (GLS), an adaptive
                 combination of the established linear and generalized
                 search (GS) approaches. GLS leverages the advantages of
                 these two approaches, and we prove formally that it
                 performs at least as well as GS. We also conduct a
                 thorough experimental evaluation of GLS and compare it
                 to several baselines and heuristic approaches in both
                 an offline and live evaluation. The results of the
                 evaluation show that GLS consistently outperforms the
                 baseline approaches and is also preferred by users. In
                 summary, GLS offers an efficient and easy-to-use means
                 for content discovery in recommender systems.",
  acknowledgement = ack-nhfb,
  articleno =    "15",
  fjournal =     "ACM Transactions on Interactive Intelligent Systems
                 (TIIS)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}
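
%%% The interaction cost being minimized can be made concrete with a toy
%%% count of scrolls and clicks for a plain linear scan of a paginated
%%% recommendation list; the page size and one-click-to-open assumption
%%% are ours, and this is not the GLS policy itself.
%%%
%%%   def linear_scan_cost(target_rank, page_size=10):
%%%       # scrolls to bring the item into view, plus one click to open it
%%%       scrolls = (target_rank - 1) // page_size
%%%       return scrolls + 1
%%%
%%%   print(linear_scan_cost(27))  # 3: two scrolls, then one click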

@Article{Zhang:2016:BTE,
  author =       "Cheng Zhang and Anhong Guo and Dingtian Zhang and Yang
                 Li and Caleb Southern and Rosa I. Arriaga and Gregory
                 D. Abowd",
  title =        "Beyond the Touchscreen: an Exploration of Extending
                 Interactions on Commodity {Smartphones}",
  journal =      j-TIIS,
  volume =       "6",
  number =       "2",
  pages =        "16:1--16:??",
  month =        aug,
  year =         "2016",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/2954003",
  ISSN =         "2160-6455 (print), 2160-6463 (electronic)",
  bibdate =      "Tue Oct 18 11:51:13 MDT 2016",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tiis.bib",
  abstract =     "Most smartphones today have a rich set of sensors that
                 could be used to infer input (e.g., accelerometer,
                 gyroscope, microphone); however, the primary mode of
                 interaction is still limited to the front-facing
                 touchscreen and several physical buttons on the case.
                 To investigate the potential opportunities for
                 interactions supported by built-in sensors, we present
                 the implementation and evaluation of BeyondTouch, a
                 family of interactions to extend and enrich the input
                 experience of a smartphone. Using only existing sensing
                 capabilities on a commodity smartphone, we offer the
                 user a wide variety of additional inputs on the case
                 and the surface adjacent to the smartphone. Although
                 most of these interactions are implemented with machine
                 learning methods, compact and robust rule-based
                 detection methods can also be applied for recognizing
                 some interactions by analyzing physical characteristics
                 of tapping events on the phone. This article is an
                 extended version of Zhang et al. [2015], which solely
                 covered gestures implemented by machine learning
                 methods. We extended our previous work by adding
                 gestures implemented with rule-based methods, which
                  work well with different users across devices without
                 collecting any training data. We outline the
                 implementation of both machine learning and rule-based
                 methods for these interaction techniques and
                 demonstrate empirical evidence of their effectiveness
                 and usability. We also discuss the practicality of
                 BeyondTouch for a variety of application scenarios and
                 compare the two different implementation methods.",
  acknowledgement = ack-nhfb,
  articleno =    "16",
  fjournal =     "ACM Transactions on Interactive Intelligent Systems
                 (TIIS)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}
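
%%% A rule-based detector of the kind mentioned above can be as simple as
%%% a thresholded spike detector on accelerometer magnitude with a
%%% refractory window; the threshold and window values are invented, not
%%% BeyondTouch's tuned parameters.
%%%
%%%   def detect_taps(acc_mag, thresh=2.5, refractory=20):
%%%       # indices of spikes in |acceleration|, at most one per window
%%%       taps, last = [], -refractory
%%%       for i, v in enumerate(acc_mag):
%%%           if v > thresh and i - last >= refractory:
%%%               taps.append(i)
%%%               last = i
%%%       return taps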

@Article{Bosch:2016:UVA,
  author =       "Nigel Bosch and Sidney K. D'mello and Jaclyn Ocumpaugh
                 and Ryan S. Baker and Valerie Shute",
  title =        "Using Video to Automatically Detect Learner Affect in
                 Computer-Enabled Classrooms",
  journal =      j-TIIS,
  volume =       "6",
  number =       "2",
  pages =        "17:1--17:??",
  month =        aug,
  year =         "2016",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/2946837",
  ISSN =         "2160-6455 (print), 2160-6463 (electronic)",
  bibdate =      "Tue Oct 18 11:51:13 MDT 2016",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tiis.bib",
  abstract =     "Affect detection is a key component in intelligent
                 educational interfaces that respond to students'
                 affective states. We use computer vision and
                 machine-learning techniques to detect students' affect
                 from facial expressions (primary channel) and gross
                 body movements (secondary channel) during interactions
                 with an educational physics game. We collected data in
                 the real-world environment of a school computer lab
                 with up to 30 students simultaneously playing the game
                 while moving around, gesturing, and talking to each
                 other. The results were cross-validated at the student
                 level to ensure generalization to new students.
                 Classification accuracies, quantified as area under the
                 receiver operating characteristic curve (AUC), were
                 above chance (AUC of 0.5) for all the affective states
                 observed, namely, boredom (AUC = .610), confusion (AUC
                 = .649), delight (AUC = .867), engagement (AUC = .679),
                 frustration (AUC = .631), and for off-task behavior
                 (AUC = .816). Furthermore, the detectors showed
                 temporal generalizability in that there was less than a
                 2\% decrease in accuracy when tested on data collected
                 from different times of the day and from different
                 days. There was also some evidence of generalizability
                 across ethnicity (as perceived by human coders) and
                 gender, although with a higher degree of variability
                 attributable to differences in affect base rates across
                 subpopulations. In summary, our results demonstrate the
                 feasibility of generalizable video-based detectors of
                 naturalistic affect in a real-world setting, suggesting
                 that the time is ripe for affect-sensitive
                 interventions in educational games and other
                 intelligent interfaces.",
  acknowledgement = ack-nhfb,
  articleno =    "17",
  fjournal =     "ACM Transactions on Interactive Intelligent Systems
                 (TIIS)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}
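
%%% Student-level cross-validation with AUC scoring, as described above,
%%% might look like this sketch; the classifier is a generic stand-in and
%%% the .npy files are hypothetical, not the authors' detectors or data.
%%%
%%%   import numpy as np
%%%   from sklearn.model_selection import GroupKFold
%%%   from sklearn.linear_model import LogisticRegression
%%%   from sklearn.metrics import roc_auc_score
%%%
%%%   X = np.load("face_body_features.npy")
%%%   y = np.load("affect_labels.npy")      # e.g., boredom vs. not
%%%   student = np.load("student_ids.npy")  # grouping key per sample
%%%   aucs = []
%%%   for tr, te in GroupKFold(n_splits=5).split(X, y, groups=student):
%%%       clf = LogisticRegression(max_iter=1000).fit(X[tr], y[tr])
%%%       aucs.append(roc_auc_score(y[te], clf.predict_proba(X[te])[:, 1]))
%%%   print(np.mean(aucs))                  # 0.5 corresponds to chance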

@Article{Tanaka:2016:TSC,
  author =       "Hiroki Tanaka and Sakti Sakriani and Graham Neubig and
                 Tomoki Toda and Hideki Negoro and Hidemi Iwasaka and
                 Satoshi Nakamura",
  title =        "Teaching Social Communication Skills Through
                 Human-Agent Interaction",
  journal =      j-TIIS,
  volume =       "6",
  number =       "2",
  pages =        "18:1--18:??",
  month =        aug,
  year =         "2016",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/2937757",
  ISSN =         "2160-6455 (print), 2160-6463 (electronic)",
  bibdate =      "Tue Oct 18 11:51:13 MDT 2016",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tiis.bib",
  abstract =     "There are a large number of computer-based systems
                 that aim to train and improve social skills. However,
                 most of these do not resemble the training regimens
                 used by human instructors. In this article, we propose
                 a computer-based training system that follows the
                 procedure of social skills training (SST), a
                  well-established method for decreasing anxiety and
                  discomfort in social interaction and for acquiring
                  social skills. We attempt to automate the process of SST by
                 developing a dialogue system named the automated social
                 skills trainer, which teaches social communication
                 skills through human-agent interaction. The system
                 includes a virtual avatar that recognizes user speech
                 and language information and gives feedback to users.
                 Its design is based on conventional SST performed by
                 human participants, including defining target skills,
                 modeling, role-play, feedback, reinforcement, and
                 homework. We performed a series of three experiments
                 investigating (1) the advantages of using
                 computer-based training systems compared to human-human
                 interaction (HHI) by subjectively evaluating
                 nervousness, ease of talking, and ability to talk well;
                 (2) the relationship between speech language features
                 and human social skills; and (3) the effect of
                 computer-based training using our proposed system.
                 Results of our first experiment show that interaction
                 with an avatar decreases nervousness and increases the
                 user's subjective impression of his or her ability to
                 talk well compared to interaction with an unfamiliar
                 person. The experimental evaluation measuring the
                 relationship between social skill and speech and
                 language features shows that these features have a
                 relationship with social skills. Finally, experiments
                 measuring the effect of performing SST with the
                 proposed application show that participants
                 significantly improve their skill, as assessed by
                 separate evaluators, by using the system for 50
                 minutes. A user survey also shows that the users
                  thought our system was useful and easy to use, and that
                 interaction with the avatar felt similar to HHI.",
  acknowledgement = ack-nhfb,
  articleno =    "18",
  fjournal =     "ACM Transactions on Interactive Intelligent Systems
                 (TIIS)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}

@Article{Mahmoud:2016:AAN,
  author =       "Marwa Mahmoud and Tadas Baltrusaitis and Peter
                 Robinson",
  title =        "Automatic Analysis of Naturalistic Hand-Over-Face
                 Gestures",
  journal =      j-TIIS,
  volume =       "6",
  number =       "2",
  pages =        "19:1--19:??",
  month =        aug,
  year =         "2016",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/2946796",
  ISSN =         "2160-6455 (print), 2160-6463 (electronic)",
  bibdate =      "Tue Oct 18 11:51:13 MDT 2016",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tiis.bib",
  abstract =     "One of the main factors that limit the accuracy of
                 facial analysis systems is hand occlusion. As the face
                 becomes occluded, facial features are lost, corrupted,
                 or erroneously detected. Hand-over-face occlusions are
                 considered not only very common but also very
                 challenging to handle. However, there is empirical
                 evidence that some of these hand-over-face gestures
                 serve as cues for recognition of cognitive mental
                 states. In this article, we present an analysis of
                 automatic detection and classification of
                 hand-over-face gestures. We detect hand-over-face
                 occlusions and classify hand-over-face gesture
                 descriptors in videos of natural expressions using
                 multi-modal fusion of different state-of-the-art
                 spatial and spatio-temporal features. We show
                 experimentally that we can successfully detect face
                 occlusions with an accuracy of 83\%. We also
                  demonstrate that we can classify gesture descriptors
                  (hand shape, hand action, and facial region occluded)
                 significantly better than a na{\"\i}ve baseline. Our
                 detailed quantitative analysis sheds some light on the
                 challenges of automatic classification of
                 hand-over-face gestures in natural expressions.",
  acknowledgement = ack-nhfb,
  articleno =    "19",
  fjournal =     "ACM Transactions on Interactive Intelligent Systems
                 (TIIS)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}

@Article{Ishii:2016:URP,
  author =       "Ryo Ishii and Kazuhiro Otsuka and Shiro Kumano and
                 Junji Yamato",
  title =        "Using Respiration to Predict Who Will Speak Next and
                 When in Multiparty Meetings",
  journal =      j-TIIS,
  volume =       "6",
  number =       "2",
  pages =        "20:1--20:??",
  month =        aug,
  year =         "2016",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/2946838",
  ISSN =         "2160-6455 (print), 2160-6463 (electronic)",
  bibdate =      "Tue Oct 18 11:51:13 MDT 2016",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tiis.bib",
  abstract =     "Techniques that use nonverbal behaviors to predict
                  turn-changing situations (such as, in multiparty
                  meetings, who the next speaker will be and when the
                  next utterance will occur) have been receiving a lot of
                  attention in recent research. To build a model for
                  predicting these behaviors, we conducted a research
                 study to determine whether respiration could be
                 effectively used as a basis for the prediction. Results
                 of analyses of utterance and respiration data collected
                 from participants in multiparty meetings reveal that
                 the speaker takes a breath more quickly and deeply
                 after the end of an utterance in turn-keeping than in
                 turn-changing. They also indicate that the listener who
                 will be the next speaker takes a bigger breath more
                 quickly and deeply in turn-changing than the other
                 listeners. On the basis of these results, we
                 constructed and evaluated models for predicting the
                 next speaker and the time of the next utterance in
                 multiparty meetings. The results of the evaluation
                 suggest that the characteristics of the speaker's
                  inhalation right after an utterance unit (the points in
                  time at which the inhalation starts and ends after the
                  end of the utterance unit and the amplitude, slope, and
                  duration of the inhalation phase) are effective for
                  predicting the next speaker in multiparty meetings.
                  They further suggest that the characteristics of
                  listeners' inhalation (the points in time at which the
                  inhalation starts and ends after the end of the
                  utterance unit and the minimum and maximum inspiration,
                  amplitude, and slope of the inhalation phase) are
                 effective for predicting the next speaker. The start
                 time and end time of the next speaker's inhalation are
                 also useful for predicting the time of the next
                 utterance in turn-changing.",
  acknowledgement = ack-nhfb,
  articleno =    "20",
  fjournal =     "ACM Transactions on Interactive Intelligent Systems
                 (TIIS)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}
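
%%% A naive extraction of the inhalation features listed above (onset and
%%% end relative to the utterance end, amplitude, slope) from a sampled
%%% respiration waveform; boundary handling is simplified and all names
%%% are ours, not the authors' feature code.
%%%
%%%   import numpy as np
%%%
%%%   def inhalation_features(resp, t, utt_end):
%%%       # first rising span of the respiration signal after utt_end
%%%       m = t >= utt_end
%%%       r, ts = resp[m], t[m]
%%%       d = np.diff(r)
%%%       start = int(np.argmax(d > 0))                 # inhalation onset
%%%       end = start + int(np.argmax(d[start:] <= 0))  # exhalation begins
%%%       amp = r[end] - r[start]
%%%       slope = amp / max(ts[end] - ts[start], 1e-9)
%%%       return ts[start] - utt_end, ts[end] - utt_end, amp, slope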

@Article{Prendinger:2016:IBT,
  author =       "Helmut Prendinger and Nahum Alvarez and Antonio
                  Sanchez-Ruiz and Marc Cavazza and Jo{\~a}o Catarino and
                 Jo{\~a}o Oliveira and Rui Prada and Shuji Fujimoto and
                 Mika Shigematsu",
  title =        "Intelligent Biohazard Training Based on Real-Time Task
                 Recognition",
  journal =      j-TIIS,
  volume =       "6",
  number =       "3",
  pages =        "21:1--21:??",
  month =        oct,
  year =         "2016",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/2883617",
  ISSN =         "2160-6455 (print), 2160-6463 (electronic)",
  bibdate =      "Tue Oct 18 11:51:14 MDT 2016",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tiis.bib",
  abstract =     "Virtual environments offer an ideal setting to develop
                 intelligent training applications. Yet, their ability
                 to support complex procedures depends on the
                 appropriate integration of knowledge-based techniques
                 and natural interaction. In this article, we describe
                 the implementation of an intelligent rehearsal system
                 for biohazard laboratory procedures, based on the
                 real-time instantiation of task models from the
                 trainee's actions. A virtual biohazard laboratory has
                 been recreated using the Unity3D engine, in which users
                 interact with laboratory objects using keyboard/mouse
                 input or hand gestures through a Kinect device.
                 Realistic behavior for objects is supported by the
                 implementation of a relevant subset of common sense and
                 physics knowledge. User interaction with objects leads
                 to the recognition of specific actions, which are used
                 to progressively instantiate a task-based
                 representation of biohazard procedures. The dynamics of
                 this instantiation process supports trainee evaluation
                 as well as real-time assistance. This system is
                 designed primarily as a rehearsal system providing
                 real-time advice and supporting user performance
                 evaluation. We provide detailed examples illustrating
                 error detection and recovery, and results from on-site
                 testing with students from the Faculty of Medical
                 Sciences at Kyushu University. In the study, we
                 investigate the usability aspect by comparing
                 interaction with mouse and Kinect devices and the
                 effect of real-time task recognition on recovery time
                 after user mistakes.",
  acknowledgement = ack-nhfb,
  articleno =    "21",
  fjournal =     "ACM Transactions on Interactive Intelligent Systems
                 (TIIS)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}

@Article{Sappelli:2016:AIA,
  author =       "Maya Sappelli and Suzan Verberne and Wessel Kraaij",
  title =        "Adapting the Interactive Activation Model for Context
                 Recognition and Identification",
  journal =      j-TIIS,
  volume =       "6",
  number =       "3",
  pages =        "22:1--22:??",
  month =        oct,
  year =         "2016",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/2873067",
  ISSN =         "2160-6455 (print), 2160-6463 (electronic)",
  bibdate =      "Tue Oct 18 11:51:14 MDT 2016",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tiis.bib",
  abstract =     "In this article, we propose and implement a new model
                 for context recognition and identification. Our work is
                 motivated by the importance of ``working in context''
                 for knowledge workers to stay focused and productive. A
                 computer application that can identify the current
                 context in which the knowledge worker is working can
                 (among other things) provide the worker with contextual
                 support, for example, by suggesting relevant
                 information sources, or give an overview of how he or
                 she spent his or her time during the day. We present a
                 descriptive model for the context of a knowledge
                 worker. This model describes the contextual elements in
                 the work environment of the knowledge worker and how
                 these elements relate to each other. This model is
                 operationalized in an algorithm, the contextual
                 interactive activation model (CIA), which is based on
                 the interactive activation model by Rumelhart and
                 McClelland. It consists of a layered connected network
                 through which activation flows. We have tested CIA in a
                 context identification setting. In this case, the data
                 that we use as input is low-level computer interaction
                 logging data. We found that topical information and
                 entities were the most relevant types of information
                  for context identification. Overall, the proposed CIA
                  model is more effective than traditional supervised
                  methods in identifying the active context from sparse
                  input data, with less labeled training data.",
  acknowledgement = ack-nhfb,
  articleno =    "22",
  fjournal =     "ACM Transactions on Interactive Intelligent Systems
                 (TIIS)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}
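
%%% Spreading activation through a layered connected network, the
%%% mechanism CIA borrows from the interactive activation model, reduces
%%% to a simple iterative update; a schematic sketch with a decay term of
%%% our choosing, not the exact Rumelhart-McClelland equations.
%%%
%%%   import numpy as np
%%%
%%%   def spread_activation(W, inputs, steps=10, decay=0.1):
%%%       # W[i, j] is the weight of the edge from node j to node i
%%%       a = inputs.astype(float).copy()
%%%       for _ in range(steps):
%%%           a = (1.0 - decay) * a + W @ a  # activation flows along edges
%%%           a = np.clip(a, 0.0, 1.0)       # keep activations bounded
%%%       return a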

@Article{Aslan:2016:DEM,
  author =       "Ilhan Aslan and Andreas Uhl and Alexander
                 Meschtscherjakov and Manfred Tscheligi",
  title =        "Design and Exploration of Mid-Air Authentication
                 Gestures",
  journal =      j-TIIS,
  volume =       "6",
  number =       "3",
  pages =        "23:1--23:??",
  month =        oct,
  year =         "2016",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/2832919",
  ISSN =         "2160-6455 (print), 2160-6463 (electronic)",
  bibdate =      "Tue Oct 18 11:51:14 MDT 2016",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tiis.bib",
  abstract =     "Authentication based on touchless mid-air gestures
                 would benefit a multitude of ubiquitous computing
                 applications, especially those that are used in clean
                 environments (e.g., medical environments or clean
                 rooms). In order to explore the potential of mid-air
                 gestures for novel authentication approaches, we
                 performed a series of studies and design experiments.
                  First, we collected data from more than 200 users
                  during a 3-day science event organized within a
                  shopping mall. These data were used to investigate the
                  capabilities of the Leap Motion sensor, to observe
                  interaction in the wild, and to formulate an initial
                 design problem. The design problem, as well as the
                 design of mid-air gestures for authentication purposes,
                 were iterated in subsequent design activities. In a
                 final study with 13 participants, we evaluated two
                 mid-air gestures for authentication purposes in
                 different situations, including different body
                 positions. Our results highlight a need for different
                 mid-air gestures for differing situations and carefully
                 chosen constraints for mid-air gestures. We conclude by
                 proposing an exemplary system, which aims to provide
                 tool-support for designers and engineers, allowing them
                 to explore authentication gestures in the original
                 context of use and thus support them with the design of
                 contextual mid-air authentication gestures.",
  acknowledgement = ack-nhfb,
  articleno =    "23",
  fjournal =     "ACM Transactions on Interactive Intelligent Systems
                 (TIIS)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}

@Article{El-Glaly:2016:RWY,
  author =       "Yasmine N. El-Glaly and Francis Quek",
  title =        "Read What You Touch with Intelligent Audio System for
                 Non-Visual Interaction",
  journal =      j-TIIS,
  volume =       "6",
  number =       "3",
  pages =        "24:1--24:??",
  month =        oct,
  year =         "2016",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/2822908",
  ISSN =         "2160-6455 (print), 2160-6463 (electronic)",
  bibdate =      "Tue Oct 18 11:51:14 MDT 2016",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tiis.bib",
  abstract =     "Slate-type devices allow Individuals with Blindness or
                 Severe Visual Impairment (IBSVI) to read in place with
                 the touch of their fingertip by audio-rendering the
                 words they touch. Such technologies are helpful for
                 spatial cognition while reading. However, users have to
                 move their fingers slowly, or they may lose their place
                 on screen. Also, IBSVI may wander between lines without
                  realizing it. We addressed these two interaction
                 problems by introducing a dynamic speech-touch
                 interaction model and an intelligent reading support
                 system. With this model, the speed of the speech will
                 dynamically change with the user's finger speed. The
                 proposed model is composed of (1) an Audio Dynamics
                 Model and (2) an Off-line Speech Synthesis Technique.
                 The intelligent reading support system predicts the
                 direction of reading, corrects the reading word if the
                 user drifts, and notifies the user using a sonic gutter
                 to help him/her from straying off the reading line. We
                 tested the new audio dynamics model, the sonic gutter,
                 and the reading support model in two user studies. The
                 participants' feedback helped us fine-tune the
                 parameters of the two models. A decomposition study was
                 conducted to evaluate the main components of the
                 system. The results showed that both intelligent
                  reading support and tactile feedback are required to
                 achieve the best performance in terms of efficiency and
                 effectiveness. Finally, we ran an evaluation study
                 where the reading support system is compared to other
                  VoiceOver technologies. The results showed the
                  superiority of the reading support system with its
                 audio dynamics and intelligent reading support
                 components.",
  acknowledgement = ack-nhfb,
  articleno =    "24",
  fjournal =     "ACM Transactions on Interactive Intelligent Systems
                 (TIIS)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}
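
%%% The speech-touch coupling of the Audio Dynamics Model (speech rate
%%% following finger speed) can be sketched as a clamped linear mapping;
%%% the gain and bounds are illustrative, not the parameters tuned in the
%%% user studies.
%%%
%%%   def speech_rate(finger_speed, base=1.0, gain=0.5, max_rate=2.0):
%%%       # playback-rate multiplier that tracks the reading finger
%%%       return min(base + gain * max(finger_speed, 0.0), max_rate)
%%%
%%%   print(speech_rate(1.2))  # 1.6x playback at moderate finger speed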

@Article{Park:2016:MAP,
  author =       "Sunghyun Park and Han Suk Shim and Moitreya Chatterjee
                 and Kenji Sagae and Louis-Philippe Morency",
  title =        "Multimodal Analysis and Prediction of Persuasiveness
                 in Online Social Multimedia",
  journal =      j-TIIS,
  volume =       "6",
  number =       "3",
  pages =        "25:1--25:??",
  month =        oct,
  year =         "2016",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/2897739",
  ISSN =         "2160-6455 (print), 2160-6463 (electronic)",
  bibdate =      "Tue Oct 18 11:51:14 MDT 2016",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/tiis.bib",
  abstract =     "Our lives are heavily influenced by persuasive
                 communication, and it is essential in almost any type
                 of social interaction from business negotiation to
                 conversation with our friends and family. With the
                 rapid growth of social multimedia websites, it is
                 becoming ever more important and useful to understand
                 persuasiveness in the context of social multimedia
                 content online. In this article, we introduce a newly
                 created multimedia corpus of 1,000 movie review videos
                 with subjective annotations of persuasiveness and
                 related high-level characteristics or attributes (e.g.,
                 confidence). This dataset will be made freely available
                 to the research community. We designed our experiments
                 around the following five main research hypotheses.
                 First, we study if computational descriptors derived
                 from verbal and nonverbal behavior can be predictive of
                 persuasiveness. We further explore combining
                 descriptors from multiple communication modalities
                 (acoustic, verbal, para-verbal, and visual) for
                 predicting persuasiveness and compare with using a
                 single modality alone. Second, we investigate how
                 certain high-level attributes, such as credibility or
                 expertise, are related to persuasiveness and how the
                 information can be used in modeling and predicting
                 persuasiveness. Third, we investigate differences when
                 speakers are expressing a positive or negative opinion
                 and if the opinion polarity has any influence in the
                 persuasiveness prediction. Fourth, we further study if
                 gender has any influence in the prediction performance.
                 Last, we test if it is possible to make comparable
                 predictions of persuasiveness by only looking at thin
                 slices (i.e., shorter time windows) of a speaker's
                 behavior.",
  acknowledgement = ack-nhfb,
  articleno =    "25",
  fjournal =     "ACM Transactions on Interactive Intelligent Systems
                 (TIIS)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J1341",
}