@inproceedings{c9bc3ddd264f439fb790c065e83191ea,
  title     = {Empathetic Video Experience through Timely Multimodal Interaction},
  abstract  = {In this paper, we describe a video playing system, named ``Empatheater,'' that is controlled by multimodal interaction. As the video is played, the user must interact and emulate predefined video ``events'' through multimodal guidance and whole body interaction (e.g. following the main character's motion or gestures). Without the timely interaction, the video stops. The system shows guidance information as how to properly react and continue the video playing. The purpose of such a system is to provide indirect experience (of the given video content) by eliciting the user to mimic and empathize with the main character. The user is given the illusion (suspended disbelief) of playing an active role in the unraveling video content. We discuss various features of the newly proposed interactive medium. In addition, we report on the results of the pilot study that was carried out to evaluate its user experience compared to passive video viewing and keyboard based video control.},
  keywords  = {empathy, interactive video, multimodality, user experience, user guidance},
  author    = {Lee, Myunghee and Kim, {Gerard J.}},
  year      = {2010},
  doi       = {10.1145/1891903.1891948},
  language  = {English},
  isbn      = {9781450304146},
  series    = {International Conference on Multimodal Interfaces and the Workshop on Machine Learning for Multimodal Interaction, ICMI-MLMI 2010},
  booktitle = {International Conference on Multimodal Interfaces and the Workshop on Machine Learning for Multimodal Interaction, ICMI-MLMI 2010},
  note      = {1st International Conference on Multimodal Interfaces and the Workshop on Machine Learning for Multimodal Interaction, ICMI-MLMI 2010 ; Conference date: 08-11-2010 Through 10-11-2010},
}