@inproceedings{8bcacd104a7f4d6e9afb65f67d936df7,
title = "AudioXtend: Assisted Reality Visual Accompaniments for Audiobook Storytelling During Everyday Routine Tasks",
abstract = "The rise of multitasking in contemporary lifestyles has positioned audio-first content as an essential medium for information consumption. We present AudioXtend, an approach to augment audiobook experiences during daily tasks by integrating glanceable, AI-generated visuals through optical see-through head-mounted displays (OHMDs). Our initial study showed that these visual augmentations not only preserved users' primary task efficiency but also dramatically enhanced immediate auditory content recall by 33.3% and 7-day recall by 32.7%, alongside a marked improvement in narrative engagement. Through participatory design workshops involving digital arts designers, we crafted a set of design principles for visual augmentations that are attuned to the requirements of multitaskers. Finally, a 3-day take-home field study further revealed new insights for everyday use, underscoring the potential of assisted reality (aR) to enhance heads-up listening and incidental learning experiences.",
keywords = "Assisted Reality, Audiobook Augmentation, Heads-Up Computing, Incidental learning, Optical See-Through Head-Mounted Displays, Recall Enhancement, Smart-glasses, Visual Storytelling",
author = "Tan, {Felicia Fang Yi} and Peisen Xu and Ashwin Ram and Suen, {Wei Zhen} and Shengdong Zhao and Yun Huang and Christophe Hurter",
note = "We are thankful for the support that this project has received from multiple sources. It is funded by the National Research Foundation, Singapore, under two programs: the AI Singapore Programme (AISG Award No: AISG2-RP-2020-016) and the Campus for Research Excellence and Technological Enterprise (CREATE) as part of the DesCartes programme. The Ministry of Education, Singapore, has also contributed through its MOE Academic Research Fund Tier 2 programme (MOE-T2EP20221-0010). Additionally, the CityU Start-up Grant has provided partial support. We extend our gratitude to Muhammad Fahim Tajwar for his assistance with the design of figures, Yu-Rou Lin, and Synteraction Lab members for their valuable feedback in different stages of our research.; 2024 CHI Conference on Human Factors in Computing Sytems, CHI 2024 ; Conference date: 11-05-2024 Through 16-05-2024",
year = "2024",
month = may,
day = "11",
doi = "10.1145/3613904.3642514",
language = "English (US)",
series = "Conference on Human Factors in Computing Systems - Proceedings",
publisher = "Association for Computing Machinery",
booktitle = "CHI 2024 - Proceedings of the 2024 CHI Conference on Human Factors in Computing Sytems",
address = "United States",
}