@inproceedings{90117f44236349b78271d9060c51ab74,
  title         = {{VACE} Multimodal Meeting Corpus},
  abstract      = {In this paper, we report on the infrastructure we have developed to support our research on multimodal cues for understanding meetings. With our focus on multimodality, we investigate the interaction among speech, gesture, posture, and gaze in meetings. For this purpose, a high quality multimodal corpus is being produced.},
  author        = {Chen, Lei and Rose, R. Travis and Qiao, Ying and Kimbara, Irene and Parrill, Fey and Welji, Haleema and Han, Tony Xu and Tu, Jilin and Huang, Zhongqiang and Harper, Mary and Quek, Francis and Xiong, Yingen and McNeill, David and Tuttle, Ronald and Huang, Thomas},
  year          = {2006},
  doi           = {10.1007/11677482_4},
  language      = {English (US)},
  isbn          = {3540325492},
  series        = {Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics)},
  publisher     = {Springer},
  pages         = {40--51},
  booktitle     = {Machine Learning for Multimodal Interaction - Second International Workshop, {MLMI} 2005, Revised Selected Papers},
  address       = {Germany},
  internal-note = {address should be the publisher's city, not a country -- TODO confirm (Springer LNCS is typically Berlin, Heidelberg); LNCS volume number missing},
  note          = {2nd International Workshop on Machine Learning for Multimodal Interaction, MLMI 2005; Conference date: 11-07-2005 Through 13-07-2005},
}