@inproceedings{39b814fd927d430fb3bd51de5bd826dd,
title = "Audio-visual affective expression recognition",
abstract = "Automatic affective expression recognition has attracted increasing attention from researchers across disciplines; it promises to contribute to a new paradigm for human-computer interaction (affect-sensitive interfaces, socially intelligent environments) and to advance research in affect-related fields including psychology, psychiatry, and education. Multimodal information integration is a process that enables humans to assess affective states robustly and flexibly. To capture the richness and subtlety of human emotional behavior, a computer should likewise be able to integrate information from multiple sensors. In this paper we introduce our efforts toward machine understanding of audio-visual affective behavior, based on both deliberate and spontaneous displays. We present several promising methods for integrating information from the audio and visual modalities. Our experiments show the advantage of audio-visual fusion in affective expression recognition over audio-only and visual-only approaches.",
keywords = "Affect recognition, Affective computing, Emotion recognition, Human computing, Multimodal human-computer interaction",
author = "Huang, {Thomas S.} and Zeng, Zhihong",
year = "2007",
month = dec,
day = "1",
doi = "10.1117/12.782299",
language = "English (US)",
isbn = "9780819469526",
series = "Proceedings of SPIE - The International Society for Optical Engineering",
booktitle = "MIPPR 2007: Pattern Recognition and Computer Vision",
note = "MIPPR 2007: Pattern Recognition and Computer Vision ; Conference date: 15-11-2007 Through 17-11-2007",
}