@incollection{3ebd903ccfb8493598ae9ef76542f91e,
  author    = {Dressman, Mark},
  title     = {Multimodality and Language Learning},
  editor    = {Dressman, Mark and Sadler, Randall William},
  booktitle = {The Handbook of Informal Language Learning},
  publisher = {Wiley-Blackwell},
  address   = {Hoboken, NJ},
  year      = {2020},
  month     = feb,
  pages     = {39--55},
  doi       = {10.1002/9781119472384.ch3},
  language  = {English (US)},
  abstract  = {The term multimodality refers to the combination of multiple sensory and communicative modes, such as sight, sound, print, images, video, music, and so on, that produce meaning in any given message. In a sense, all communication is multimodal in that even in pre‐digital times meaning was produced not solely through writing but through choice of font, illustrations, page design, and so on, and in spoken communications through both linguistic and paralinguistic means. In the digital age, multimodality has become even more central to communication, and this is especially true for language learners, who depend on the multiplicity of channels available on a screen to help them “pick up” meaning in a target language. The question this chapter addresses is, how does this happen? Is it that the different modes function redundantly as their own type of “language,” or is it that the different modes contribute through the coordination of different types of signals governed by different principles of signification? In this chapter I argue for the latter explanation over the former, and in conclusion propose four principles of multimodality and informal language learning based on the work of C. S. Peirce and Paul Grice.},
  keywords  = {multimodality, semiotics, icon, index, symbol, pragmatism, cooperative principle},
}