@inproceedings{fce6d95e9a28420dabdae8dfdcacff46,
title = "KILM: Knowledge Injection into Encoder-Decoder Language Models",
abstract = "Large pre-trained language models (PLMs) have been shown to retain implicit knowledge within their parameters. To enhance this implicit knowledge, we propose Knowledge Injection into Language Models (KILM), a novel approach that injects entity-related knowledge into encoder-decoder PLMs via a generative knowledge infilling objective through continued pre-training. This is done without architectural modifications to the PLMs or adding additional parameters. Experimental results over a suite of knowledge-intensive tasks spanning numerous datasets show that KILM enables models to retain more knowledge and hallucinate less while preserving their original performance on general NLU and NLG tasks. KILM also demonstrates improved zero-shot performance on tasks such as entity disambiguation, outperforming state-of-the-art models with 30x more parameters.",
author = "Yan Xu and Mahdi Namazifar and Devamanyu Hazarika and Aishwarya Padmakumar and Yang Liu and Dilek Hakkani-T{\"u}r",
note = "Publisher Copyright: {\textcopyright} 2023 Association for Computational Linguistics; 61st Annual Meeting of the Association for Computational Linguistics, ACL 2023; Conference date: 09-07-2023 through 14-07-2023",
year = "2023",
doi = "10.18653/v1/2023.acl-long.275",
language = "English (US)",
series = "Proceedings of the Annual Meeting of the Association for Computational Linguistics",
publisher = "Association for Computational Linguistics (ACL)",
pages = "5013--5035",
booktitle = "Proceedings of the 61st Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)",
address = "United States",
}