@inproceedings{17d8c7c7f1204cfb8a22093be506095f,
  title     = {Agent-Based Model Construction Using Inverse Reinforcement Learning},
  abstract  = {Agent-based modeling (ABM) assumes that behavioral rules affecting an agent's states and actions are known. However, discovering these rules is often challenging and requires deep insight about an agent's behaviors. Inverse reinforcement learning (IRL) can complement ABM by providing a systematic way to find behavioral rules from data. IRL frames learning behavioral rules as a problem of recovering motivations from observed behavior and generating rules consistent with these motivations. In this paper, we propose a method to construct an agent-based model directly from data using IRL. We explain each step of the proposed method and describe challenges that may occur during implementation. Our experimental results show that the proposed method can extract rules and construct an agent-based model with rich but concise behavioral rules for agents while still maintaining aggregate-level properties.},
  author    = {Lee, Kamwoo and Rucker, Mark and Scherer, William T. and Beling, Peter A. and Gerber, Matthew S. and Kang, Hyojung},
  year      = {2017},
  month     = jun,
  day       = {28},
  doi       = {10.1109/WSC.2017.8247872},
  language  = {English (US)},
  series    = {Proceedings - Winter Simulation Conference},
  publisher = {Institute of Electrical and Electronics Engineers Inc.},
  pages     = {1264--1275},
  editor    = {Chan, Victor},
  booktitle = {2017 Winter Simulation Conference, WSC 2017},
  address   = {United States},
  note      = {2017 Winter Simulation Conference, WSC 2017 ; Conference date: 03-12-2017 Through 06-12-2017},
}