@inproceedings{e3821f30bcb14f86bd22c8cac973de0e,
title = "Active reinforcement learning",
abstract = "When the transition probabilities and rewards of a Markov Decision Process (MDP) are known, an agent can obtain the optimal policy without any interaction with the environment. However, exact transition probabilities are difficult for experts to specify. One option left to an agent is a long and potentially costly exploration of the environment. In this paper, we propose another alternative: given initial (possibly inaccurate) specification of the MDP, the agent determines the sensitivity of the optimal policy to changes in transitions and rewards. It then focuses its exploration on the regions of space to which the optimal policy is most sensitive. We show that the proposed exploration strategy performs well on several control and planning problems.",
author = "Arkady Epshteyn and Adam Vogel and Gerald Dejong",
year = "2008",
doi = "10.1145/1390156.1390194",
language = "English (US)",
isbn = "9781605582054",
series = "Proceedings of the 25th International Conference on Machine Learning",
publisher = "Association for Computing Machinery (ACM)",
pages = "296--303",
booktitle = "Proceedings of the 25th International Conference on Machine Learning",
address = "United States",
note = "25th International Conference on Machine Learning ; Conference date: 05-07-2008 Through 09-07-2008",
}