@inproceedings{05e6c6dca58e484cb3ed22601cf2d1b3,
title = "Abstraction selection in model-based reinforcement learning",
abstract = "State abstractions are often used to reduce the complexity of model-based reinforcement learning when only limited quantities of data are available. However, choosing the appropriate level of abstraction is an important problem in practice. Existing approaches have theoretical guarantees only under strong assumptions on the domain or with asymptotically large amounts of data; in this paper we propose a simple algorithm based on statistical hypothesis testing that comes with a finite-sample guarantee under assumptions on candidate abstractions. Our algorithm trades off the low approximation error of finer abstractions against the low estimation error of coarser abstractions, resulting in a loss bound that depends only on the quality of the best available abstraction and is polynomial in the planning horizon.",
author = "Nan Jiang and Alex Kulesza and Satinder Singh",
year = "2015",
language = "English (US)",
series = "32nd International Conference on Machine Learning, ICML 2015",
publisher = "International Machine Learning Society (IMLS)",
pages = "179--188",
editor = "Francis Bach and David Blei",
booktitle = "32nd International Conference on Machine Learning, ICML 2015",
note = "32nd International Conference on Machine Learning, ICML 2015; Conference dates: 6 July 2015 through 11 July 2015",
}