@inproceedings{d6291ce5f35b448fad3e77f90edb3efa,
title = "Q-learning and Pontryagin's minimum principle",
abstract = "Q-learning is a technique used to compute an optimal policy for a controlled Markov chain based on observations of the system controlled using a non-optimal policy. It has proven to be effective for models with finite state and action spaces. This paper establishes connections between Q-learning and nonlinear control of continuous-time models with general state space and general action space. The main contributions are summarized as follows. (i) The starting point is the observation that the ``Q-function'' appearing in Q-learning algorithms is an extension of the Hamiltonian that appears in the Minimum Principle. Based on this observation, we introduce the steepest descent Q-learning algorithm to obtain the optimal approximation of the Hamiltonian within a prescribed function class. (ii) A transformation of the optimality equations is performed based on the adjoint of a resolvent operator. This is used to construct a consistent algorithm based on stochastic approximation that requires only causal filtering of observations. (iii) Several examples are presented to illustrate the application of these techniques, including applications to distributed control of multi-agent systems.",
author = "Prashant Mehta and Sean Meyn",
year = "2009",
doi = "10.1109/CDC.2009.5399753",
language = "English (US)",
isbn = "9781424438716",
series = "Proceedings of the IEEE Conference on Decision and Control",
publisher = "Institute of Electrical and Electronics Engineers Inc.",
pages = "3598--3605",
booktitle = "Proceedings of the 48th IEEE Conference on Decision and Control held jointly with 2009 28th Chinese Control Conference, CDC/CCC 2009",
address = "United States",
note = "48th IEEE Conference on Decision and Control held jointly with 2009 28th Chinese Control Conference, CDC/CCC 2009 ; Conference date: 15-12-2009 through 18-12-2009",
}