@inproceedings{404d436e35c640a6b484552721e1dbac,
title = "Convex Q-Learning",
abstract = "It is well known that the extension of Watkins' algorithm to general function approximation settings is challenging: does the 'projected Bellman equation' have a solution? If so, is the solution useful in the sense of generating a good policy? And, if the preceding questions are answered in the affirmative, is the algorithm consistent? These questions are unanswered even in the special case of Q-function approximations that are linear in the parameter. The challenge seems paradoxical, given the long history of convex analytic approaches to dynamic programming. Our main contributions are summarized as follows: (i)A new class of convex Q-learning algorithms is introduced based on a convex relaxation of the Bellman equation. Convergence is established under general conditions for linear function approximation. (ii)A batch implementation appears similar to LSPI and DQN algorithms, but the difference is substantial: while convex Q-learning solves a convex program that approximates the Bellman equation, theory for DQN is no stronger than for Watkins algorithm with function approximation. These results are obtained for deterministic nonlinear systems with total cost criterion. Extensions are proposed.",
author = "Fan Lu and Mehta, {Prashant G.} and Meyn, {Sean P.} and Gergely Neu",
note = "Publisher Copyright: {\textcopyright} 2021 American Automatic Control Council.; 2021 American Control Conference, ACC 2021 ; Conference date: 25-05-2021 Through 28-05-2021",
year = "2021",
month = may,
day = "25",
doi = "10.23919/ACC50511.2021.9483244",
language = "English (US)",
series = "Proceedings of the American Control Conference",
publisher = "Institute of Electrical and Electronics Engineers Inc.",
pages = "4749--4756",
booktitle = "2021 American Control Conference, ACC 2021",
address = "United States",
}
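
% A minimal LaTeX sketch of the convex relaxation referenced in the abstract,
% under the setting stated there (deterministic nonlinear system, total cost
% criterion). The notation below is one standard form of the LP approach to
% dynamic programming, not necessarily the paper's exact formulation:
%
%   Bellman equation, for dynamics $x_{k+1} = F(x_k, u_k)$ with per-stage
%   cost $c$:
%     $Q^*(x,u) = c(x,u) + \underline{Q}^*(F(x,u))$,
%     where $\underline{Q}(x) := \min_u Q(x,u)$.
%
%   Convex relaxation, with $Q^\theta = \theta^\top \psi$ linear in the
%   parameter $\theta$:
%     $\max_\theta \; \langle \mu, Q^\theta \rangle$
%     subject to $Q^\theta(x,u) \le c(x,u) + \underline{Q}^\theta(F(x,u))$
%     for all $(x,u)$.
%
%   Since $\theta \mapsto \underline{Q}^\theta(x)$ is a minimum of linear
%   functions and hence concave, each constraint is convex in $\theta$, so
%   the program is a convex relaxation of the Bellman fixed-point equation.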