@inproceedings{4efe977bcbcf47059995b4545aac5aa6,
title = "Primal-Dual Algorithm for Distributed Reinforcement Learning: Distributed GTD",
abstract = "The goal of this paper is to study a distributed version of the gradient temporal-difference (GTD) learning algorithm for multi-agent Markov decision processes (MDPs). Temporal-difference (TD) learning is a reinforcement learning (RL) algorithm that learns an infinite-horizon discounted cost function (or value function) for a given fixed policy without knowledge of the model. In the distributed RL case, each agent receives a local reward through local processing. Information exchange over a sparse communication network allows the agents to learn the global value function corresponding to a global reward, which is the sum of the local rewards. In this paper, the problem is converted into a constrained convex optimization problem with a consensus constraint. We then propose a primal-dual distributed GTD algorithm and prove that it almost surely converges to a set of stationary points of the optimization problem.",
author = "Donghwan Lee and Hyungjin Yoon and Naira Hovakimyan",
note = "Publisher Copyright: {\textcopyright} 2018 IEEE; 57th IEEE Conference on Decision and Control, CDC 2018; Conference date: 17-12-2018 through 19-12-2018",
year = "2018",
month = dec,
doi = "10.1109/CDC.2018.8619839",
language = "English (US)",
series = "Proceedings of the IEEE Conference on Decision and Control",
publisher = "Institute of Electrical and Electronics Engineers Inc.",
pages = "1967--1972",
booktitle = "2018 IEEE Conference on Decision and Control, CDC 2018",
address = "United States",
}
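
The abstract describes a primal-dual scheme in which each agent runs a local GTD-style update on its own reward while dual variables enforce consensus of the value-function weights over a sparse communication graph. The following Python sketch illustrates that structure only; the MDP, the shared feature map, the local rewards, the ring communication graph, the step sizes, and the GTD2-style gradient form are all illustrative assumptions, not the paper's exact updates. See the paper (DOI above) for the precise algorithm and its convergence conditions.

import numpy as np

rng = np.random.default_rng(0)

N_AGENTS, N_STATES, N_FEATS = 3, 5, 3
GAMMA, ALPHA, BETA = 0.9, 0.01, 0.05          # illustrative step sizes

PHI = rng.normal(size=(N_STATES, N_FEATS))     # shared feature map phi(s) (assumed)
# Undirected ring communication graph (assumed)
neighbors = {i: [(i - 1) % N_AGENTS, (i + 1) % N_AGENTS] for i in range(N_AGENTS)}

theta = np.zeros((N_AGENTS, N_FEATS))  # primal value-function weights, one copy per agent
w = np.zeros((N_AGENTS, N_FEATS))      # GTD auxiliary (correction) weights
v = np.zeros((N_AGENTS, N_FEATS))      # dual variables for the consensus constraint

s = 0
for t in range(20000):
    s_next = int(rng.integers(N_STATES))       # fixed behavior policy: uniform transitions (assumed)
    phi, phi_next = PHI[s], PHI[s_next]
    theta_old, v_old = theta.copy(), v.copy()  # synchronous update across agents
    for i in range(N_AGENTS):
        # Local reward seen only by agent i; the global reward is the sum over agents
        r_i = 1.0 if (s_next == N_STATES - 1 and i == 0) else 0.0
        delta = r_i + GAMMA * theta_old[i] @ phi_next - theta_old[i] @ phi  # local TD error
        dis = sum(theta_old[i] - theta_old[j] for j in neighbors[i])        # local disagreement
        dual = sum(v_old[i] - v_old[j] for j in neighbors[i])               # dual coupling
        # GTD2-style primal descent step plus consensus penalty and dual coupling
        theta[i] = theta_old[i] + ALPHA * ((phi - GAMMA * phi_next) * (phi @ w[i]) - dis - dual)
        w[i] += BETA * (delta - phi @ w[i]) * phi
        # Dual ascent on the consensus constraint theta_1 = ... = theta_N
        v[i] = v_old[i] + ALPHA * dis
    s = s_next

print("max disagreement across agents:", float(np.abs(theta - theta.mean(axis=0)).max()))

Running the sketch, the disagreement between agents' weight vectors shrinks as the dual variables accumulate the consensus violation, which is the qualitative behavior the paper's convergence result formalizes.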