@inproceedings{cb46ff6b2078484ca6aec3bf05cdee9f,
  title     = {Achieving Computation-Communication Overlap with Overdecomposition on {GPU} Systems},
  abstract  = {The landscape of high performance computing is shifting towards a collection of multi-GPU nodes, widening the gap between on-node compute and off-node communication capabilities. Consequently, the ability to tolerate communication latencies and maximize utilization of the compute hardware are becoming increasingly important in achieving high performance. Overdecomposition has been successfully adopted on traditional CPU-based systems to achieve computation-communication overlap, significantly reducing the impact of communication on application performance. However, it has been unclear whether overdecomposition can provide the same benefits on modern GPU systems. In this work, we address the challenges in achieving computation-communication overlap with overdecomposition on GPU systems using the Charm++ parallel programming system. By prioritizing communication with CUDA streams in the application and supporting asynchronous progress of GPU operations in the Charm++ runtime system, we obtain improvements in overall performance of up to 50% and 47% with proxy applications Jacobi3D and MiniMD, respectively.},
  keywords  = {GPU computing, asynchronous task-based runtime, computation-communication overlap, overdecomposition},
  author    = {Choi, Jaemin and Richards, David F. and Kale, Laxmikant V.},
  note      = {Funding Information: This work was performed under the auspices of the U.S. Department of Energy (DOE) by Lawrence Livermore National Laboratory under Contract DE-AC52-07NA27344 (LLNL-CONF-814558). Funding Information: This research used resources of the Oak Ridge Leadership Computing Facility at the Oak Ridge National Laboratory, which is supported by the Office of Science of the U.S. DOE under Contract No. DE-AC05-00OR22725. Funding Information: This research was supported by the Exascale Computing Project (17-SC-20-SC), a collaborative effort of the U.S. DOE Office of Science and the National Nuclear Security Administration. Publisher Copyright: {\textcopyright} 2020 IEEE.; 5th IEEE/ACM International IEEE Workshop on Extreme Scale Programming Models and Middleware, ESPM2 2020 ; Conference date: 11-11-2020},
  year      = {2020},
  month     = nov,
  doi       = {10.1109/ESPM251964.2020.00006},
  language  = {English (US)},
  series    = {Proceedings of ESPM2 2020: 5th International IEEE Workshop on Extreme Scale Programming Models and Middleware, Held in conjunction with SC 2020: The International Conference for High Performance Computing, Networking, Storage and Analysis},
  publisher = {Institute of Electrical and Electronics Engineers Inc.},
  pages     = {1--10},
  booktitle = {Proceedings of ESPM2 2020},
  address   = {United States},
}