@inproceedings{2fc80d2554794bbfa12cc7c45ec1d4a6,
title = "TEMPI: An Interposed MPI Library with a Canonical Representation of CUDA-aware Datatypes",
abstract = "MPI derived datatypes are an abstraction that simplifies handling of non-contiguous data in MPI applications. These datatypes are recursively constructed at runtime from primitive Named Types defined in the MPI standard. More recently, the development and deployment of CUDA-aware MPI implementations has encouraged the transition of distributed high-performance MPI codes to use GPUs. Such implementations allow MPI functions to directly operate on GPU buffers, easing integration of GPU compute into MPI codes. This work first presents a novel datatype handling strategy for nested strided datatypes, which finds a middle ground between the specialized or generic handling in prior work. This work also shows that the performance characteristics of non-contiguous data handling can be modeled with empirical system measurements, and used to transparently improve MPI_Send/Recv latency. Finally, despite substantial attention to non-contiguous GPU data and CUDA-aware MPI implementations, good performance cannot be taken for granted. This work demonstrates its contributions through an MPI interposer library, TEMPI. TEMPI can be used with existing MPI deployments without system or application changes. Ultimately, the interposed-library model of this work demonstrates MPI_Pack speedup of up to 242000x and MPI_Send speedup of up to 59000x compared to the MPI implementation deployed on a leadership-class supercomputer. This yields speedup of more than 917x in a 3D halo exchange with 3072 processes. ",
keywords = "CUDA, derived datatype, MPI, Spectrum MPI, Summit",
author = "Carl Pearson and Kun Wu and I-Hsin Chung and Jinjun Xiong and Wen-Mei Hwu",
note = "Funding Information: This work is supported by the IBM-ILLINOIS Center for Cognitive Computing Systems Research (C3SR) - a research collaboration as part of the IBM AI Horizon Network. This research used resources of the Oak Ridge Leadership Computing Facility at the Oak Ridge National Laboratory, which is supported by the Office of Science of the U.S. Department of Energy under Contract No. DE-AC05-00OR22725. This work utilizes resources supported by the National Science Foundation{\textquoteright}s Major Research Instrumentation program, grant {\#}1725729, as well as the University of Illinois at Urbana-Champaign. The authors would also like to thank Dawei Mu, Omer Anjum, and Mert Hidayetoglu. Publisher Copyright: {\textcopyright} 2020 ACM.; 30th International Symposium on High-Performance Parallel and Distributed Computing, HPDC 2021; Conference date: 21-06-2021 Through 25-06-2021",
year = "2021",
month = jun,
day = "21",
doi = "10.1145/3431379.3460645",
language = "English (US)",
publisher = "Association for Computing Machinery, Inc.",
pages = "95--106",
booktitle = "HPDC 2021 - Proceedings of the 30th International Symposium on High-Performance Parallel and Distributed Computing",
}
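
The abstract above refers to nested strided derived datatypes and to CUDA-aware MPI calls that operate directly on GPU buffers. The C sketch below is illustrative only and is not taken from the paper: it builds one such nested strided datatype (a vector nested inside an hvector, describing one face of a 3D halo region) and hands a cudaMalloc'd buffer straight to MPI_Send/MPI_Recv, as a CUDA-aware implementation permits. The grid dimensions, halo width, and the rank-0-sends/rank-1-receives pairing are assumptions made for the example; TEMPI is interposed in front of the deployed MPI and would require no changes to code like this.

/* Illustrative sketch (not code from the paper): a nested strided MPI derived
 * datatype over a GPU buffer. Run with at least two ranks under a CUDA-aware
 * MPI, e.g. mpirun -n 2 ./halo_face. NX/NY/NZ and HALO are assumed values. */
#include <mpi.h>
#include <cuda_runtime.h>

int main(int argc, char **argv) {
    MPI_Init(&argc, &argv);
    int rank;
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);

    /* Local grid allocated directly on the GPU; CUDA-aware MPI reads/writes it. */
    const int NX = 256, NY = 256, NZ = 256; /* assumed grid dimensions */
    const int HALO = 4;                     /* assumed halo width */
    float *grid;
    cudaMalloc((void **)&grid, sizeof(float) * NX * NY * NZ);

    /* Inner strided type: NY rows, each contributing HALO contiguous floats,
     * with consecutive rows NX floats apart. */
    MPI_Datatype plane;
    MPI_Type_vector(NY, HALO, NX, MPI_FLOAT, &plane);

    /* Outer strided type: NZ such planes, one XY slab apart (stride in bytes).
     * Nesting the two strided types yields a nested strided derived datatype. */
    MPI_Datatype face;
    MPI_Type_create_hvector(NZ, 1, (MPI_Aint)NX * NY * sizeof(float), plane, &face);
    MPI_Type_commit(&face);

    /* The GPU pointer and derived datatype go straight into MPI_Send/MPI_Recv. */
    if (rank == 0) {
        MPI_Send(grid, 1, face, 1, 0, MPI_COMM_WORLD);
    } else if (rank == 1) {
        MPI_Recv(grid, 1, face, 0, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
    }

    MPI_Type_free(&face);
    MPI_Type_free(&plane);
    cudaFree(grid);
    MPI_Finalize();
    return 0;
}

With an unmodified MPI, the non-contiguous GPU send above is handled however the deployed implementation chooses; interposing a library such as TEMPI between the application and that MPI is what the paper evaluates.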