@inproceedings{b780537249b6437185e039dcda4b042c,
title = "Using node information to implement MPI cartesian topologies",
abstract = "The MPI API provides support for Cartesian process topologies, including the option to reorder the processes to achieve better communication performance. But MPI implementations rarely provide anything useful for the reorder option, typically ignoring it. One argument made is that modern interconnects are fast enough that applications are less sensitive to the exact layout of processes onto the system. However, intranode communication performance is much greater than internode communication performance. In this paper, we show a simple approach that takes into account only information about which MPI processes are on the same node to provide a fast and effective implementation of the MPI Cartesian topology. While not optimal, this approach provides a significant improvement over all tested MPI implementations and provides an implementation that may be used as the default in any MPI implementation of MPI_Cart_create.",
keywords = "Cartesian process topology, MPI, Message passing, Process topology",
author = "Gropp, {William D.}",
note = "Funding Information: This research is part of the Blue Waters sustained-petascale computing project, which is supported by the National Science Foundation (award number OCI 07–25070) and the state of Illinois. The author is grateful to Rajeev Thakur for runs on the Theta system at Argonne National Laboratory and to Torsten Hoefler and Timo Schneider for runs on the Piz Daint system at the Swiss National Supercomputing Center. The author also thanks the reviewers for their careful reading and helpful suggestions for improving the presentation. Funding Information: This research is part of the Blue Waters sustained-petascale computing project, which is supported by the National Science Foundation (award number OCI 07-25070) and the state of Illinois. The author is grateful to Rajeev Thakur for runs on the Theta system at Argonne National Laboratory and to Torsten Hoefler and Timo Schneider for runs on the Piz Daint system at the Swiss National Supercomputing Center. The author also thanks the reviewers for their careful reading and helpful suggestions for improving the presentation. Publisher Copyright: {\textcopyright} 2018 Association for Computing Machinery.; 25th European MPI Users' Group Meeting, EuroMPI 2018 ; Conference date: 23-09-2018 Through 26-09-2018",
year = "2018",
month = sep,
day = "23",
doi = "10.1145/3236367.3236377",
language = "English (US)",
series = "ACM International Conference Proceeding Series",
publisher = "Association for Computing Machinery",
booktitle = "EuroMPI 2018 - Proceedings of the 25th European MPI Users' Group Meeting",
address = "United States",
}
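
The following is a minimal sketch, not the paper's implementation, illustrating the standard MPI calls the abstract refers to: MPI_Comm_split_type with MPI_COMM_TYPE_SHARED exposes which ranks share a node (the "node information"), and MPI_Cart_create with reorder = 1 is the call whose reordering the paper proposes to make effective. The 2-D grid and key choices here are arbitrary assumptions for illustration.

#include <mpi.h>
#include <stdio.h>

int main(int argc, char **argv)
{
    MPI_Init(&argc, &argv);

    int rank, size;
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &size);

    /* Node information: MPI-3 splits the world communicator so that
       ranks on the same node land in the same shared-memory
       communicator.  This is the kind of information the paper's
       approach uses to choose a rank permutation. */
    MPI_Comm nodecomm;
    MPI_Comm_split_type(MPI_COMM_WORLD, MPI_COMM_TYPE_SHARED, 0,
                        MPI_INFO_NULL, &nodecomm);
    int noderank;
    MPI_Comm_rank(nodecomm, &noderank);

    /* Cartesian topology: reorder = 1 permits the implementation to
       permute ranks for better communication locality; as the abstract
       notes, most implementations currently ignore it.  A 2-D
       non-periodic grid is assumed here purely for illustration. */
    int dims[2] = {0, 0}, periods[2] = {0, 0};
    MPI_Dims_create(size, 2, dims);
    MPI_Comm cartcomm;
    MPI_Cart_create(MPI_COMM_WORLD, 2, dims, periods, 1, &cartcomm);

    int cartrank, coords[2];
    MPI_Comm_rank(cartcomm, &cartrank);
    MPI_Cart_coords(cartcomm, cartrank, 2, coords);
    printf("world rank %d -> cart rank %d at (%d,%d), node-local rank %d\n",
           rank, cartrank, coords[0], coords[1], noderank);

    MPI_Comm_free(&nodecomm);
    MPI_Comm_free(&cartcomm);
    MPI_Finalize();
    return 0;
}

Comparing the world rank with the reordered Cartesian rank (and with the node-local rank) is a quick way to observe whether a given MPI implementation honors the reorder flag at all.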