@inproceedings{1310,
  title     = {From MPI to OpenSHMEM: Porting LAMMPS},
  booktitle = {OpenSHMEM and Related Technologies. Experiences, Implementations, and Technologies},
  year      = {2015},
  pages     = {121--137},
  publisher = {Springer International Publishing},
  address   = {Annapolis, MD, USA},
  abstract  = {This work details the opportunities and challenges of porting a petascale, MPI-based application, LAMMPS, to OpenSHMEM. We investigate the major programming challenges stemming from the differences in communication semantics, address space organization, and synchronization operations between the two programming models. This work provides several approaches to solving those challenges for representative communication patterns in LAMMPS, e.g., by considering group synchronization, peer buffer status tracking, and unpacked direct transfer of scattered data. The performance of LAMMPS is evaluated on the Titan HPC system at ORNL. The OpenSHMEM implementations are compared with the MPI versions in terms of both strong and weak scaling. The results show that OpenSHMEM provides rich semantics for implementing scalable scientific applications. In addition, the experiments demonstrate that OpenSHMEM can compete with, and often improve on, the optimized MPI implementation.},
  isbn      = {978-3-319-26428-8},
  doi       = {10.1007/978-3-319-26428-8_8},
  author    = {Tang, Chunyan and Bouteiller, Aurelien and Herault, Thomas and Gorentla Venkata, Manjunath and Bosilca, George},
  editor    = {Gorentla Venkata, Manjunath and Shamis, Pavel and Imam, Neena and Lopez, M. Graham}
}