@article {1459,
  title = {Fault Tolerance of MPI Applications in Exascale Systems: The ULFM Solution},
  journal = {Future Generation Computer Systems},
  volume = {106},
  year = {2020},
  month = {2020-05},
  pages = {467-481},
  abstract = {The growth in the number of computational resources used by high-performance computing (HPC) systems leads to an increase in failure rates. Fault-tolerant techniques will become essential for long-running applications executing on future exascale systems, not only to ensure the completion of their execution but also to reduce their energy consumption. Although the Message Passing Interface (MPI) is the most popular programming model for distributed-memory HPC systems, it currently provides no fault-tolerant construct for users to handle failures; recovery is deferred until the application is aborted and re-spawned. The User Level Failure Mitigation (ULFM) interface proposed to the MPI Forum opens new opportunities in this field, enabling the implementation of resilient MPI applications, system runtimes, and programming language constructs able to detect and react to failures without aborting their execution. This paper presents a global overview of the resilience interfaces provided by the ULFM specification, covers archetypal usage patterns and building blocks, and surveys the wide variety of application-driven solutions that have exploited them in recent years. The large and varied body of approaches in the literature shows that ULFM provides the flexibility needed to implement efficient fault-tolerant MPI applications. All the proposed solutions are based on application-driven recovery mechanisms, which reduces the overhead and achieves the level of efficiency required on future exascale platforms.},
  keywords = {Application-level checkpointing, MPI, resilience, ULFM},
  issn = {0167-739X},
  doi = {https://doi.org/10.1016/j.future.2020.01.026},
  url = {https://www.sciencedirect.com/science/article/pii/S0167739X1930860X},
  author = {Nuria Losada and Patricia Gonz{\'a}lez and Mar{\'\i}a J. Mart{\'\i}n and George Bosilca and Aurelien Bouteiller and Keita Teranishi}
}
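As a companion illustration of the "archetypal usage patterns and building blocks" mentioned in the abstract above, the following minimal sketch (not taken from the paper) shows the common revoke/shrink/agree recovery pattern. It assumes an MPI implementation that ships the ULFM extensions through mpi-ext.h (for example Open MPI with ULFM support); the failure_errhandler and recover names are purely illustrative.

/* Minimal sketch of the archetypal ULFM recovery pattern: a non-fatal error
 * handler revokes the communicator, survivors agree that a failure happened,
 * shrink to the surviving ranks, and continue with application-driven recovery.
 * Assumes ULFM extensions (mpi-ext.h, MPIX_Comm_revoke/shrink/agree). */
#include <mpi.h>
#include <mpi-ext.h>   /* ULFM prototypes and MPIX_ERR_* error classes */

static MPI_Comm world;  /* communicator the application actually works on */

static void failure_errhandler(MPI_Comm *comm, int *err, ...)
{
    /* Propagate knowledge of the failure: revoking the communicator makes
     * pending and future communication on it fail at every rank. */
    if (*err == MPIX_ERR_PROC_FAILED || *err == MPIX_ERR_REVOKED)
        MPIX_Comm_revoke(*comm);
}

static void recover(void)
{
    MPI_Comm shrunk;
    int flag = 1;
    MPIX_Comm_shrink(world, &shrunk);   /* keep only surviving processes   */
    MPIX_Comm_agree(shrunk, &flag);     /* fault-tolerant agreement point  */
    MPI_Comm_free(&world);
    world = shrunk;
    /* Application-driven recovery continues here: reload checkpoint data,
     * re-attach the error handler, redistribute work to surviving ranks. */
}

int main(int argc, char **argv)
{
    MPI_Init(&argc, &argv);
    MPI_Comm_dup(MPI_COMM_WORLD, &world);

    MPI_Errhandler errh;
    MPI_Comm_create_errhandler(failure_errhandler, &errh);
    MPI_Comm_set_errhandler(world, errh);   /* do not abort on failure */

    int rc = MPI_Barrier(world);            /* any communication may report a failure */
    int ok = (rc == MPI_SUCCESS);
    MPIX_Comm_agree(world, &ok);            /* converge on a global failure view */
    if (!ok)
        recover();

    MPI_Finalize();
    return 0;
}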
@conference {1453,
  title = {Asynchronous Receiver-Driven Replay for Local Rollback of MPI Applications},
  booktitle = {Fault Tolerance for HPC at eXtreme Scale (FTXS) Workshop at The International Conference for High Performance Computing, Networking, Storage, and Analysis (SC{\textquoteright}19)},
  year = {2019},
  month = {2019-11},
  abstract = {With the increase in scale and architectural complexity of supercomputers, the management of failures has become integral to successfully executing a long-running high-performance computing application. In many instances, failures have a localized scope, usually impacting only a subset of the resources being used, yet widely used failure recovery strategies (like checkpoint/restart) fail to take advantage of this locality and rely on global, synchronous recovery actions. Even with local rollback recovery, in which only the fault-impacted processes are restarted from a checkpoint, the consistency of further progress in the execution is achieved through the replay of communication from a message log. This theoretically sound approach encounters some practical limitations: the presence of collective operations forces a synchronous recovery that prevents survivor processes from continuing their execution, removing any possibility of overlapping further computation with the recovery; and the amount of resources required at recovering peers can be untenable. In this work, we solve both problems by implementing an asynchronous, receiver-driven replay of point-to-point and collective communications, and by exploiting remote-memory access capabilities to access the message logs. This new protocol is evaluated in an implementation of local rollback over the User Level Failure Mitigation fault-tolerant Message Passing Interface (MPI). It reduces the recovery times of the failed processes by an average of 59\% and the time spent in recovery by the survivor processes by 95\% when compared to an equivalent global rollback protocol, thus living up to the promise of a truly localized impact of recovery actions.},
  keywords = {checkpoint/restart, Fault tolerance, Message logging, MPI, ULFM, User Level Fault Mitigation},
  url = {https://sc19.supercomputing.org/proceedings/workshops/workshop_files/ws_ftxs103s2-file1.pdf},
  author = {Nuria Losada and Aurelien Bouteiller and George Bosilca}
}
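The abstract above describes a receiver-driven replay that pulls logged messages through remote-memory access instead of requiring survivors to actively resend them. The toy sketch below is not the Open MPI VProtocol implementation; the LOG_SLOTS constant, buffer layout, and rank choices are illustrative assumptions. It only shows the underlying MPI one-sided mechanism: a peer exposes its message log through an MPI window and a recovering rank fetches entries with MPI_Get at its own pace.

/* Toy sketch of RMA-based log access (illustrative, not the VProtocol code). */
#include <mpi.h>
#include <string.h>

#define LOG_SLOTS 1024   /* hypothetical fixed-size payload log */

int main(int argc, char **argv)
{
    int rank, size;
    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &size);

    /* Each surviving rank exposes its sender-side payload log in a window. */
    double log_buf[LOG_SLOTS];
    memset(log_buf, 0, sizeof(log_buf));

    MPI_Win win;
    MPI_Win_create(log_buf, sizeof(log_buf), sizeof(double),
                   MPI_INFO_NULL, MPI_COMM_WORLD, &win);

    MPI_Win_fence(0, win);
    if (rank == 0 && size > 1) {
        /* A recovering rank replays a logged message by reading it directly
         * from the peer's log, without the peer re-executing any send. */
        double replayed[LOG_SLOTS];
        MPI_Get(replayed, LOG_SLOTS, MPI_DOUBLE,
                1 /* target rank */, 0 /* displacement */,
                LOG_SLOTS, MPI_DOUBLE, win);
    }
    MPI_Win_fence(0, win);

    MPI_Win_free(&win);
    MPI_Finalize();
    return 0;
}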
@article {1230,
  title = {Local Rollback for Resilient MPI Applications with Application-Level Checkpointing and Message Logging},
  journal = {Future Generation Computer Systems},
  volume = {91},
  year = {2019},
  month = {2019-02},
  pages = {450-464},
  abstract = {The resilience approach generally used in high-performance computing (HPC) relies on coordinated checkpoint/restart, a global rollback of all the processes that are running the application. However, in many instances the failure has a more localized scope and its impact is usually restricted to a subset of the resources being used. Thus, a global rollback results in unnecessary overhead and energy consumption, since all processes, including those unaffected by the failure, discard their state and roll back to the last checkpoint to repeat computations that were already done. The User Level Failure Mitigation (ULFM) interface {\textendash} the latest proposal for the inclusion of resilience features in the Message Passing Interface (MPI) standard {\textendash} enables the deployment of more flexible recovery strategies, including localized recovery. This work proposes a local rollback approach that can be generally applied to Single Program, Multiple Data (SPMD) applications by combining ULFM, the ComPiler for Portable Checkpointing (CPPC) tool, and the Open MPI VProtocol system-level message logging component. Only failed processes are recovered from the last checkpoint, while consistency before further progress in the execution is achieved through a two-level message logging process. To further optimize this approach, point-to-point communications are logged by the Open MPI VProtocol component, while collective communications are optimally logged at the application level, thereby decoupling the logging protocol from the particular collective implementation. This spatially coordinated protocol applied by CPPC reduces the log size, the log memory requirements, and, overall, the resilience impact on the applications.},
  keywords = {Application-level checkpointing, Local rollback, Message logging, MPI, resilience},
  doi = {https://doi.org/10.1016/j.future.2018.09.041},
  author = {Nuria Losada and George Bosilca and Aurelien Bouteiller and Patricia Gonz{\'a}lez and Mar{\'\i}a J. Mart{\'\i}n}
}
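For context on the "application-level checkpointing" that CPPC automates in the work above, here is a minimal, hypothetical sketch (not CPPC itself): the application explicitly saves and restores the variables it needs, an iteration counter and a working array, in per-rank files, so that on a local rollback only the failed processes reread their own state. File names, sizes, and the checkpoint frequency are illustrative.

/* Minimal sketch of application-level checkpointing for an SPMD loop. */
#include <mpi.h>
#include <stdio.h>

#define N 1000
#define CKPT_EVERY 100

int main(int argc, char **argv)
{
    MPI_Init(&argc, &argv);
    int rank;
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);

    double data[N] = {0};
    int start = 0;
    char fname[64];
    snprintf(fname, sizeof(fname), "ckpt.%d.bin", rank);

    /* Restart path: if a per-rank checkpoint exists, resume from it. */
    FILE *f = fopen(fname, "rb");
    if (f) {
        if (fread(&start, sizeof(int), 1, f) != 1) start = 0;
        if (fread(data, sizeof(double), N, f) != N) start = 0;
        fclose(f);
    }

    for (int it = start; it < 10000; it++) {
        for (int i = 0; i < N; i++) data[i] += 1.0;   /* stand-in computation */

        if ((it + 1) % CKPT_EVERY == 0) {             /* checkpoint boundary */
            f = fopen(fname, "wb");
            int next = it + 1;
            fwrite(&next, sizeof(int), 1, f);
            fwrite(data, sizeof(double), N, f);
            fclose(f);
        }
    }

    MPI_Finalize();
    return 0;
}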