@booklet {, title = {XaaS: Acceleration as a Service to Enable Productive High-Performance Cloud Computing}, year = {2024}, month = {2024-01}, publisher = {arXiv}, abstract = {HPC and Cloud have evolved independently, specializing their innovations into performance or productivity. Acceleration as a Service (XaaS) is a recipe to empower both fields with a shared execution platform that provides transparent access to computing resources, regardless of the underlying cloud or HPC service provider. Bridging HPC and cloud advancements, XaaS presents a unified architecture built on performance-portable containers. Our converged model concentrates on low-overhead, high-performance communication and computing, targeting resource-intensive workloads from climate simulations to machine learning. XaaS lifts the restricted allocation model of Function-as-a-Service (FaaS), allowing users to benefit from the flexibility and efficient resource utilization of serverless while supporting long-running and performance-sensitive workloads from HPC.}, url = {https://arxiv.org/abs/2401.04552}, author = {Torsten Hoefler and Marcin Copik and Pete Beckman and Andrew Jones and Ian Foster and Manish Parashar and Daniel Reed and Matthias Troyer and Thomas Schulthess and Dan Ernst and Jack Dongarra} } @booklet {, title = {Generalizing Random Butterfly Transforms to Arbitrary Matrix Sizes}, year = {2023}, month = {2023-12}, publisher = {arXiv}, abstract = {Parker and L{\^e} introduced random butterfly transforms (RBTs) as a preprocessing technique to replace pivoting in dense LU factorization. Unfortunately, their FFT-like recursive structure restricts the dimensions of the matrix. Furthermore, on multi-node systems, efficient management of the communication overheads restricts the matrix{\textquoteright}s distribution even more. To remove these limitations, we have generalized the RBT to arbitrary matrix sizes by truncating the dimensions of each layer in the transform. We expanded Parker{\textquoteright}s theoretical analysis to the generalized RBT, showing specifically that, in exact arithmetic, Gaussian elimination with no pivoting will succeed with probability 1 after transforming a matrix with full-depth RBTs. Furthermore, we experimentally show that these generalized transforms improve performance over Parker{\textquoteright}s formulation by up to 62\% while retaining the ability to replace pivoting. This generalized RBT is available in the SLATE numerical software library.}, url = {https://arxiv.org/abs/2312.09376}, author = {Neil Lindquist and Piotr Luszczek and Jack Dongarra} } @inproceedings {, title = {Memory Traffic and Complete Application Profiling with PAPI Multi-Component Measurements}, journal = {2023 IEEE International Parallel and Distributed Processing Symposium Workshops (IPDPSW)}, year = {2023}, month = {2023-08}, publisher = {IEEE}, address = {St. Petersburg, Florida}, abstract = {Some of the most important categories of performance events count the data traffic between the processing cores and the main memory. However, since these counters are not core-private, applications require elevated privileges to access them. PAPI offers a component that can access this information on IBM systems through the Performance Co-Pilot (PCP); however, doing so adds an indirection layer that involves querying the PCP daemon. This paper performs a quantitative study of the accuracy of the measurements obtained through this component on the Summit supercomputer.
We use two linear algebra kernels---a generalized matrix multiply and a modified matrix-vector multiply---as benchmarks and a distributed, GPU-accelerated 3D-FFT mini-app (using cuFFT) to compare the measurements obtained through the PAPI PCP component against the expected values across different problem sizes. We also compare our measurements against an in-house machine with a very similar architecture to Summit, where elevated privileges allow PAPI to access the hardware counters directly (without using PCP), to show that measurements taken via PCP are as accurate as those taken directly. Finally, using both QMCPACK and the 3D-FFT, we demonstrate the diverse hardware activities that can be monitored simultaneously via PAPI hardware components.}, keywords = {GPU power, High Performance Computing, network traffic, papi, performance analysis, Performance Counters}, doi = {10.1109/IPDPSW59300.2023.00070}, url = {https://ieeexplore.ieee.org/document/10196656}, author = {Daniel Barry and Heike Jagode and Anthony Danalis and Jack Dongarra} } @article {, title = {Memory Traffic and Complete Application Profiling with PAPI Multi-Component Measurements}, year = {2023}, month = {2023-05}, publisher = {28th HIPS Workshop}, address = {St. Petersburg, FL}, author = {Daniel Barry and Heike Jagode and Anthony Danalis and Jack Dongarra} } @conference {, title = {Using Additive Modifications in LU Factorization Instead of Pivoting}, booktitle = {37th ACM International Conference on Supercomputing (ICS{\textquoteright}23)}, year = {2023}, month = {2023-06}, publisher = {ACM}, organization = {ACM}, address = {Orlando, FL}, doi = {10.1145/3577193.3593731}, author = {Neil Lindquist and Piotr Luszczek and Jack Dongarra} } @article {, title = {Accelerating Geostatistical Modeling and Prediction With Mixed-Precision Computations: A High-Productivity Approach With PaRSEC}, journal = {IEEE Transactions on Parallel and Distributed Systems}, volume = {33}, year = {2022}, month = {2022-04}, pages = {964 - 976}, abstract = {Geostatistical modeling, one of the prime motivating applications for exascale computing, is a technique for predicting desired quantities from geographically distributed data, based on statistical models and optimization of parameters. Spatial data are assumed to possess properties of stationarity or non-stationarity via a kernel fitted to a covariance matrix. A primary workhorse of stationary spatial statistics is Gaussian maximum log-likelihood estimation (MLE), whose central data structure is a dense, symmetric positive definite covariance matrix of the dimension of the number of correlated observations. Two essential operations in MLE are the application of the inverse and evaluation of the determinant of the covariance matrix. These can be rendered through the Cholesky decomposition and triangular solution. In this contribution, we reduce the precision of weakly correlated locations to single- or half-precision based on distance. We thus exploit mathematical structure to migrate MLE to a three-precision approximation that takes advantage of contemporary architectures offering BLAS3-like operations in a single instruction that are extremely fast for reduced precision. We illustrate application-expected accuracy worthy of double-precision from a majority half-precision computation, in a context where uniform single-precision is by itself insufficient. In tackling the complexity and imbalance caused by the mixing of three precisions, we deploy the PaRSEC runtime system.
PaRSEC delivers on-demand casting of precisions while orchestrating tasks and data movement in a multi-GPU distributed-memory environment within a tile-based Cholesky factorization. Application-expected accuracy is maintained while achieving up to a 1.59X speedup by mixing FP64/FP32 operations on 1536 nodes of HAWK or 4096 nodes of Shaheen II, and up to a 2.64X speedup by mixing FP64/FP32/FP16 operations on 128 nodes of Summit, relative to FP64-only operations. This translates into up to 4.5, 4.7, ...}, keywords = {Computational modeling, Covariance matrices, Data models, Maximum likelihood estimation, Predictive models, runtime, Task analysis}, issn = {1045-9219}, doi = {10.1109/TPDS.2021.3084071}, url = {https://ieeexplore.ieee.org/document/9442267/}, author = {Abdulah, Sameh and Qinglei Cao and Pei, Yu and George Bosilca and Jack Dongarra and Genton, Marc G. and Keyes, David E. and Ltaief, Hatem and Sun, Ying} } @inproceedings {, title = {Addressing Irregular Patterns of Matrix Computations on GPUs and Their Impact on Applications Powered by Sparse Direct Solvers}, journal = {2022 International Conference for High Performance Computing, Networking, Storage and Analysis (SC22)}, year = {2022}, month = {2022-11}, pages = {354-367}, publisher = {IEEE Computer Society}, address = {Dallas, TX}, abstract = {Many scientific applications rely on sparse direct solvers for their numerical robustness. However, performance optimization for these solvers remains a challenging task, especially on GPUs. This is due to workloads of small dense matrices that differ in size. Matrix decompositions on such irregular workloads are rarely addressed on GPUs. This paper addresses irregular workloads of matrix computations on GPUs, and their application to accelerate sparse direct solvers. We design an interface for the basic matrix operations supporting problems of different sizes. The interface enables us to develop irrLU-GPU, an LU decomposition on matrices of different sizes. We demonstrate the impact of irrLU-GPU on sparse direct LU solvers using NVIDIA and AMD GPUs.
Experimental results are shown for a sparse direct solver based on a multifrontal sparse LU decomposition applied to linear systems arising from the simulation, using finite element discretization on unstructured meshes, of a high-frequency indefinite Maxwell problem.}, keywords = {GPU computing, irregular computational workloads, lu factorization, multifrontal solvers, sparse direct solvers}, url = {https://dl.acm.org/doi/abs/10.5555/3571885.3571919}, author = {Ahmad Abdelfattah and Pieter Ghysels and Wajih Boukaram and Stanimire Tomov and Xiaoye Sherry Li and Jack Dongarra} } @techreport {, title = {Analysis of the Communication and Computation Cost of FFT Libraries towards Exascale}, journal = {ICL Technical Report}, number = {ICL-UT-22-07}, year = {2022}, month = {2022-07}, publisher = {Innovative Computing Laboratory}, author = {Alan Ayala and Stanimire Tomov and Piotr Luszczek and Sebastien Cayrols and Gerald Ragghianti and Jack Dongarra} } @techreport {, title = {Communication Avoiding LU with Tournament Pivoting in SLATE}, journal = {SLATE Working Notes}, number = {18, ICL-UT-22-01}, year = {2022}, month = {2022-01}, author = {Rabab Alomairy and Mark Gates and Sebastien Cayrols and Dalal Sukkari and Kadir Akbudak and Asim YarKhan and Paul Bagwell and Jack Dongarra} } @article {, title = {Evaluating Data Redistribution in PaRSEC}, journal = {IEEE Transactions on Parallel and Distributed Systems}, volume = {33}, number = {8}, year = {2022}, month = {2022-08}, pages = {1856-1872}, doi = {10.1109/TPDS.2021.3131657}, author = {Qinglei Cao and George Bosilca and Losada, Nuria and Wu, Wei and Zhong, Dong and Jack Dongarra} } @techreport {, title = {FFT Benchmark Performance Experiments on Systems Targeting Exascale}, journal = {ICL Technical Report}, number = {ICL-UT-22-02}, year = {2022}, month = {2022-03}, author = {Alan Ayala and Stanimire Tomov and Piotr Luszczek and Sebastien Cayrols and Gerald Ragghianti and Jack Dongarra} } @conference {, title = {A Framework to Exploit Data Sparsity in Tile Low-Rank Cholesky Factorization}, booktitle = {IEEE International Parallel and Distributed Processing Symposium (IPDPS)}, year = {2022}, month = {2022-07}, doi = {10.1109/IPDPS53621.2022.00047}, url = {https://ieeexplore.ieee.org/stamp/stamp.jsp?tp=\&arnumber=9820680\&isnumber=9820610}, author = {Qinglei Cao and Rabab Alomairy and Yu Pei and George Bosilca and Hatem Ltaief and David Keyes and Jack Dongarra} } @techreport {, title = {Mixed precision and approximate 3D FFTs: Speed for accuracy trade-off with GPU-aware MPI and run-time data compression}, journal = {ICL Technical Report}, number = {ICL-UT-22-04}, year = {2022}, month = {2022-05}, keywords = {All-to-all, Approximate FFTs, ECP, heFFTe, Lossy compression, mixed-precision algorithms, MPI}, author = {Sebastien Cayrols and Jiali Li and George Bosilca and Stanimire Tomov and Alan Ayala and Jack Dongarra} } @techreport {, title = {PAQR: Pivoting Avoiding QR factorization}, journal = {ICL Technical Report}, number = {ICL-UT-22-06}, year = {2022}, month = {2022-06}, abstract = {The solution of linear least-squares problems is at the heart of many scientific and engineering applications. While any method able to minimize the backward error of such problems is considered numerically stable, the theory states that the forward error depends on the condition number of the matrix in the system of equations. 
On the one hand, the QR factorization is an efficient method to solve such problems, but the solutions it produces may have large forward errors when the matrix is deficient. On the other hand, QR with column pivoting (QRCP) is able to produce smaller forward errors on deficient matrices, but its cost is prohibitive compared to QR. The aim of this paper is to propose PAQR, an alternative solution method for rank-deficient linear least-squares problems with the same (or smaller) cost as QR that is as accurate as QRCP in practical cases. After presenting the algorithm and its implementations on different architectures, we compare its accuracy and performance results on a variety of application problems.}, author = {Wissam M. Sid-Lakhdar and Sebastien Cayrols and Daniel Bielich and Ahmad Abdelfattah and Piotr Luszczek and Mark Gates and Stanimire Tomov and Hans Johansen and David Williams-Young and Timothy A. Davis and Jack Dongarra} } @techreport {, title = {Randomized Numerical Linear Algebra: A Perspective on the Field with an Eye to Software}, journal = {University of California, Berkeley EECS Technical Report}, number = {UCB/EECS-2022-258}, year = {2022}, month = {2022-11}, publisher = {University of California, Berkeley}, abstract = {Randomized numerical linear algebra {\textendash} RandNLA, for short {\textendash} concerns the use of randomization as a resource to develop improved algorithms for large-scale linear algebra computations. The origins of contemporary RandNLA lay in theoretical computer science, where it blossomed from a simple idea: randomization provides an avenue for computing approximate solutions to linear algebra problems more efficiently than deterministic algorithms. This idea proved fruitful in and was largely driven by the development of scalable algorithms for machine learning and statistical data analysis applications. However, the true potential of RandNLA only came into focus once it began to integrate with the fields of numerical analysis and {\textquotedblleft}classical{\textquotedblright} numerical linear algebra. Through the efforts of many individuals, randomized algorithms have been developed that provide full control over the accuracy of their solutions and that are every bit as reliable as algorithms that might be found in libraries such as LAPACK. The spectrum of possibilities offered by RandNLA has created a virtuous cycle of contributions by numerical analysts, statisticians, theoretical computer scientists, and the machine learning community. Recent years have even seen the incorporation of certain RandNLA methods into MATLAB, the NAG Library, and NVIDIA{\textquoteright}s cuSOLVER. In view of these developments, we believe the time is ripe to accelerate the adoption of RandNLA in the scientific community. In particular, we believe the community stands to benefit significantly from a suitably defined {\textquotedblleft}RandBLAS{\textquotedblright} and {\textquotedblleft}RandLAPACK,{\textquotedblright} to serve as standard libraries for RandNLA, in much the same way that BLAS and LAPACK serve as standards for deterministic linear algebra. This monograph surveys the field of RandNLA as a step toward building meaningful RandBLAS and RandLAPACK libraries. Section 1 begins by setting scope and design principles for RandLAPACK and summarizing subsequent sections of the monograph. Section 2 focuses on RandBLAS, which is to be responsible for sketching.
Details of functionality suitable for RandLAPACK are covered in the five sections that follow. Specifically, Sections 3 to 5 cover least squares and optimization, low-rank approximation, and other select problems that are well-understood in how they benefit from randomized algorithms. The remaining sections {\textendash} on statistical leverage scores (Section 6) and tensor computations (Section 7) {\textendash} read more like traditional surveys. The different flavor of these latter sections reflects how, in our assessment, the literature on these topics is still maturing. We provide a substantial amount of pseudo-code and supplementary material over the course of five appendices. Much of the pseudo-code has been tested via publicly available Matlab and Python implementations.}, keywords = {Randomized algorithms}, doi = {10.48550/arXiv.2302.11474}, url = {https://www2.eecs.berkeley.edu/Pubs/TechRpts/2022/EECS-2022-258.html}, author = {Riley Murray and James Demmel and Michael W. Mahoney and N. Benjamin Erichson and Maksim Melnichenko and Osman Asif Malik and Laura Grigori and Piotr Luszczek and Micha{\l} Derezi{\'n}ski and Miles E. Lopes and Tianyu Liang and Hengrui Luo and Jack Dongarra} } @article {, title = {Reinventing High Performance Computing: Challenges and Opportunities}, number = {ICL-UT-22-03}, year = {2022}, month = {2022-03}, abstract = {The world of computing is in rapid transition, now dominated by a world of smartphones and cloud services, with profound implications for the future of advanced scientific computing. Simply put, high-performance computing (HPC) is at an important inflection point. For the last 60 years, the world{\textquoteright}s fastest supercomputers were almost exclusively produced in the United States on behalf of scientific research in the national laboratories. Change is now in the wind. While costs now stretch the limits of U.S. government funding for advanced computing, Japan and China are now leaders in the bespoke HPC systems funded by government mandates. Meanwhile, the global semiconductor shortage and political battles surrounding fabrication facilities affect everyone. However, another, perhaps even deeper, fundamental change has occurred. The major cloud vendors have invested in global networks of massive scale systems that dwarf today{\textquoteright}s HPC systems. Driven by the computing demands of AI, these cloud systems are increasingly built using custom semiconductors, reducing the financial leverage of traditional computing vendors. These cloud systems are now breaking barriers in game playing and computer vision, reshaping how we think about the nature of scientific computation.
Building the next generation of leading edge HPC systems will require rethinking many fundamentals and historical approaches by embracing end-to-end co-design; custom hardware configurations and packaging; large-scale prototyping, as was common thirty years ago; and collaborative partnerships with the dominant computing ecosystem companies, smartphone and cloud computing vendors.}, author = {Daniel Reed and Dennis Gannon and Jack Dongarra} } @techreport {, title = {Report on the Oak Ridge National Laboratory{\textquoteright}s Frontier System}, journal = {ICL Technical Report}, number = {ICL-UT-22-05}, year = {2022}, month = {2022-05}, author = {Jack Dongarra and Al Geist} } @conference {, title = {Threshold Pivoting for Dense LU Factorization}, booktitle = {ScalAH22: 13th Workshop on Latest Advances in Scalable Algorithms for Large-Scale Heterogeneous Systems }, year = {2022}, month = {2022-11}, publisher = {IEEE}, organization = {IEEE}, address = {Dallas, Texas}, abstract = {LU factorization is a key approach for solving large, dense systems of linear equations. Partial row pivoting is commonly used to ensure numerical stability; however, the data movement needed for the row interchanges can reduce performance. To improve this, we propose using threshold pivoting to find pivots almost as good as those selected by partial pivoting but that result in less data movement. Our theoretical analysis bounds the element growth similarly to partial pivoting; however, it also shows that the growth of threshold pivoting for a given matrix cannot be bounded by that of partial pivoting and vice versa. Additionally, we experimentally tested the approach on the Summit supercomputer. Threshold pivoting improved performance by up to 32\% without a significant effect on accuracy. For a more aggressive configuration with up to one digit of accuracy lost, the improvement was as high as 44\%.}, doi = {10.1109/ScalAH56622.2022.00010}, author = {Neil Lindquist and Mark Gates and Piotr Luszczek and Jack Dongarra} } @article {, title = {20 years of computational science: Selected papers from 2020 International Conference on Computational Science}, journal = {Journal of Computational Science}, volume = {53}, year = {2021}, pages = {101395{\textendash}101395}, abstract = {We thank the authors of the selected papers for their valuable contributions, the reviewers of this special section for their in-depth reviews and constructive comments, the ICCS program committee members, and workshop organizers for their diligent work ensuring the high standard of accepted ICCS papers. As always, we also thank Springer for publishing the conference proceedings and Elsevier for their continuous support and inspiration during the preparation and publishing of this virtual special issue.}, doi = {10.1016/j.jocs.2021.101395}, author = {Kovalchuk, Sergey V and Krzhizhanovskaya, Valeria V and Sloot, PMA and Z{\'a}vodszky, G{\'a}bor and Lees, Michael H and Paszy{\'n}ski, M and Jack Dongarra} } @article {, title = {Accelerating FFT towards Exascale Computing}, year = {2021}, publisher = {NVIDIA GPU Technology Conference (GTC2021)}, author = {Alan Ayala and Stanimire Tomov and Haidar, Azzam and Stoyanov, M. 
and Cayrols, Sebastien and Li, Jiali and George Bosilca and Jack Dongarra} } @article {, title = {Accelerating Restarted GMRES with Mixed Precision Arithmetic}, journal = {IEEE Transactions on Parallel and Distributed Systems}, year = {2021}, month = {2021-06}, abstract = {The generalized minimum residual method (GMRES) is a commonly used iterative Krylov solver for sparse, non-symmetric systems of linear equations. As with other iterative solvers, its run time is dominated by data movement. To improve this performance, we propose running GMRES in reduced precision with key operations remaining in full precision. Additionally, we provide theoretical results linking the convergence of finite precision GMRES with classical Gram-Schmidt with reorthogonalization (CGSR) and its infinite precision counterpart, which helps justify the convergence of this method to double-precision accuracy. We tested the mixed-precision approach with a variety of matrices and preconditioners on a GPU-accelerated node. Excluding the incomplete LU factorization without fill-in (ILU(0)) preconditioner, we achieved average speedups ranging from 8 to 61 percent relative to comparable double-precision implementations, with the simpler preconditioners achieving the higher speedups.}, keywords = {Convergence, Error correction, iterative methods, Kernel, linear systems, Stability analysis}, doi = {10.1109/TPDS.2021.3090757}, author = {Neil Lindquist and Piotr Luszczek and Jack Dongarra} } @conference {, title = {Distributed-Memory Multi-GPU Block-Sparse Tensor Contraction for Electronic Structure}, booktitle = {35th IEEE International Parallel \& Distributed Processing Symposium (IPDPS 2021)}, year = {2021}, month = {2021-05}, publisher = {IEEE}, organization = {IEEE}, address = {Portland, OR}, abstract = {Many domains of scientific simulation (chemistry, condensed matter physics, data science) increasingly eschew dense tensors for block-sparse tensors, sometimes with additional structure (recursive hierarchy, rank sparsity, etc.). Distributed-memory parallel computation with block-sparse tensorial data is paramount to minimize the time-to-solution (e.g., to study dynamical problems or for real-time analysis) and to accommodate problems of realistic size that are too large to fit into the host/device memory of a single node equipped with accelerators. Unfortunately, computation with such irregular data structures is a poor match to the dominant imperative, bulk-synchronous parallel programming model. In this paper, we focus on the critical element of block-sparse tensor algebra, namely binary tensor contraction, and report on an efficient and scalable implementation using the task-focused PaRSEC runtime.
High performance of the block-sparse tensor contraction on the Summit supercomputer is demonstrated for synthetic data as well as for real data involved in electronic structure simulations of unprecedented size.}, keywords = {block-sparse matrix multiplication, distributed-memory, Electronic structure, multi-GPU node, parsec, tensor contraction}, url = {https://hal.inria.fr/hal-02970659/document}, author = {Thomas Herault and Yves Robert and George Bosilca and Robert Harrison and Cannada Lewis and Edward Valeev and Jack Dongarra} } @article {, title = {DTE: PaRSEC Enabled Libraries and Applications}, year = {2021}, month = {2021-04}, publisher = {2021 Exascale Computing Project Annual Meeting}, author = {George Bosilca and Thomas Herault and Jack Dongarra} } @article {, title = {Efficient exascale discretizations: High-order finite element methods}, journal = {The International Journal of High Performance Computing Applications}, year = {2021}, pages = {10943420211020803}, abstract = {Efficient exploitation of exascale architectures requires rethinking of the numerical algorithms used in many large-scale applications. These architectures favor algorithms that expose ultra fine-grain parallelism and maximize the ratio of floating point operations to energy intensive data movement. One of the few viable approaches to achieve high efficiency in the area of PDE discretizations on unstructured grids is to use matrix-free/partially assembled high-order finite element methods, since these methods can increase the accuracy and/or lower the computational time due to reduced data motion. In this paper we provide an overview of the research and development activities in the Center for Efficient Exascale Discretizations (CEED), a co-design center in the Exascale Computing Project that is focused on the development of next-generation discretization software and algorithms to enable a wide range of finite element applications to run efficiently on future hardware. CEED is a research partnership involving more than 30 computational scientists from two US national labs and five universities, including members of the Nek5000, MFEM, MAGMA and PETSc projects. We discuss the CEED co-design activities based on targeted benchmarks, miniapps and discretization libraries and our work on performance optimizations for large-scale GPU architectures. 
We also provide a broad overview of research and development activities in areas such as unstructured adaptive mesh refinement algorithms, matrix-free linear solvers, and high-order data visualization, and we list examples of collaborations with several ECP and external applications.}, keywords = {co-design, high-order discretizations, High-performance computing, PDEs, unstructured grids}, doi = {10.1177/10943420211020803}, author = {Kolev, Tzanio and Fischer, Paul and Min, Misun and Jack Dongarra and Brown, Jed and Dobrev, Veselin and Warburton, Tim and Stanimire Tomov and Shephard, Mark S and Abdelfattah, Ahmad and others} } @article {, title = {Exploiting Block Structures of KKT Matrices for Efficient Solution of Convex Optimization Problems}, journal = {IEEE Access}, year = {2021}, doi = {10.1109/ACCESS.2021.3106054}, author = {Iqbal, Zafar and Nooshabadi, Saeid and Yamazaki, Ichitaro and Stanimire Tomov and Jack Dongarra} } @techreport {, title = {Interim Report on Benchmarking FFT Libraries on High Performance Systems}, journal = {Innovative Computing Laboratory Technical Report}, number = {ICL-UT-21-03}, year = {2021}, month = {2021-07}, publisher = {University of Tennessee}, type = {ICL Tech Report}, abstract = {The Fast Fourier Transform (FFT) is used in many applications such as molecular dynamics, spectrum estimation, fast convolution and correlation, signal modulation, and many wireless multimedia applications. FFTs are also heavily used in ECP applications, such as EXAALT, Copa, ExaSky-HACC, ExaWind, WarpX, and many others. As these applications{\textquoteright} accuracy and speed depend on the performance of the FFTs, we designed an FFT benchmark to measure performance and scalability of currently available FFT packages and present the results from a pre-Exascale platform. Our benchmarking also stresses the overall capacity of system interconnect; thus, it may be considered as an indicator of the bisection bandwidth, communication contention noise, and the software overheads in MPI collectives that are of interest to many other ECP applications and libraries. This FFT benchmarking project aims to show the strengths and weaknesses of multiple FFT libraries and to indicate what can be done to improve their performance. In particular, we believe that the benchmarking results could help design and implement a fast and robust FFT library for 2D and 3D inputs, while targeting large-scale heterogeneous systems with multicore processors and hardware accelerators that are co-designed in tandem with ECP applications. Our work involves studying and analyzing state-of-the-art FFT software both from vendors and available as open-source codes to better understand their performance.}, author = {Alan Ayala and Stanimire Tomov and Piotr Luszczek and Cayrols, Sebastien and Ragghianti, Gerald and Jack Dongarra} } @inbook {, title = {An Introduction to High Performance Computing and Its Intersection with Advances in Modeling Rare Earth Elements and Actinides}, booktitle = {Rare Earth Elements and Actinides: Progress in Computational Science Applications}, volume = {1388}, year = {2021}, month = {2021-10}, pages = {3-53}, publisher = {American Chemical Society}, organization = {American Chemical Society}, chapter = {1}, address = {Washington, DC}, abstract = {Computationally driven solutions in nuclear and radiochemistry heavily depend on efficient modeling of Rare Earth Elements (REEs) and actinides.
Accurate modeling of REEs and actinides faces challenges stemming from an imbalanced hardware-software ecosystem and the resulting inefficient use of High Performance Computing (HPC). This chapter provides a historical perspective on the evolution of HPC hardware, its intersectionality with domain sciences, the importance of benchmarks for performance, and an overview of challenges and advances in modeling REEs and actinides. This chapter intends to provide an introduction for researchers at the intersection of scientific computing, software development for HPC, and applied computational modeling of REEs and actinides. The chapter is structured in five sections. First, the Introduction includes subsections focusing on the Importance of REEs and Actinides (1.1), Hardware, Software, and the HPC Ecosystem (1.2), and Electronic Structure Modeling of REEs and Actinides (1.3). Second, a section on High Performance Computing focuses on the TOP500 (2.1), HPC Performance (2.2), HPC Benchmarks: Processing, Bandwidth, and Latency (2.3), and HPC Benchmarks and their Relationship to Chemical Modeling (2.4). Third, the Software Challenges and Advances focus on NWChem/NWChemEx (3.1), MADNESS (3.2), and MPQC (3.3). The fourth section provides a short overview of Artificial Intelligence in HPC applications relevant to nuclear and radiochemistry. The fifth section illustrates A Protocol to Evaluate Complexation Preferences in Separations of REEs and Actinides through Computational Modeling.}, keywords = {actinide, Computational modeling, HPC, REE}, isbn = {ISBN13: 9780841298255 eISBN: 9780841298248}, doi = {10.1021/bk-2021-1388.ch001}, url = {https://pubs.acs.org/doi/10.1021/bk-2021-1388.ch001}, author = {Deborah A. Penchoff and Edward Valeev and Heike Jagode and Piotr Luszczek and Anthony Danalis and George Bosilca and Robert J. Harrison and Jack Dongarra and Theresa L. Windus} } @conference {, title = {Leveraging PaRSEC Runtime Support to Tackle Challenging 3D Data-Sparse Matrix Problems}, booktitle = {35th IEEE International Parallel \& Distributed Processing Symposium (IPDPS 2021)}, year = {2021}, month = {2021-05}, publisher = {IEEE}, organization = {IEEE}, address = {Portland, OR}, abstract = {The task-based programming model associated with dynamic runtime systems has gained popularity for problems made challenging by workload imbalance, heterogeneous resources, or extreme concurrency. During the last decade, low-rank matrix approximations, where the main idea consists of exploiting data sparsity, typically by compressing off-diagonal tiles up to an application-specific accuracy threshold, have been adopted to address the curse of dimensionality at extreme scale. In this paper, we create a bridge between the runtime and the linear algebra by communicating knowledge of the data sparsity to the runtime. We design and implement this synergistic approach with high user productivity in mind, in the context of the PaRSEC runtime system and the HiCMA numerical library. This requires extending PaRSEC with new features to integrate rank information into the dataflow so that proper decisions can be taken at runtime. We focus on the tile low-rank (TLR) Cholesky factorization for solving 3D data-sparse covariance matrix problems arising in environmental applications. In particular, we employ the 3D exponential model of the Matern matrix kernel, which exhibits challenging nonuniform high ranks in off-diagonal tiles.
We first provide a dynamic data structure management driven by a performance model to reduce extra floating-point operations. Next, we optimize the memory footprint of the application by relying on a dynamic memory allocator, supported by a rank-aware data distribution to cope with the workload imbalance. Finally, we expose further parallelism using kernel recursive formulations to shorten the critical path. Our resulting high-performance implementation outperforms existing data-sparse TLR Cholesky factorization by up to 7-fold on a large-scale distributed-memory system, while reducing the memory footprint by up to a 44-fold factor. This multidisciplinary work highlights the need to empower runtime systems beyond their original duty of task scheduling for servicing next-generation low-rank matrix algebra libraries.}, keywords = {asynchronous executions and load balancing, dynamic runtime system, environmental applications, High-performance computing, low-rank matrix computations, task-based programming model, user productivity}, author = {Qinglei Cao and Yu Pei and Kadir Akbudak and George Bosilca and Hatem Ltaief and David Keyes and Jack Dongarra} } @techreport {, title = {Mixed-Precision Algorithm for Finding Selected Eigenvalues and Eigenvectors of Symmetric and Hermitian Matrices}, journal = {ICL Technical Report}, number = {ICL-UT-21-05}, year = {2021}, month = {2021-08}, abstract = {As new hardware is being equipped with powerful low-precision capabilities driven primarily by the needs of the burgeoning field of Artificial Intelligence (AI), mixed-precision algorithms are now showing far greater potential and attracting renewed interest in the scientific computing community. Multi-precision methods commonly follow an approximate-iterate scheme, first obtaining an approximate solution from a low-precision factorization and solve. They then iteratively refine the solution to the desired accuracy, which is often as high as what is possible with traditional approaches. While targeting symmetric and Hermitian eigenvalue problems of the form Ax = $\lambda$x, we revisit the SICE algorithm proposed by Dongarra et al. By applying the Sherman-Morrison formula on the diagonally-shifted tridiagonal systems, we propose an updated SICE-SM algorithm. By incorporating the latest two-stage algorithms from the PLASMA and MAGMA software libraries for numerical linear algebra, we achieved up to 3.6x speedup using the mixed-precision eigensolver with the blocked SICE-SM algorithm for iterative refinement when compared with full double-complex precision solvers for cases where only a portion of the eigenvalues and eigenvectors is requested.}, keywords = {eigenvalue solver, hardware accelerators, mixed-precision algorithms}, author = {Yaohung M.
Tsai and Piotr Luszczek and Jack Dongarra} } @techreport {, title = {A More Portable HeFFTe: Implementing a Fallback Algorithm for Scalable Fourier Transforms}, journal = {ICL Technical Report}, number = {ICL-UT-21-04}, year = {2021}, note = {accepted at HPEC{\textquoteright}21}, month = {2021-08}, publisher = {University of Tennessee}, type = {ICL Tech Report}, author = {Daniel Sharp and Miroslav Stoyanov and Stanimire Tomov and Jack Dongarra} } @inproceedings {, title = {Revisiting Credit Distribution Algorithms for Distributed Termination Detection}, journal = {2021 IEEE International Parallel and Distributed Processing Symposium Workshops (IPDPSW)}, year = {2021}, pages = {611{\textendash}620}, publisher = {IEEE}, abstract = {This paper revisits distributed termination detection algorithms in the context of High-Performance Computing (HPC) applications. We introduce an efficient variant of the Credit Distribution Algorithm (CDA) and compare it to the original algorithm (HCDA) as well as to its two primary competitors: the Four Counters algorithm (4C) and the Efficient Delay-Optimal Distributed algorithm (EDOD). We analyze the behavior of each algorithm for some simplified task-based kernels and show the superiority of CDA in terms of the number of control messages.}, keywords = {control messages, credit distribution algorithms, task-based HPC application, Termination detection}, doi = {10.1109/IPDPSW52791.2021.00095}, author = {George Bosilca and Aurelien Bouteiller and Thomas Herault and Le F{\`e}vre, Valentin and Robert, Yves and Jack Dongarra} } @inproceedings {, title = {Scalability Issues in FFT Computation}, journal = {International Conference on Parallel Computing Technologies}, year = {2021}, pages = {279{\textendash}287}, publisher = {Springer}, abstract = {The fast Fourier transform (FFT) is one of the most important tools in mathematics, and it is widely required by many applications in science and engineering. State-of-the-art parallel implementations of the FFT algorithm, based on Cooley-Tukey developments, are known to be communication-bound, which causes critical issues when scaling the computational and architectural capabilities. In this paper, we study the main performance bottleneck of FFT computations on hybrid CPU and GPU systems at large scale. We provide numerical simulations and potential acceleration techniques that can be easily integrated into distributed FFT libraries. We present different experiments on performance scalability and runtime analysis on the world{\textquoteright}s most powerful supercomputers today: Summit, using up to 6,144 NVIDIA V100 GPUs, and Fugaku, using more than one million Fujitsu A64FX cores.}, keywords = {Hybrid systems, Parallel FFT, scalability}, isbn = {978-3-030-86359-3}, doi = {10.1007/978-3-030-86359-3_21}, author = {Alan Ayala and Stanimire Tomov and Stoyanov, Miroslav and Jack Dongarra} } @article {, title = {A Set of Batched Basic Linear Algebra Subprograms and LAPACK Routines}, journal = {ACM Transactions on Mathematical Software (TOMS)}, volume = {47}, number = {3}, year = {2021}, pages = {1{\textendash}23}, abstract = {This article describes a standard API for a set of Batched Basic Linear Algebra Subprograms (Batched BLAS or BBLAS). The focus is on many independent BLAS operations on small matrices that are grouped together and processed by a single routine, called a Batched BLAS routine. The matrices are grouped together in uniformly sized groups, with just one group if all the matrices are of equal size.
The aim is to provide more efficient, but portable, implementations of algorithms on high-performance many-core platforms. These include multicore and many-core CPU processors, GPUs and coprocessors, and other hardware accelerators with floating-point compute facility. As well as the standard types of single and double precision, we also include half and quadruple precision in the standard. In particular, half precision is used in many very large scale applications, such as those associated with machine learning.}, keywords = {Computations on matrices, Mathematical analysis, Mathematics of computing, Numerical analysis}, doi = {10.1145/3431921}, author = {Abdelfattah, Ahmad and Costa, Timothy and Jack Dongarra and Mark Gates and Haidar, Azzam and Hammarling, Sven and Higham, Nicholas J and Kurzak, Jakub and Piotr Luszczek and Stanimire Tomov and others} } @techreport {, title = {SLATE Performance Improvements: QR and Eigenvalues}, journal = {SLATE Working Notes}, number = {17, ICL-UT-21-02}, year = {2021}, month = {2021-04}, author = {Kadir Akbudak and Paul Bagwell and Sebastien Cayrols and Mark Gates and Dalal Sukkari and Asim YarKhan and Jack Dongarra} } @techreport {, title = {SLATE Port to AMD and Intel Platforms}, journal = {SLATE Working Notes}, number = {16, ICL-UT-21-01}, year = {2021}, month = {2021-04}, author = {Ahmad Abdelfattah and Mohammed Al Farhan and Cade Brown and Mark Gates and Dalal Sukkari and Asim YarKhan and Jack Dongarra} } @article {, title = {A survey of numerical linear algebra methods utilizing mixed-precision arithmetic}, journal = {The International Journal of High Performance Computing Applications}, volume = {35}, number = {4}, year = {2021}, pages = {344{\textendash}369}, abstract = {The efficient utilization of mixed-precision numerical linear algebra algorithms can offer attractive acceleration to scientific computing applications. Especially with the hardware integration of low-precision special-function units designed for machine learning applications, the traditional numerical algorithms community urgently needs to reconsider the floating point formats used in the distinct operations to efficiently leverage the available compute power. In this work, we provide a comprehensive survey of mixed-precision numerical linear algebra routines, including the underlying concepts, theoretical background, and experimental results for both dense and sparse linear algebra problems.}, keywords = {GPUs, High-performance computing, linear algebra, Mixed-precision arithmetic, numerical mathematics}, doi = {10.1177/10943420211003313}, author = {Abdelfattah, Ahmad and Anzt, Hartwig and Boman, Erik G and Carson, Erin and Cojean, Terry and Jack Dongarra and Fox, Alyson and Mark Gates and Higham, Nicholas J and Li, Xiaoye S and others} } @article {, title = {Translational process: Mathematical software perspective}, journal = {Journal of Computational Science}, volume = {52}, year = {2021}, pages = {101216}, abstract = {Each successive generation of computer architecture has brought new challenges to achieving high performance mathematical solvers, necessitating development and analysis of new algorithms, which are then embodied in software libraries. These libraries hide architectural details from applications, allowing them to achieve a level of portability across platforms from desktops to world-class high performance computing (HPC) systems. 
Thus there has been an informal translational computer science process of developing algorithms and distributing them in open source software libraries for adoption by applications and vendors. With the move to exascale, increasing intentionality about this process will benefit the long-term sustainability of the scientific software stack.}, keywords = {communication avoiding algorithms, DATAFLOW scheduling runtimes, hardware accelerators}, doi = {10.1016/j.jocs.2020.101216}, author = {Jack Dongarra and Mark Gates and Piotr Luszczek and Stanimire Tomov} } @techreport {, title = {ASCR@40: Four Decades of Department of Energy Leadership in Advanced Scientific Computing Research}, year = {2020}, month = {2020-08}, publisher = {Advanced Scientific Computing Advisory Committee (ASCAC), US Department of Energy}, url = {https://computing.llnl.gov/misc/ASCR@40-Highlights.pdf}, author = {Bruce Hendrickson and Paul Messina and Buddy Bland and Jackie Chen and Phil Colella and Eli Dart and Jack Dongarra and Thom Dunning and Ian Foster and Richard Gerber and Rachel Harken and Wendy Huntoon and Bill Johnston and John Sarrao and Jeff Vetter} } @techreport {, title = {ASCR@40: Highlights and Impacts of ASCR{\textquoteright}s Programs}, year = {2020}, month = {2020-06}, publisher = {US Department of Energy{\textquoteright}s Office of Advanced Scientific Computing Research}, abstract = {The Office of Advanced Scientific Computing Research (ASCR) sits within the Office of Science in the Department of Energy (DOE). Per their web pages, {\textquotedblleft}the mission of the ASCR program is to discover, develop, and deploy computational and networking capabilities to analyze, model, simulate, and predict complex phenomena important to the DOE.{\textquotedblright} This succinct statement encompasses a wide range of responsibilities for computing and networking facilities; for procuring, deploying, and operating high performance computing, networking, and storage resources; for basic research in mathematics and computer science; for developing and sustaining a large body of software; and for partnering with organizations across the Office of Science and beyond. While its mission statement may seem very contemporary, the roots of ASCR are quite deep{\textemdash}long predating the creation of DOE. Applied mathematics and advanced computing were both elements of the Theoretical Division of the Manhattan Project. In the early 1950s, the Manhattan Project scientist and mathematician John von Neumann, then a commissioner for the AEC (Atomic Energy Commission), advocated for the creation of a Mathematics program to support the continued development and applications of digital computing. Los Alamos National Laboratory (LANL) scientist John Pasta created such a program to fund researchers at universities and AEC laboratories. 
Under several organizational name changes, this program has persisted ever since, and would eventually grow to become ASCR.}, doi = {https://doi.org/10.2172/1631812}, url = {https://www.osti.gov/servlets/purl/1631812}, author = {Bruce Hendrickson and Paul Messina and Buddy Bland and Jackie Chen and Phil Colella and Eli Dart and Jack Dongarra and Thom Dunning and Ian Foster and Richard Gerber and Rachel Harken and Wendy Huntoon and Bill Johnston and John Sarrao and Jeff Vetter} } @techreport {1465, title = {Asynchronous SGD for DNN Training on Shared-Memory Parallel Architectures}, journal = {Innovative Computing Laboratory Technical Report}, number = {ICL-UT-20-04}, year = {2020}, month = {2020-03}, publisher = {University of Tennessee, Knoxville}, abstract = {We present a parallel asynchronous Stochastic Gradient Descent algorithm for shared memory architectures. Different from previous asynchronous algorithms, we consider the case where the gradient updates are not particularly sparse. In the context of the MagmaDNN framework, we compare the parallel efficiency of the asynchronous implementation with that of the traditional synchronous implementation. Tests are performed for training deep neural networks on multicore CPUs and GPU devices.}, keywords = {Asynchronous iterative methods, Deep learning, gpu, multicore CPU, Stochastic Gradient Descent}, author = {Florent Lopez and Edmond Chow and Stanimire Tomov and Jack Dongarra} } @conference {1485, title = {Asynchronous SGD for DNN Training on Shared-Memory Parallel Architectures}, booktitle = {Workshop on Scalable Deep Learning over Parallel And Distributed Infrastructures (ScaDL 2020)}, year = {2020}, month = {2020-05}, author = {Florent Lopez and Edmond Chow and Stanimire Tomov and Jack Dongarra} } @techreport {, title = {CEED ECP Milestone Report: Improve Performance and Capabilities of CEED-Enabled ECP Applications on Summit/Sierra}, journal = {ECP Milestone Reports}, year = {2020}, month = {2020-05}, publisher = {Zenodo}, doi = {https://doi.org/10.5281/zenodo.3860804}, url = {https://doi.org/10.5281/zenodo.3860804}, author = {Kolev, Tzanio and Fischer, Paul and Abdelfattah, Ahmad and Ananthan, Shreyas and Valeria Barra and Natalie Beams and Bleile, Ryan and Brown, Jed and Carson, Robert and Camier, Jean-Sylvain and Churchfield, Matthew and Dobrev, Veselin and Jack Dongarra and Dudouit, Yohann and Karakus, Ali and Kerkemeier, Stefan and Lan, YuHsiang and Medina, David and Merzari, Elia and Min, Misun and Parker, Scott and Ratnayaka, Thilina and Smith, Cameron and Sprague, Michael and Stitt, Thomas and Thompson, Jeremy and Tomboulides, Ananias and Stanimire Tomov and Tomov, Vladimir and Vargas, Arturo and Warburton, Tim and Weiss, Kenneth} } @article {, title = {Clover: Computational Libraries Optimized via Exascale Research}, year = {2020}, month = {2020-02}, publisher = {2020 Exascale Computing Project Annual Meeting}, address = {Houston, TX}, author = {Mark Gates and Stanimire Tomov and Hartwig Anzt and Piotr Luszczek and Jack Dongarra} } @conference {1478, title = {Communication Avoiding 2D Stencil Implementations over PaRSEC Task-Based Runtime}, booktitle = {2020 IEEE International Parallel and Distributed Processing Symposium Workshops (IPDPSW)}, year = {2020}, month = {2020-05}, publisher = {IEEE}, organization = {IEEE}, address = {New Orleans, LA}, abstract = {Stencil computations and general sparse matrix-vector products (SpMV) are key components in many algorithms, such as geometric multigrid and Krylov solvers.
But their low arithmetic intensity means that memory bandwidth and network latency will be the performance-limiting factors. The current architectural trend favors computations over bandwidth, worsening the already unfavorable imbalance. Previous work approached stencil kernel optimization either by improving memory bandwidth usage or by providing a Communication Avoiding (CA) scheme to minimize network latency in repeated sparse vector multiplication by replicating remote work in order to delay communications on the critical path. Focusing on minimizing the communication bottleneck in distributed stencil computation, in this study we combine a CA scheme with the computation and communication overlapping that is inherent in a dataflow task-based runtime system such as PaRSEC to demonstrate their combined benefits. We implemented the 2D five-point stencil (Jacobi iteration) in PETSc, and over PaRSEC in two flavors: full communications (base-PaRSEC) and CA-PaRSEC, which operate directly on a 2D compute grid. Our results running on two clusters, NaCL and Stampede2, indicate that we can achieve a 2{\texttimes} speedup over the standard SpMV solution implemented in PETSc, and in certain cases, when kernel execution is not dominating the execution time, the CA-PaRSEC version achieved up to 57\% and 33\% speedup over the base-PaRSEC implementation on NaCL and Stampede2, respectively.}, doi = {https://doi.org/10.1109/IPDPSW50202.2020.00127}, author = {Yu Pei and Qinglei Cao and George Bosilca and Piotr Luszczek and Victor Eijkhout and Jack Dongarra} } @book {, title = {Computational Science {\textendash} ICCS 2020: 20th International Conference, Amsterdam, The Netherlands, June 3{\textendash}5, 2020, Proceedings, Part IV}, series = {Lecture Notes in Computer Science}, number = {12140}, year = {2020}, month = {2020-06}, pages = {668}, publisher = {Springer International Publishing}, organization = {Springer International Publishing}, edition = {1}, isbn = {978-3-030-50423-6}, doi = {https://doi.org/10.1007/978-3-030-50423-6}, author = {Valeria Krzhizhanovskaya and G{\'a}bor Z{\'a}vodszky and Michael Lees and Jack Dongarra and Peter Sloot and S{\'e}rgio Brissos and Jo{\~a}o Teixeira} } @book {, title = {Computational Science {\textendash} ICCS 2020: 20th International Conference, Amsterdam, The Netherlands, June 3{\textendash}5, 2020, Proceedings, Part VII}, series = {Lecture Notes in Computer Science}, number = {12143}, year = {2020}, month = {2020-06}, pages = {775}, publisher = {Springer International Publishing}, organization = {Springer International Publishing}, edition = {1}, isbn = {978-3-030-50436-6}, doi = {https://doi.org/10.1007/978-3-030-50436-6}, author = {Valeria Krzhizhanovskaya and G{\'a}bor Z{\'a}vodszky and Michael Lees and Jack Dongarra and Peter Sloot and S{\'e}rgio Brissos and Jo{\~a}o Teixeira} } @book {, title = {Computational Science {\textendash} ICCS 2020: 20th International Conference, Amsterdam, The Netherlands, June 3{\textendash}5, 2020, Proceedings, Part VI}, series = {Lecture Notes in Computer Science}, number = {12142}, year = {2020}, month = {2020-06}, pages = {667}, publisher = {Springer International Publishing}, organization = {Springer International Publishing}, edition = {1}, isbn = {978-3-030-50433-5}, doi = {https://doi.org/10.1007/978-3-030-50433-5}, author = {Valeria Krzhizhanovskaya and G{\'a}bor Z{\'a}vodszky and Michael Lees and Jack Dongarra and Peter Sloot and S{\'e}rgio Brissos and Jo{\~a}o Teixeira} } @book {, title = {Computational Science {\textendash} ICCS
2020: 20th International Conference, Amsterdam, The Netherlands, June 3{\textendash}5, 2020, Proceedings, Part V}, series = {Lecture Notes in Computer Science}, number = {12141}, year = {2020}, month = {2020-06}, pages = {618}, publisher = {Springer International Publishing}, organization = {Springer International Publishing}, edition = {1}, isbn = {978-3-030-50426-7}, doi = {https://doi.org/10.1007/978-3-030-50426-7}, author = {Valeria Krzhizhanovskaya and G{\'a}bor Z{\'a}vodszky and Michael Lees and Jack Dongarra and Peter Sloot and S{\'e}rgio Brissos and Jo{\~a}o Teixeira} } @book {, title = {Computational Science {\textendash} ICCS 2020: 20th International Conference, Amsterdam, The Netherlands, June 3{\textendash}5, 2020, Proceedings, Part III}, series = {Lecture Notes in Computer Science}, number = {12139}, year = {2020}, month = {2020-06}, pages = {648}, publisher = {Springer International Publishing}, organization = {Springer International Publishing}, edition = {1}, isbn = {978-3-030-50420-5}, doi = {https://doi.org/10.1007/978-3-030-50420-5}, author = {Valeria Krzhizhanovskaya and G{\'a}bor Z{\'a}vodszky and Michael Lees and Jack Dongarra and Peter Sloot and S{\'e}rgio Brissos and Jo{\~a}o Teixeira} } @book {, title = {Computational Science {\textendash} ICCS 2020: 20th International Conference, Amsterdam, The Netherlands, June 3{\textendash}5, 2020, Proceedings, Part II}, series = {Lecture Notes in Computer Science}, number = {12138}, year = {2020}, month = {2020-06}, pages = {697}, publisher = {Springer International Publishing}, organization = {Springer International Publishing}, edition = {1}, isbn = {978-3-030-50417-5}, doi = {https://doi.org/10.1007/978-3-030-50417-5}, author = {Valeria Krzhizhanovskaya and G{\'a}bor Z{\'a}vodszky and Michael Lees and Jack Dongarra and Peter Sloot and S{\'e}rgio Brissos and Jo{\~a}o Teixeira} } @book {, title = {Computational Science {\textendash} ICCS 2020: 20th International Conference, Amsterdam, The Netherlands, June 3{\textendash}5, 2020, Proceedings, Part I}, series = {Lecture Notes in Computer Science}, number = {12137}, year = {2020}, month = {2020-06}, pages = {707}, publisher = {Springer International Publishing}, organization = {Springer International Publishing}, edition = {1}, isbn = {978-3-030-50371-0}, doi = {https://doi.org/10.1007/978-3-030-50371-0}, author = {Valeria Krzhizhanovskaya and G{\'a}bor Z{\'a}vodszky and Michael Lees and Jack Dongarra and Peter Sloot and S{\'e}rgio Brissos and Jo{\~a}o Teixeira} } @techreport {, title = {Design, Optimization, and Benchmarking of Dense Linear Algebra Algorithms on AMD GPUs}, journal = {Innovative Computing Laboratory Technical Report}, number = {ICL-UT-20-12}, year = {2020}, month = {2020-08}, publisher = {University of Tennessee}, abstract = {Dense linear algebra (DLA) has historically been in the vanguard of software that must be adapted first to hardware changes. This is because DLA is both critical to the accuracy and performance of so many different types of applications, and because DLA libraries have proved to be outstanding vehicles for finding and implementing solutions to the problems that novel architectures pose. Therefore, in this paper we investigate the portability of the MAGMA DLA library to the latest AMD GPUs. We use automated tools to convert the CUDA code in MAGMA to the Heterogeneous-Computing Interface for Portability (HIP) language.
MAGMA provides LAPACK for GPUs and benchmarks for fundamental DLA routines ranging from BLAS to dense factorizations, linear systems and eigen-problem solvers. We port these routines to HIP and quantify currently achievable performance through the MAGMA benchmarks for the main workload algorithms on MI25 and MI50 AMD GPUs. Comparisons with performance roofline models and theoretical expectations are used to identify current limitations and directions for future improvements.}, keywords = {AMD GPUs, GPU computing, HIP Runtime, HPC, numerical linear algebra, Portability}, author = {Cade Brown and Ahmad Abdelfattah and Stanimire Tomov and Jack Dongarra} } @conference {, title = {Design, Optimization, and Benchmarking of Dense Linear Algebra Algorithms on AMD GPUs}, booktitle = {2020 IEEE High Performance Extreme Computing Virtual Conference}, year = {2020}, month = {2020-09}, publisher = {IEEE}, organization = {IEEE}, abstract = {Dense linear algebra (DLA) has historically been in the vanguard of software that must be adapted first to hardware changes. This is because DLA is both critical to the accuracy and performance of so many different types of applications, and because DLA libraries have proved to be outstanding vehicles for finding and implementing solutions to the problems that novel architectures pose. Therefore, in this paper we investigate the portability of the MAGMA DLA library to the latest AMD GPUs. We use automated tools to convert the CUDA code in MAGMA to the Heterogeneous-Computing Interface for Portability (HIP) language. MAGMA provides LAPACK for GPUs and benchmarks for fundamental DLA routines ranging from BLAS to dense factorizations, linear systems and eigen-problem solvers. We port these routines to HIP and quantify currently achievable performance through the MAGMA benchmarks for the main workload algorithms on MI25 and MI50 AMD GPUs. Comparisons with performance roofline models and theoretical expectations are used to identify current limitations and directions for future improvements.}, author = {Cade Brown and Ahmad Abdelfattah and Stanimire Tomov and Jack Dongarra} } @article {, title = {DTE: PaRSEC Enabled Libraries and Applications (Poster)}, year = {2020}, month = {2020-02}, publisher = {2020 Exascale Computing Project Annual Meeting}, address = {Houston, TX}, author = {George Bosilca and Thomas Herault and Jack Dongarra} } @article {, title = {DTE: PaRSEC Systems and Interfaces (Poster)}, year = {2020}, month = {2020-02}, publisher = {2020 Exascale Computing Project Annual Meeting}, address = {Houston, TX}, author = {George Bosilca and Thomas Herault and Jack Dongarra} } @conference {, title = {Evaluating the Performance of NVIDIA{\textquoteright}s A100 Ampere GPU for Sparse and Batched Computations}, booktitle = {2020 IEEE/ACM Workshop on Performance Modeling, Benchmarking and Simulation of High Performance Computer Systems (PMBS)}, year = {2020}, month = {2020-11}, publisher = {IEEE}, organization = {IEEE}, abstract = {GPU accelerators have become an important backbone for scientific high-performance computing, and the performance advances obtained from adopting new GPU hardware are significant. In this paper we take a first look at NVIDIA{\textquoteright}s newest server-line GPU, the A100 architecture, part of the Ampere generation.
Specifically, we assess its performance for sparse and batch computations, as these routines are relied upon in many scientific applications, and compare to the previous GPU generation.}, keywords = {Batched linear algebra, NVIDIA A100 GPU, sparse linear algebra, Sparse Matrix Vector Product}, author = {Hartwig Anzt and Yuhsiang M. Tsai and Ahmad Abdelfattah and Terry Cojean and Jack Dongarra} } @article {, title = {Exa-PAPI: The Exascale Performance API with Modern C++}, year = {2020}, month = {2020-02}, publisher = {2020 Exascale Computing Project Annual Meeting}, address = {Houston, TX}, author = {Heike Jagode and Anthony Danalis and Jack Dongarra} } @conference {, title = {Extreme-Scale Task-Based Cholesky Factorization Toward Climate and Weather Prediction Applications}, booktitle = {Platform for Advanced Scientific Computing Conference (PASC20)}, year = {2020}, month = {2020-06}, publisher = {ACM}, organization = {ACM}, address = {Geneva, Switzerland}, abstract = {Climate and weather can be predicted statistically via geospatial Maximum Likelihood Estimates (MLE), as an alternative to running large ensembles of forward models. The MLE-based iterative optimization procedure requires solving large-scale linear systems by performing a Cholesky factorization on a symmetric positive-definite covariance matrix---a demanding dense factorization in terms of memory footprint and computation. We propose a novel solution to this problem: at the mathematical level, we reduce the computational requirement by exploiting the data sparsity structure of the matrix off-diagonal tiles by means of low-rank approximations; and, at the programming-paradigm level, we integrate PaRSEC, a dynamic, task-based runtime, to reach unparalleled levels of efficiency for solving extreme-scale linear algebra matrix operations. The resulting solution leverages fine-grained computations to facilitate asynchronous execution while providing a flexible data distribution to mitigate load imbalance. Performance results are reported using 3D synthetic datasets up to 42M geospatial locations on 130,000 cores, which represent a cornerstone toward fast and accurate predictions of environmental applications.}, doi = {https://doi.org/10.1145/3394277.3401846}, author = {Qinglei Cao and Yu Pei and Kadir Akbudak and Aleksandr Mikhalev and George Bosilca and Hatem Ltaief and David Keyes and Jack Dongarra} } @techreport {1461, title = {FFT-ECP API and High-Performance Library Prototype for 2-D and 3-D FFTs on Large-Scale Heterogeneous Systems with GPUs}, journal = {ECP Milestone Report}, number = {FFT-ECP STML13-27}, year = {2020}, note = {revision 01-2020}, month = {2020-01}, publisher = {Innovative Computing Laboratory, University of Tennessee}, type = {ECP WBS 2.3.3.13 Milestone Report}, author = {Stanimire Tomov and Alan Ayala and Azzam Haidar and Jack Dongarra} } @conference {, title = {Flexible Data Redistribution in a Task-Based Runtime System}, booktitle = {IEEE International Conference on Cluster Computing (Cluster 2020)}, year = {2020}, month = {2020-09}, publisher = {IEEE}, organization = {IEEE}, address = {Kobe, Japan}, abstract = {Data redistribution aims to reshuffle data to optimize some objective for an algorithm. The objective can be multi-dimensional, such as improving computational load balance or decreasing communication volume or cost, with the ultimate goal to increase the efficiency and therefore decrease the time-to-solution for the algorithm.
The classical redistribution problem focuses on optimally scheduling communications when reshuffling data between two regular, usually block-cyclic, data distributions. Recently, task-based runtime systems have gained popularity as potential candidates to address the programming complexity on the way to exascale. In addition to increased portability across complex hardware and software systems, task-based runtime systems have the potential to cope more easily with less-regular data distributions, providing a more balanced computational load during the lifetime of the execution. In this scenario, it becomes paramount to develop a general redistribution algorithm for task-based runtime systems that could support all types of regular and irregular data distributions. In this paper, we detail a flexible redistribution algorithm, capable of dealing with redistribution problems without constraints on data distribution and data size, and implement it in a task-based runtime system, PaRSEC. Performance results show strong capability compared to ScaLAPACK, and applications highlight increased efficiency with little overhead in terms of data distribution and data size.}, doi = {https://doi.org/10.1109/CLUSTER49012.2020.00032}, author = {Qinglei Cao and George Bosilca and Wei Wu and Dong Zhong and Aurelien Bouteiller and Jack Dongarra} } @techreport {1457, title = {Formulation of Requirements for New PAPI++ Software Package: Part I: Survey Results}, journal = {PAPI++ Working Notes}, number = {1, ICL-UT-20-02}, year = {2020}, month = {2020-01}, publisher = {Innovative Computing Laboratory, University of Tennessee Knoxville}, author = {Heike Jagode and Anthony Danalis and Jack Dongarra} } @article {, title = {Ginkgo: A Node-Level Sparse Linear Algebra Library for HPC (Poster)}, year = {2020}, month = {2020-02}, publisher = {2020 Exascale Computing Project Annual Meeting}, address = {Houston, TX}, author = {Hartwig Anzt and Terry Cojean and Yen-Chen Chen and Fritz Goebel and Thomas Gruetzmacher and Pratik Nayak and Tobias Ribizel and Yu-Hsiang Tsai and Jack Dongarra} } @conference {, title = {HAN: A Hierarchical AutotuNed Collective Communication Framework}, booktitle = {IEEE Cluster Conference}, year = {2020}, month = {2020-09}, publisher = {Best Paper Award, IEEE Computer Society Press}, organization = {Best Paper Award, IEEE Computer Society Press}, address = {Kobe, Japan}, abstract = {High-performance computing (HPC) systems keep growing in scale and heterogeneity to satisfy increasing computational needs, and this brings new challenges to the design of MPI libraries, especially with regard to collective operations. To address these challenges, we present "HAN," a new hierarchical autotuned collective communication framework in Open MPI, which selects suitable homogeneous collective communication modules as submodules for each hardware level, uses collective operations from the submodules as tasks, and organizes these tasks to perform efficient hierarchical collective operations. With a task-based design, HAN can easily swap out submodules, while keeping tasks intact, to adapt to new hardware. This makes HAN suitable for the current platform and provides strong and flexible support for future HPC systems. To provide a fast and accurate autotuning mechanism, we present a novel cost model based on benchmarking the tasks instead of a whole collective operation.
This method drastically reduces tuning time, as the cost of tasks can be reused across different message sizes, and is more accurate than existing cost models. Our cost analysis suggests the autotuning component can find the optimal configuration in most cases. The evaluation of the HAN framework suggests our design significantly improves the default Open MPI and achieves decent speedups against state-of-the-art MPI implementations on the tested applications.}, author = {Xi Luo and Wei Wu and George Bosilca and Yu Pei and Qinglei Cao and Thananon Patinyasakdikul and Dong Zhong and Jack Dongarra} } @inbook {1477, title = {Harnessing the Computing Continuum for Programming Our World}, booktitle = {Fog Computing: Theory and Practice}, year = {2020}, publisher = {John Wiley \& Sons, Inc.}, organization = {John Wiley \& Sons, Inc.}, chapter = {7}, abstract = {This chapter outlines a vision for how best to harness the computing continuum of interconnected sensors, actuators, instruments, and computing systems, from small numbers of very large devices to large numbers of very small devices. The hypothesis is that only via a continuum perspective can one intentionally specify desired continuum actions and effectively manage outcomes and systemic properties{\textemdash}adaptability and homeostasis, temporal constraints and deadlines{\textemdash}and elevate the discourse from device programming to intellectual goals and outcomes. Development of a framework for harnessing the computing continuum would catalyze new consumer services, business processes, social services, and scientific discovery. Realizing and implementing a continuum programming model requires balancing conflicting constraints and translating the high-level specification into a form suitable for execution on a unifying abstract machine model. In turn, the abstract machine must implement the mapping of specification demands to end-to-end resources.}, isbn = {9781119551713}, doi = {https://doi.org/10.1002/9781119551713.ch7}, author = {Pete Beckman and Jack Dongarra and Nicola Ferrier and Geoffrey Fox and Terry Moore and Dan Reed and Micah Beck} } @conference {1481, title = {heFFTe: Highly Efficient FFT for Exascale}, booktitle = {International Conference on Computational Science (ICCS 2020)}, year = {2020}, month = {2020-06}, address = {Amsterdam, Netherlands}, abstract = {Exascale computing aspires to meet the increasing demands of large scientific applications. Software targeting exascale is typically designed for heterogeneous architectures; hence, it is important not only to develop well-designed software, but also to make it aware of the hardware architecture and efficiently exploit its power. Currently, several diverse applications, such as those that are part of the Exascale Computing Project (ECP) in the United States, rely on efficient computation of the Fast Fourier Transform (FFT). In this context, we present the design and implementation of the heFFTe (Highly Efficient FFT for Exascale) library, which targets the upcoming exascale supercomputers. We provide highly (linearly) scalable GPU kernels that achieve more than 40{\texttimes} speedup with respect to local kernels from CPU state-of-the-art libraries, and over 2{\texttimes} speedup for the whole FFT computation. A communication model for parallel FFTs is also provided to analyze the bottleneck for large-scale problems.
We show experiments obtained on the Summit supercomputer at Oak Ridge National Laboratory, using up to 24,576 IBM Power9 cores and 6,144 NVIDIA V-100 GPUs.}, keywords = {exascale, FFT, gpu, scalable algorithm}, doi = {https://doi.org/10.1007/978-3-030-50371-0_19}, author = {Alan Ayala and Stanimire Tomov and Azzam Haidar and Jack Dongarra} } @article {, title = {heFFTe: Highly Efficient FFT for Exascale (Poster)}, year = {2020}, month = {2020-10}, publisher = {NVIDIA GPU Technology Conference (GTC2020)}, author = {Alan Ayala and Stanimire Tomov and Azzam Haidar and Jack Dongarra} } @article {, title = {heFFTe: Highly Efficient FFT for Exascale (Poster)}, year = {2020}, month = {2020-02}, publisher = {SIAM Conference on Parallel Processing for Scientific Computing (SIAM PP20)}, address = {Seattle, WA}, abstract = {Considered one of the top 10 algorithms of the 20th century, the Fast Fourier Transform (FFT) is widely used by applications in science and engineering. Large-scale parallel applications targeting exascale, such as those that are part of the DOE Exascale Computing Project (ECP), are designed for heterogeneous architectures and, currently, more than a dozen ECP applications use FFTs in their codes. To address the applications{\textquoteright} needs, we developed the highly efficient FFTs for exascale (heFFTe) library. The heFFTe library release features very good weak and strong scalability and performance that is close to 90\% of the roofline peak performance. We present these performance results on the Summit supercomputer. heFFTe is also integrated in a number of applications, and we present how the overall performance is improved by using heFFTe. A performance model, limitations, and challenges are discussed for current and upcoming computer architectures.}, author = {Alan Ayala and Stanimire Tomov and Azzam Haidar and Jack Dongarra} } @article {, title = {heFFTe: Highly Efficient FFT for Exascale (Poster)}, year = {2020}, month = {2020-02}, publisher = {2020 Exascale Computing Project Annual Meeting}, address = {Houston, TX}, author = {Alan Ayala and Stanimire Tomov and Jack Dongarra and Azzam Haidar} } @conference {, title = {High-Order Finite Element Method using Standard and Device-Level Batch GEMM on GPUs}, booktitle = {2020 IEEE/ACM 11th Workshop on Latest Advances in Scalable Algorithms for Large-Scale Systems (ScalA)}, year = {2020}, month = {2020-11}, publisher = {IEEE}, organization = {IEEE}, abstract = {We present new GPU implementations of the tensor contractions arising from basis-related computations for high-order finite element methods. We consider both tensor and non-tensor bases. In the case of tensor bases, we introduce new kernels based on a series of fused device-level matrix multiplications (GEMMs), specifically designed to utilize the fast memory of the GPU. For non-tensor bases, we develop a tuned framework for choosing standard batch-BLAS GEMMs that will maximize performance across groups of elements. The implementations are included in a backend of the libCEED library. We present benchmark results for the diffusion and mass operators using libCEED integration through the MFEM finite element library and compare to those of the previously best-performing GPU backends for stand-alone basis computations. In tensor cases, we see improvements of approximately 10{\textendash}30\% in some cases, particularly for higher basis orders.
For the non-tensor tests, the new batch-GEMM implementation is twice as fast as what was previously available for basis function orders greater than five and more than approximately 10^5 degrees of freedom in the mesh; up to a ten-times speedup is seen for eighth-order basis functions.}, keywords = {Batched linear algebra, finite elements, gpu, high-order methods, matrix-free FEM, Tensor contractions}, author = {Natalie Beams and Ahmad Abdelfattah and Stanimire Tomov and Jack Dongarra and Tzanio Kolev and Yohann Dudouit} } @booklet {, title = {hipMAGMA v1.0}, year = {2020}, month = {2020-03}, publisher = {Zenodo}, doi = {10.5281/zenodo.3908549}, url = {https://doi.org/10.5281/zenodo.3908549}, author = {Cade Brown and Ahmad Abdelfattah and Stanimire Tomov and Jack Dongarra} } @booklet {, title = {hipMAGMA v2.0}, year = {2020}, month = {2020-07}, publisher = {Zenodo}, doi = {10.5281/zenodo.3928667}, url = {https://doi.org/10.5281/zenodo.3928667}, author = {Cade Brown and Ahmad Abdelfattah and Stanimire Tomov and Jack Dongarra} } @conference {1482, title = {Improving the Performance of the GMRES Method using Mixed-Precision Techniques}, booktitle = {Smoky Mountains Computational Sciences \& Engineering Conference (SMC2020)}, year = {2020}, month = {2020-08}, abstract = {The GMRES method is used to solve sparse, non-symmetric systems of linear equations arising from many scientific applications. The solver performance within a single node is memory bound, due to the low arithmetic intensity of its computational kernels. To reduce the amount of data movement, and thus to improve performance, we investigated the effect of using a mix of single and double precision while retaining double-precision accuracy. Previous efforts have explored reduced precision in the preconditioner, but the use of reduced precision in the solver itself has received limited attention. We found that GMRES only needs double precision in computing the residual and updating the approximate solution to achieve double-precision accuracy, although it must restart after each improvement of single-precision accuracy. This finding holds for the tested orthogonalization schemes: Modified Gram-Schmidt (MGS) and Classical Gram-Schmidt with Re-orthogonalization (CGSR). Furthermore, our mixed-precision GMRES, when restarted at least once, performed 19\% and 24\% faster on average than double-precision GMRES for MGS and CGSR, respectively. Our implementation uses generic programming techniques to ease the burden of coding implementations for different data types. Our use of the Kokkos library allowed us to exploit parallelism and optimize data management. Additionally, KokkosKernels was used when producing performance results. In conclusion, using a mix of single and double precision in GMRES can improve performance while retaining double-precision accuracy.}, keywords = {Kokkos, Krylov subspace methods, linear algebra, mixed precision}, author = {Neil Lindquist and Piotr Luszczek and Jack Dongarra} } @article {, title = {Integrating Deep Learning in Domain Science at Exascale (MagmaDNN)}, year = {2020}, month = {2020-12}, publisher = {DOD HPCMP seminar}, address = {virtual}, abstract = {We will present some of the current challenges in the design and integration of deep learning AI with traditional HPC simulations.
We evaluate existing packages for their readiness to run deep learning models and applications efficiently on large-scale HPC systems, identify challenges, and propose new asynchronous parallelization and optimization techniques for current large-scale heterogeneous systems and upcoming exascale systems. These developments, along with existing HPC AI software capabilities, have been integrated into MagmaDNN, an open-source HPC deep learning framework. Many deep learning frameworks are targeted towards data scientists and fall short in providing quality integration into existing HPC workflows. This paper discusses the necessities of an HPC deep learning framework and how these can be provided, e.g., as in MagmaDNN, through a deep integration with existing HPC libraries such as MAGMA and its modular memory management, MPI, CuBLAS, CuDNN, MKL, and HIP. Advancements are also illustrated through the use of algorithmic enhancements in reduced and mixed precision and asynchronous optimization methods. Finally, we present illustrations and potential solutions for enhancing traditional compute- and data-intensive applications at ORNL and UTK with AI. The approaches and future challenges are illustrated on materials science, imaging, and climate applications.}, author = {Stanimire Tomov and Kwai Wong and Jack Dongarra and Rick Archibald and Edmond Chow and Eduardo D{\textquoteright}Azevedo and Markus Eisenbach and Rocco Febbo and Florent Lopez and Daniel Nichols and Junqi Yin} } @techreport {, title = {Integrating Deep Learning in Domain Sciences at Exascale}, journal = {Innovative Computing Laboratory Technical Report}, number = {ICL-UT-20-10}, year = {2020}, month = {2020-08}, publisher = {University of Tennessee}, abstract = {This paper presents some of the current challenges in designing deep learning artificial intelligence (AI) and integrating it with traditional high-performance computing (HPC) simulations. We evaluate existing packages for their ability to run deep learning models and applications on large-scale HPC systems efficiently, identify challenges, and propose new asynchronous parallelization and optimization techniques for current large-scale heterogeneous systems and upcoming exascale systems. These developments, along with existing HPC AI software capabilities, have been integrated into MagmaDNN, an open-source HPC deep learning framework. Many deep learning frameworks are targeted at data scientists and fall short in providing quality integration into existing HPC workflows. This paper discusses the necessities of an HPC deep learning framework and how those needs can be provided (e.g., as in MagmaDNN) through a deep integration with existing HPC libraries, such as MAGMA and its modular memory management, MPI, CuBLAS, CuDNN, MKL, and HIP. Advancements are also illustrated through the use of algorithmic enhancements in reduced- and mixed-precision, as well as asynchronous optimization methods. Finally, we present illustrations and potential solutions for enhancing traditional compute- and data-intensive applications at ORNL and UTK with AI.
The approaches and future challenges are illustrated in materials science, imaging, and climate applications.}, author = {Rick Archibald and Edmond Chow and Eduardo D{\textquoteright}Azevedo and Jack Dongarra and Markus Eisenbach and Rocco Febbo and Florent Lopez and Daniel Nichols and Stanimire Tomov and Kwai Wong and Junqi Yin} } @conference {, title = {Integrating Deep Learning in Domain Sciences at Exascale}, booktitle = {2020 Smoky Mountains Computational Sciences and Engineering Conference (SMC 2020)}, year = {2020}, month = {2020-08}, abstract = {This paper presents some of the current challenges in designing deep learning artificial intelligence (AI) and integrating it with traditional high-performance computing (HPC) simulations. We evaluate existing packages for their ability to run deep learning models and applications on large-scale HPC systems efficiently, identify challenges, and propose new asynchronous parallelization and optimization techniques for current large-scale heterogeneous systems and upcoming exascale systems. These developments, along with existing HPC AI software capabilities, have been integrated into MagmaDNN, an open-source HPC deep learning framework. Many deep learning frameworks are targeted at data scientists and fall short in providing quality integration into existing HPC workflows. This paper discusses the necessities of an HPC deep learning framework and how those needs can be provided (e.g., as in MagmaDNN) through a deep integration with existing HPC libraries, such as MAGMA and its modular memory management, MPI, CuBLAS, CuDNN, MKL, and HIP. Advancements are also illustrated through the use of algorithmic enhancements in reduced- and mixed-precision, as well as asynchronous optimization methods. Finally, we present illustrations and potential solutions for enhancing traditional compute- and data-intensive applications at ORNL and UTK with AI. The approaches and future challenges are illustrated in materials science, imaging, and climate applications.}, author = {Rick Archibald and Edmond Chow and Eduardo D{\textquoteright}Azevedo and Jack Dongarra and Markus Eisenbach and Rocco Febbo and Florent Lopez and Daniel Nichols and Stanimire Tomov and Kwai Wong and Junqi Yin} } @conference {1480, title = {Investigating the Benefit of FP16-Enabled Mixed-Precision Solvers for Symmetric Positive Definite Matrices using GPUs}, booktitle = {International Conference on Computational Science (ICCS 2020)}, year = {2020}, month = {2020-06}, publisher = {Springer, Cham}, organization = {Springer, Cham}, address = {Amsterdam, Netherlands}, abstract = {Half-precision computation refers to performing floating-point operations in a 16-bit format. While half-precision has been driven largely by machine learning applications, recent algorithmic advances in numerical linear algebra have discovered beneficial use cases for half precision in accelerating the solution of linear systems of equations at higher precisions. In this paper, we present a high-performance, mixed-precision linear solver (Ax = b) for symmetric positive definite systems in double precision using graphics processing units (GPUs). The solver is based on a mixed-precision Cholesky factorization that utilizes the high-performance tensor core units in CUDA-enabled GPUs. Since the Cholesky factors are affected by the low precision, an iterative refinement (IR) solver is required to recover the solution back to double-precision accuracy.
Two different types of IR solvers are evaluated on a wide range of test matrices. A preprocessing step is also developed, which scales and shifts the matrix, if necessary, in order to preserve its positive-definiteness in lower precisions. Our experiments on the V100 GPU show performance speedups of up to 4.7{\texttimes} against a direct double-precision solver. However, matrix properties such as the condition number and the eigenvalue distribution can affect the convergence rate, which would consequently affect the overall performance.}, doi = {https://doi.org/10.1007/978-3-030-50417-5_18}, author = {Ahmad Abdelfattah and Stanimire Tomov and Jack Dongarra} } @article {, title = {Load-Balancing Sparse Matrix Vector Product Kernels on GPUs}, journal = {ACM Transactions on Parallel Computing}, volume = {7}, year = {2020}, month = {2020-03}, abstract = {Efficient processing of irregular matrices on Single Instruction, Multiple Data (SIMD)-type architectures is a persistent challenge. Resolving it requires innovations in the development of data formats, computational techniques, and implementations that strike a balance between thread divergence, which is inherent for irregular matrices, and padding, which alleviates the performance-detrimental thread divergence but introduces artificial overheads. To this end, in this article, we address the challenge of designing high-performance sparse matrix-vector product (SpMV) kernels for Nvidia Graphics Processing Units (GPUs). We present a compressed sparse row (CSR) format suitable for unbalanced matrices. We also provide a load-balancing kernel for the coordinate (COO) matrix format and extend it to a hybrid algorithm that stores part of the matrix in the SIMD-friendly Ellpack (ELL) format. The ratio between the ELL and COO parts is determined using a theoretical analysis of the nonzeros-per-row distribution. For the over 2,800 test matrices available in the SuiteSparse matrix collection, we compare the performance against SpMV kernels provided by NVIDIA{\textquoteright}s cuSPARSE library and a heavily tuned sliced ELL (SELL-P) kernel that prevents unnecessary padding by considering the irregular matrices as a combination of matrix blocks stored in ELL format.}, doi = {https://doi.org/10.1145/3380930}, author = {Hartwig Anzt and Terry Cojean and Chen Yen-Chen and Jack Dongarra and Goran Flegar and Pratik Nayak and Stanimire Tomov and Yuhsiang M. Tsai and Weichung Wang} } @article {, title = {MAGMA Templates for Scalable Linear Algebra on Emerging Architectures}, journal = {The International Journal of High Performance Computing Applications}, volume = {34}, year = {2020}, month = {2020-11}, pages = {645-658}, abstract = {With the acquisition and widespread use of more resources that rely on accelerator/wide vector{\textendash}based computing, there has been a strong demand for science and engineering applications to take advantage of these latest assets. This, however, has been extremely challenging due to the diversity of systems to support their extreme concurrency, complex memory hierarchies, costly data movement, and heterogeneous node architectures. To address these challenges, we design a programming model and describe its ease of use in the development of a new MAGMA Templates library that delivers high-performance scalable linear algebra portable on current and emerging architectures.
MAGMA Templates derives its performance and portability by (1) building on existing state-of-the-art linear algebra libraries, like MAGMA, SLATE, Trilinos, and vendor-optimized math libraries, and (2) providing access (seamlessly to the users) to the latest algorithms and architecture-specific optimizations through a single, easy-to-use C++-based API.}, issn = {1094-3420}, doi = {https://doi.org/10.1177/1094342020938421}, author = {Mohammed Al Farhan and Ahmad Abdelfattah and Stanimire Tomov and Mark Gates and Dalal Sukkari and Azzam Haidar and Robert Rosenberg and Jack Dongarra} } @article {, title = {Matrix Multiplication on Batches of Small Matrices in Half and Half-Complex Precisions}, journal = {Journal of Parallel and Distributed Computing}, volume = {145}, year = {2020}, month = {2020-11}, pages = {188-201}, abstract = {Machine learning and artificial intelligence (AI) applications often rely on performing many small matrix operations{\textemdash}in particular general matrix{\textendash}matrix multiplication (GEMM). These operations are usually performed in a reduced precision, such as the 16-bit floating-point format (i.e., half precision or FP16). The GEMM operation is also very important for dense linear algebra algorithms, and half-precision GEMM operations can be used in mixed-precision linear solvers. Therefore, high-performance batched GEMM operations in reduced precision are significantly important, not only for deep learning frameworks, but also for scientific applications that rely on batched linear algebra, such as tensor contractions and sparse direct solvers. This paper presents optimized batched GEMM kernels for graphics processing units (GPUs) in FP16 arithmetic. The paper addresses both real and complex half-precision computations on the GPU. The proposed design takes advantage of the Tensor Core technology that was recently introduced in CUDA-enabled GPUs. With eight tuning parameters introduced in the design, the developed kernels have a high degree of flexibility that overcomes the limitations imposed by the hardware and software (in the form of discrete configurations for the Tensor Core APIs). For real FP16 arithmetic, performance speedups are observed against cuBLAS for sizes up to 128. For the complex FP16 GEMM kernel, speedups are achieved thanks to a design that uses the standard interleaved matrix layout, in contrast with the planar layout required by the vendor{\textquoteright}s solution. The paper also discusses special optimizations for extremely small matrices, where even higher performance gains are achievable.}, doi = {https://doi.org/10.1016/j.jpdc.2020.07.001}, author = {Ahmad Abdelfattah and Stanimire Tomov and Jack Dongarra} } @article {, title = {Mixed-Precision Iterative Refinement using Tensor Cores on GPUs to Accelerate Solution of Linear Systems}, journal = {Proceedings of the Royal Society A}, volume = {476}, year = {2020}, month = {2020-11}, abstract = {Double-precision floating-point arithmetic (FP64) has been the de facto standard for engineering and scientific simulations for several decades. Problem complexity and the sheer volume of data coming from various instruments and sensors motivate researchers to mix and match various approaches to optimize compute resources, including different levels of floating-point precision. In recent years, machine learning has motivated hardware support for half-precision floating-point arithmetic.
A primary challenge in high-performance computing is to leverage reduced-precision and mixed-precision hardware. We show how the FP16/FP32 Tensor Cores on NVIDIA GPUs can be exploited to accelerate the solution of linear systems of equations Ax = b without sacrificing numerical stability. The techniques we employ include multiprecision LU factorization, the preconditioned generalized minimal residual algorithm (GMRES), and scaling and auto-adaptive rounding to avoid overflow. We also show how to efficiently handle systems with multiple right-hand sides. On the NVIDIA Quadro GV100 (Volta) GPU, we achieve a 4{\texttimes}{\textendash}5{\texttimes} performance increase and 5{\texttimes} better energy efficiency versus the standard FP64 implementation while maintaining an FP64 level of numerical stability.}, keywords = {GMRES, LU factorization, GPU computing, half precision arithmetic, iterative refinement, mixed precision solvers}, issn = {1471-2946}, doi = {https://doi.org/10.1098/rspa.2020.0110}, author = {Azzam Haidar and Harun Bayraktar and Stanimire Tomov and Jack Dongarra and Nicholas J. Higham} } @techreport {1471, title = {Mixed-Precision Solution of Linear Systems Using Accelerator-Based Computing}, journal = {Innovative Computing Laboratory Technical Report}, number = {ICL-UT-20-05}, year = {2020}, month = {2020-05}, publisher = {University of Tennessee}, abstract = {Double-precision floating-point arithmetic (FP64) has been the de facto standard for engineering and scientific simulations for several decades. Problem complexity and the sheer volume of data coming from various instruments and sensors motivate researchers to mix and match various approaches to optimize compute resources, including different levels of floating-point precision. In recent years, machine learning has motivated hardware support for half-precision floating-point arithmetic. A primary challenge in high-performance computing is to leverage reduced- and mixed-precision hardware. We show how the FP16/FP32 Tensor Cores on NVIDIA GPUs can be exploited to accelerate the solution of linear systems of equations Ax = b without sacrificing numerical stability. We achieve a 4{\texttimes}{\textendash}5{\texttimes} performance increase and 5{\texttimes} better energy efficiency versus the standard FP64 implementation while maintaining an FP64 level of numerical stability.}, author = {Azzam Haidar and Harun Bayraktar and Stanimire Tomov and Jack Dongarra and Nicholas J. Higham} } @article {1476, title = {Numerical Algorithms for High-Performance Computational Science}, journal = {Philosophical Transactions of the Royal Society A}, volume = {378}, year = {2020}, abstract = {A number of features of today{\textquoteright}s high-performance computers make it challenging to exploit these machines fully for computational science. These include increasing core counts but stagnant clock frequencies; the high cost of data movement; use of accelerators (GPUs, FPGAs, coprocessors), making architectures increasingly heterogeneous; and multiple precisions of floating-point arithmetic, including half-precision. Moreover, as well as maximizing speed and accuracy, minimizing energy consumption is an important criterion. New generations of algorithms are needed to tackle these challenges.
We discuss some approaches that we can take to develop numerical algorithms for high-performance computational science, with a view to exploiting the next generation of supercomputers.}, issn = {1471-2962}, doi = {https://doi.org/10.1098/rsta.2019.0066}, author = {Jack Dongarra and Laura Grigori and Nicholas J. Higham} } @book {, title = {Parallel Processing and Applied Mathematics: 13th International Conference, PPAM 2019, Bialystok, Poland, September 8{\textendash}11, 2019, Revised Selected Papers, Part II}, series = {Lecture Notes in Computer Science}, number = {12044}, year = {2020}, month = {2020-03}, pages = {503}, publisher = {Springer International Publishing}, organization = {Springer International Publishing}, isbn = {978-3-030-43222-5}, doi = {https://doi.org/10.1007/978-3-030-43222-5}, author = {Roman Wyrzykowski and Ewa Deelman and Jack Dongarra and Konrad Karczewski} } @book {, title = {Parallel Processing and Applied Mathematics: 13th International Conference, PPAM 2019, Bialystok, Poland, September 8{\textendash}11, 2019, Revised Selected Papers, Part I}, series = {Lecture Notes in Computer Science}, number = {12043}, year = {2020}, month = {2020-03}, pages = {581}, publisher = {Springer International Publishing}, organization = {Springer International Publishing}, edition = {1}, isbn = {978-3-030-43229-4}, doi = {https://doi.org/10.1007/978-3-030-43229-4}, author = {Roman Wyrzykowski and Ewa Deelman and Jack Dongarra and Konrad Karczewski} } @article {, title = {Performance Application Programming Interface for Extreme-Scale Environments (PAPI-EX) (Poster)}, year = {2020}, month = {2020-02}, publisher = {2020 NSF Cyberinfrastructure for Sustained Scientific Innovation (CSSI) Principal Investigator Meeting}, address = {Seattle, WA}, author = {Jack Dongarra and Heike Jagode and Anthony Danalis and Daniel Barry and Vince Weaver} } @techreport {1454, title = {Performance Tuning SLATE}, journal = {SLATE Working Notes}, number = {14, ICL-UT-20-01}, year = {2020}, month = {2020-01}, publisher = {Innovative Computing Laboratory, University of Tennessee}, author = {Mark Gates and Ali Charara and Asim YarKhan and Dalal Sukkari and Mohammed Al Farhan and Jack Dongarra} } @article {, title = {The PLASMA Library on CORAL Systems and Beyond (Poster)}, year = {2020}, month = {2020-02}, publisher = {2020 Exascale Computing Project Annual Meeting}, address = {Houston, TX}, author = {Piotr Luszczek and Jack Dongarra} } @article {1456, title = {Project-Based Research and Training in High Performance Data Sciences, Data Analytics, and Machine Learning}, journal = {The Journal of Computational Science Education}, volume = {11}, year = {2020}, month = {2020-01}, pages = {36-44}, doi = {https://doi.org/10.22369/issn.2153-4136/11/1/7}, url = {http://www.jocse.org/articles/11/1/7/}, author = {Kwai Wong and Stanimire Tomov and Jack Dongarra} } @techreport {, title = {Prospectus for the Next LAPACK and ScaLAPACK Libraries: Basic ALgebra LIbraries for Sustainable Technology with Interdisciplinary Collaboration (BALLISTIC)}, journal = {LAPACK Working Notes}, number = {297, ICL-UT-20-07}, year = {2020}, month = {2020-07}, publisher = {University of Tennessee}, abstract = {The convergence of several unprecedented changes, including formidable new system design constraints and revolutionary levels of heterogeneity, has made it clear that much of the essential software infrastructure of computational science and engineering is, or will soon be, obsolete.
Math libraries have historically been in the vanguard of software that must be adapted first to such changes, both because these low-level workhorses are so critical to the accuracy and performance of so many different types of applications, and because they have proved to be outstanding vehicles for finding and implementing solutions to the problems that novel architectures pose. Under the Basic ALgebra LIbraries for Sustainable Technology with Interdisciplinary Collaboration (BALLISTIC) project, the principal designers of the Linear Algebra PACKage (LAPACK) and the Scalable Linear Algebra PACKage (ScaLAPACK), the combination of which is abbreviated Sca/LAPACK, aim to enhance and update these libraries for the ongoing revolution in processor architecture, system design, and application requirements by incorporating them into a layered package of software components{\textemdash}the BALLISTIC ecosystem{\textemdash}that provides users seamless access to state-of-the-art solver implementations through familiar and improved Sca/LAPACK interfaces.}, author = {James Demmel and Jack Dongarra and Julie Langou and Julien Langou and Piotr Luszczek and Michael Mahoney} } @article {1466, title = {Reducing the Amount of out-of-core Data Access for GPU-Accelerated Randomized SVD}, journal = {Concurrency and Computation: Practice and Experience}, year = {2020}, month = {2020-04}, keywords = {Divide and conquer, gpu, out-of-core computation, Singular value decomposition}, doi = { https://doi.org/10.1002/cpe.5754}, author = {Yuechao Lu and Ichitaro Yamazaki and Fumihiko Ino and Yasuyuki Matsushita and Stanimire Tomov and Jack Dongarra} } @conference {, title = {Replacing Pivoting in Distributed Gaussian Elimination with Randomized Techniques}, booktitle = {2020 IEEE/ACM 11th Workshop on Latest Advances in Scalable Algorithms for Large-Scale Systems (ScalA)}, year = {2020}, month = {2020-11}, publisher = {IEEE}, organization = {IEEE}, address = {Atlanta, GA}, abstract = {Gaussian elimination is a key technique for solving dense, non-symmetric systems of linear equations. Pivoting is used to ensure numerical stability but can introduce significant overheads. We propose replacing pivoting with recursive butterfly transforms (RBTs) and iterative refinement. RBTs use an FFT-like structure and randomized elements to provide an efficient, two-sided preconditioner for factoring. This approach was implemented and tested using Software for Linear Algebra Targeting Exascale (SLATE). In numerical experiments, our implementation was more robust than Gaussian elimination with no pivoting (GENP) but failed to solve all the problems solvable with Gaussian elimination with partial pivoting (GEPP). 
Furthermore, the proposed solver was able to outperform GEPP when distributed on GPU-accelerated nodes.}, keywords = {linear systems, Randomized algorithms}, author = {Neil Lindquist and Piotr Luszczek and Jack Dongarra} } @techreport {, title = {Report on the Fujitsu Fugaku System}, journal = {Innovative Computing Laboratory Technical Report}, number = {ICL-UT-20-06}, year = {2020}, month = {2020-06}, publisher = {University of Tennessee}, author = {Jack Dongarra} } @conference {, title = {Scalable Data Generation for Evaluating Mixed-Precision Solvers}, booktitle = {2020 IEEE High Performance Extreme Computing Conference (HPEC)}, year = {2020}, month = {2020-09}, publisher = {IEEE}, organization = {IEEE}, address = {Waltham, MA, USA}, abstract = {We present techniques for generating data for mixed-precision solvers that allow those solvers to be tested in a scalable manner. Our techniques focus on mixed-precision hardware and software where both the solver and the hardware can take advantage of mixing multiple floating-point precision formats. This allows taking advantage of the recently released generation of hardware platforms that focus on ML and DNN workloads but can also be utilized for HPC applications if a new breed of algorithms is combined with custom floating-point formats to deliver performance levels beyond the standard IEEE data types while delivering comparable accuracy of the results.}, doi = {https://doi.org/10.1109/HPEC43674.2020.9286145}, author = {Piotr Luszczek and Yaohung Tsai and Neil Lindquist and Hartwig Anzt and Jack Dongarra} } @article {, title = {A Set of Batched Basic Linear Algebra Subprograms}, journal = {ACM Transactions on Mathematical Software}, year = {2020}, month = {2020-10}, abstract = {This paper describes a standard API for a set of Batched Basic Linear Algebra Subprograms (Batched BLAS or BBLAS). The focus is on many independent BLAS operations on small matrices that are grouped together and processed by a single routine, called a Batched BLAS routine. The matrices are grouped together in uniformly sized groups, with just one group if all the matrices are of equal size. The aim is to provide more efficient, but portable, implementations of algorithms on high-performance many-core platforms. These include multicore and many-core CPU processors, GPUs and coprocessors, and other hardware accelerators with floating-point compute facility. As well as the standard types of single and double precision, we also include half and quadruple precision in the standard. In particular, half precision is used in many very large scale applications, such as those associated with machine learning.}, author = {Ahmad Abdelfattah and Timothy Costa and Jack Dongarra and Mark Gates and Azzam Haidar and Sven Hammarling and Nicholas J.
Higham and Jakub Kurzak and Piotr Luszczek and Stanimire Tomov and Mawussi Zounon} } @techreport {, title = {SLATE Performance Report: Updates to Cholesky and LU Factorizations}, journal = {Innovative Computing Laboratory Technical Report}, number = {ICL-UT-20-14}, year = {2020}, month = {2020-10}, publisher = {University of Tennessee}, author = {Asim YarKhan and Mohammed Al Farhan and Dalal Sukkari and Mark Gates and Jack Dongarra} } @article {, title = {SLATE: Software for Linear Algebra Targeting Exascale (Poster)}, year = {2020}, month = {2020-02}, publisher = {2020 Exascale Computing Project Annual Meeting}, address = {Houston, TX}, author = {Mark Gates and Ali Charara and Jakub Kurzak and Asim YarKhan and Mohammed Al Farhan and Dalal Sukkari and Jack Dongarra} } @article {1464, title = {SLATE Tutorial}, year = {2020}, month = {2020-02}, publisher = {2020 ECP Annual Meeting}, address = {Houston, TX}, author = {Mark Gates and Jakub Kurzak and Asim YarKhan and Ali Charara and Jamie Finney and Dalal Sukkari and Mohammed Al Farhan and Ichitaro Yamazaki and Panruo Wu and Jack Dongarra} } @techreport {1278, title = {SLATE Users{\textquoteright} Guide}, journal = {SLATE Working Notes}, number = {10, ICL-UT-19-01}, year = {2020}, month = {2020-07}, publisher = {Innovative Computing Laboratory, University of Tennessee}, type = {SLATE Working Notes}, author = {Mark Gates and Ali Charara and Jakub Kurzak and Asim YarKhan and Mohammed Al Farhan and Dalal Sukkari and Jack Dongarra} } @techreport {, title = {A Survey of Numerical Methods Utilizing Mixed Precision Arithmetic}, journal = {SLATE Working Notes}, number = {15, ICL-UT-20-08}, year = {2020}, month = {2020-07}, publisher = {University of Tennessee}, type = {SLATE Working Notes}, author = {Ahmad Abdelfattah and Hartwig Anzt and Erik Boman and Erin Carson and Terry Cojean and Jack Dongarra and Mark Gates and Thomas Gruetzmacher and Nicholas J. Higham and Sherry Li and Neil Lindquist and Yang Liu and Jennifer Loe and Piotr Luszczek and Pratik Nayak and Sri Pranesh and Siva Rajamanickam and Tobias Ribizel and Barry Smith and Kasia Swirydowicz and Stephen Thomas and Stanimire Tomov and Yaohung Tsai and Ichitaro Yamazaki and Ulrike Meier Yang} } @article {, title = {Translational Process: Mathematical Software Perspective}, journal = {Journal of Computational Science}, year = {2020}, month = {2020-09}, abstract = {Each successive generation of computer architecture has brought new challenges to achieving high performance mathematical solvers, necessitating development and analysis of new algorithms, which are then embodied in software libraries. These libraries hide architectural details from applications, allowing them to achieve a level of portability across platforms from desktops to world-class high performance computing (HPC) systems. Thus there has been an informal translational computer science process of developing algorithms and distributing them in open source software libraries for adoption by applications and vendors.
With the move to exascale, increasing intentionality about this process will benefit the long-term sustainability of the scientific software stack.}, keywords = {communication avoiding algorithms, dataflow scheduling runtimes, hardware accelerators}, doi = {https://doi.org/10.1016/j.jocs.2020.101216}, author = {Jack Dongarra and Mark Gates and Piotr Luszczek and Stanimire Tomov} } @techreport {, title = {Translational Process: Mathematical Software Perspective}, journal = {Innovative Computing Laboratory Technical Report}, number = {ICL-UT-20-11}, year = {2020}, month = {2020-08}, abstract = {Each successive generation of computer architecture has brought new challenges to achieving high performance mathematical solvers, necessitating development and analysis of new algorithms, which are then embodied in software libraries. These libraries hide architectural details from applications, allowing them to achieve a level of portability across platforms from desktops to world-class high performance computing (HPC) systems. Thus there has been an informal translational computer science process of developing algorithms and distributing them in open source software libraries for adoption by applications and vendors. With the move to exascale, increasing intentionality about this process will benefit the long-term sustainability of the scientific software stack.}, keywords = {communication avoiding algorithms, dataflow scheduling runtimes, hardware accelerators}, author = {Jack Dongarra and Mark Gates and Piotr Luszczek and Stanimire Tomov} } @conference {1483, title = {Twenty Years of Computational Science}, booktitle = {International Conference on Computational Science (ICCS 2020)}, year = {2020}, month = {2020-06}, address = {Amsterdam, Netherlands}, author = {Valeria Krzhizhanovskaya and G{\'a}bor Z{\'a}vodszky and Michael Lees and Jack Dongarra and Peter Sloot and S{\'e}rgio Brissos and Jo{\~a}o Teixeira} } @conference {, title = {Using Advanced Vector Extensions AVX-512 for MPI Reduction}, booktitle = {EuroMPI/USA {\textquoteright}20: 27th European MPI Users{\textquoteright} Group Meeting}, year = {2020}, month = {2020-09}, address = {Austin, TX}, abstract = {As the scale of high-performance computing (HPC) systems continues to grow, researchers have devoted themselves to exploring increasing levels of parallelism to achieve optimal performance. The modern CPU{\textquoteright}s design, including its features of hierarchical memory and SIMD/vectorization capability, governs algorithms{\textquoteright} efficiency. The recent introduction of wide vector instruction set extensions (AVX and SVE) has made vectorization critically important for increasing efficiency and closing the gap to peak performance. In this paper, we propose an implementation of predefined MPI reduction operations utilizing AVX, AVX2 and AVX-512 intrinsics to provide vector-based reduction operations and to improve the time-to-solution of these predefined MPI reduction operations. With these optimizations, we achieve higher efficiency for local computations, which directly benefits the overall cost of collective reductions. The evaluation of the resulting software stack under different scenarios demonstrates that the solution is at the same time generic and efficient.
Experiments are conducted on an Intel Xeon Gold cluster, which show that our AVX-512 optimized reduction operations achieve up to a 10X performance benefit over the Open MPI default for MPI local reduction.}, keywords = {Instruction level parallelism, Intel AVX2/AVX-512, Long vector extension, MPI reduction operation, Single instruction multiple data, Vector operation}, doi = {https://doi.org/10.1145/3416315.3416316}, author = {Dong Zhong and Qinglei Cao and George Bosilca and Jack Dongarra} } @article {, title = {Using Advanced Vector Extensions AVX-512 for MPI Reduction (Poster)}, year = {2020}, month = {2020-09}, publisher = {EuroMPI/USA {\textquoteright}20: 27th European MPI Users{\textquoteright} Group Meeting}, address = {Austin, TX}, author = {Dong Zhong and George Bosilca and Qinglei Cao and Jack Dongarra} } @conference {1484, title = {Using Arm Scalable Vector Extension to Optimize Open MPI}, booktitle = {20th IEEE/ACM International Symposium on Cluster, Cloud and Internet Computing (CCGRID 2020)}, year = {2020}, month = {2020-05}, publisher = {IEEE/ACM}, organization = {IEEE/ACM}, address = {Melbourne, Australia}, abstract = {As the scale of high-performance computing (HPC) systems continues to grow, increasing levels of parallelism must be explored to achieve optimal performance. Recently, as processors have come to support wide vector extensions, vectorization has become much more important for exploiting the potential peak performance of the target architecture. Novel processor architectures, such as the Armv8-A architecture, introduce the Scalable Vector Extension (SVE), an optional, separate architectural extension with a new set of A64 instruction encodings, which enables even greater parallelism. In this paper, we analyze the usage and performance of the SVE instructions in the Arm SVE vector Instruction Set Architecture (ISA) and utilize those instructions to improve memcpy and various local reduction operations. Furthermore, we propose new strategies to improve the performance of MPI operations, including datatype packing/unpacking and MPI reduction. With these optimizations, we not only provide higher parallelism on a single node, but also achieve a more efficient communication scheme for message exchange. The resulting efforts have been implemented in the context of Open MPI, providing efficient and scalable SVE support and extending the possible implementations of SVE to a more extensive range of programming and execution paradigms. The evaluation of the resulting software stack under different scenarios with both a simulator and Fujitsu{\textquoteright}s A64FX processor demonstrates that the solution is at the same time generic and efficient.}, keywords = {ARMIE, datatype pack and unpack, local reduction, non-contiguous accesses, SVE, Vector Length Agnostic}, doi = {https://doi.org/10.1109/CCGrid49817.2020.00-71}, author = {Dong Zhong and Pavel Shamis and Qinglei Cao and George Bosilca and Jack Dongarra} } @article {, title = {Using Quantized Integer in LU Factorization with Partial Pivoting (Poster)}, year = {2020}, month = {2020-02}, publisher = {SIAM Conference on Parallel Processing for Scientific Computing (SIAM PP20)}, address = {Seattle, WA}, abstract = {Quantization is a common technique to speed up deep learning inference. It uses integers with a shared scalar to represent a set of equally spaced numbers. The quantized integer method has shown great success in compressing deep learning models, reducing the computation cost without losing too much accuracy.
New application-specific hardware and specialized CPU extension instructions like Intel AVX-512 VNNI provide the capability to perform integer MADD (multiply and add) operations efficiently. In this poster, we show our preliminary results of using quantized integers for LU factorization with partial pivoting. Using Int32, the backward error can outperform single precision. However, quantized integers have a limited-range issue similar to FP16, so they would not work directly for large matrices because big numbers would occur in the factored U. We will show some possible solutions and how we would like to apply this quantized integer technique to other numerical linear algebra applications.}, author = {Yaohung Tsai and Piotr Luszczek and Jack Dongarra} } @article {1319, title = {Adaptive Precision in Block-Jacobi Preconditioning for Iterative Sparse Linear System Solvers}, journal = {Concurrency and Computation: Practice and Experience}, volume = {31}, number = {6}, year = {2019}, month = {2019-03}, pages = {e4460}, abstract = {We propose an adaptive scheme to reduce communication overhead caused by data movement by selectively storing the diagonal blocks of a block-Jacobi preconditioner in different precision formats (half, single, or double). This specialized preconditioner can then be combined with any Krylov subspace method for the solution of sparse linear systems to perform all arithmetic in double precision. We assess the effects of the adaptive precision preconditioner on the iteration count and data transfer cost of a preconditioned conjugate gradient solver. A preconditioned conjugate gradient method is, in general, a memory bandwidth-bound algorithm, and therefore its execution time and energy consumption are largely dominated by the costs of accessing the problem{\textquoteright}s data in memory. Given this observation, we propose a model that quantifies the time and energy savings of our approach based on the assumption that these two costs depend linearly on the bit length of a floating point number. Furthermore, we use a number of test problems from the SuiteSparse matrix collection to estimate the potential benefits of the adaptive block-Jacobi preconditioning scheme.}, keywords = {adaptive precision, block-Jacobi preconditioning, communication reduction, energy efficiency, Krylov subspace methods, sparse linear systems}, doi = {https://doi.org/10.1002/cpe.4460}, url = {https://onlinelibrary.wiley.com/doi/abs/10.1002/cpe.4460}, author = {Hartwig Anzt and Jack Dongarra and Goran Flegar and Nicholas J. Higham and Enrique S. Quintana-Orti} } @article {1262, title = {Algorithms and Optimization Techniques for High-Performance Matrix-Matrix Multiplications of Very Small Matrices}, journal = {Parallel Computing}, volume = {81}, year = {2019}, month = {2019-01}, pages = {1{\textendash}21}, abstract = {Expressing scientific computations in terms of BLAS, and in particular the general dense matrix-matrix multiplication (GEMM), is of fundamental importance for obtaining high performance portability across architectures. However, GEMMs for small matrices of sizes smaller than 32 are not sufficiently optimized in existing libraries. We consider the computation of many small GEMMs and its performance portability for a wide range of computer architectures, including Intel CPUs, ARM, IBM, Intel Xeon Phi, and GPUs. These computations often occur in applications like big data analytics, machine learning, high-order finite element methods (FEM), and others.
The GEMMs are grouped together in a single batched routine. For these cases, we present algorithms and their optimization techniques that are specialized for the matrix sizes and architectures of interest. We derive a performance model and show that the new developments can be tuned to obtain performance that is within 90\% of the optimal for any of the architectures of interest. For example, on a V100 GPU for square matrices of size 32, we achieve an execution rate of about 1600 gigaFLOP/s in double-precision arithmetic, which is 95\% of the theoretically derived peak for this computation on a V100 GPU. We also show that these results outperform currently available state-of-the-art implementations such as vendor-tuned math libraries, including Intel MKL and NVIDIA CUBLAS, as well as open-source libraries like OpenBLAS and Eigen.}, keywords = {Autotuning, Batched GEMM, HPC, Matrix-matrix product, optimization, Small matrices}, doi = {https://doi.org/10.1016/j.parco.2018.10.003}, author = {Ian Masliah and Ahmad Abdelfattah and Azzam Haidar and Stanimire Tomov and Marc Baboulin and Jo{\"e}l Falcou and Jack Dongarra} } @techreport {1433, title = {CEED ECP Milestone Report: Performance Tuning of CEED Software and 1st and 2nd Wave Apps}, year = {2019}, month = {2019-10}, publisher = {Zenodo}, doi = {https://doi.org/10.5281/zenodo.3477618}, author = {Stanimire Tomov and Ahmad Abdelfattah and Valeria Barra and Natalie Beams and Jed Brown and Jean-Sylvain Camier and Veselin Dobrev and Jack Dongarra and Yohann Dudouit and Paul Fischer and Ali Karakus and Stefan Kerkemeier and Tzanio Kolev and YuHsiang Lan and Elia Merzari and Misun Min and Aleks Obabko and Scott Parker and Thilina Ratnayaka and Jeremy Thompson and Ananias Tomboulides and Vladimir Tomov and Tim Warburton} } @conference {1449, title = {Characterization of Power Usage and Performance in Data-Intensive Applications using MapReduce over MPI}, booktitle = {2019 International Conference on Parallel Computing (ParCo2019)}, year = {2019}, month = {2019-09}, address = {Prague, Czech Republic}, author = {Joshua Davis and Tao Gao and Sunita Chandrasekaran and Heike Jagode and Anthony Danalis and Pavan Balaji and Jack Dongarra and Michela Taufer} } @article {1305, title = {Checkpointing Strategies for Shared High-Performance Computing Platforms}, journal = {International Journal of Networking and Computing}, volume = {9}, number = {1}, year = {2019}, pages = {28{\textendash}52}, abstract = {Input/output (I/O) from various sources often contend for scarcely available bandwidth. For example, checkpoint/restart (CR) protocols can help to ensure application progress in failure-prone environments. However, CR I/O alongside an application{\textquoteright}s normal, requisite I/O can increase I/O contention and might negatively impact performance. In this work, we consider different aspects (system-level scheduling policies and hardware) that optimize the overall performance of concurrently executing CR-based applications that share I/O resources. We provide a theoretical model and derive a set of necessary constraints to minimize the global waste on a given platform. Our results demonstrate that Young/Daly{\textquoteright}s optimal checkpoint interval, despite providing a sensible metric for a single, undisturbed application, is not sufficient to optimally address resource contention at scale. 
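As background for the Young/Daly period invoked above: with checkpoint cost $C$ and platform MTBF $\mu$, a first-order analysis balances the checkpointing overhead $C/W$ of a period of length $W$ against the expected re-execution loss $W/(2\mu)$ after a failure, and the total waste is minimized at

\[ W_{\mathrm{opt}} \approx \sqrt{2\,C\,\mu}. \]

This is the single-application optimum that, as the abstract argues, is no longer sufficient once concurrently running applications contend for shared I/O bandwidth.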
We show that by combining optimal checkpointing periods with contention-aware system-level I/O scheduling strategies, we can significantly improve overall application performance and maximize the platform throughput. Finally, we evaluate how specialized hardware, namely burst buffers, may help to mitigate the I/O contention problem. Overall, these results provide critical analysis and direct guidance on how to design efficient, CR-ready, large-scale platforms without a large investment in the I/O subsystem.}, issn = {2185-2847}, url = {http://www.ijnc.org/index.php/ijnc/article/view/195}, author = {Thomas Herault and Yves Robert and Aurelien Bouteiller and Dorian Arnold and Kurt Ferreira and George Bosilca and Jack Dongarra} } @article {1301, title = {Comparing the Performance of Rigid, Moldable, and Grid-Shaped Applications on Failure-Prone HPC Platforms}, journal = {Parallel Computing}, volume = {85}, year = {2019}, month = {2019-07}, pages = {1{\textendash}12}, doi = {https://doi.org/10.1016/j.parco.2019.02.002}, author = {Valentin Le F{\`e}vre and Thomas Herault and Yves Robert and Aurelien Bouteiller and Atsushi Hori and George Bosilca and Jack Dongarra} } @conference {1379, title = {Counter Inspection Toolkit: Making Sense out of Hardware Performance Events}, booktitle = {11th International Workshop on Parallel Tools for High Performance Computing}, year = {2019}, month = {2019-02}, publisher = {Cham, Switzerland: Springer}, organization = {Cham, Switzerland: Springer}, address = {Dresden, Germany}, abstract = {Hardware counters play an essential role in understanding the behavior of performance-critical applications, and inform any effort to identify opportunities for performance optimization. However, because modern hardware is becoming increasingly complex, the number of counters that are offered by the vendors increases and, in some cases, so does their complexity. In this paper we present a toolkit that aims to assist application developers invested in performance analysis by automatically categorizing and disambiguating performance counters. We present and discuss the set of microbenchmarks and analyses that we developed as part of our toolkit. We explain why they work and discuss the non-obvious reasons why some of our early benchmarks and analyses did not work in an effort to share with the rest of the community the wisdom we acquired from negative results.}, doi = {https://doi.org/10.1007/978-3-030-11987-4_2}, author = {Anthony Danalis and Heike Jagode and H Hanumantharayappa and Sangamesh Ragate and Jack Dongarra} } @techreport {1322, title = {Design and Implementation for FFT-ECP on Distributed Accelerated Systems}, journal = {Innovative Computing Laboratory Technical Report}, number = {ICL-UT-19-05}, year = {2019}, month = {2019-04}, publisher = {University of Tennessee}, type = {ECP WBS 2.3.3.09 Milestone Report}, author = {Stanimire Tomov and Azzam Haidar and Alan Ayala and Daniel Schultz and Jack Dongarra} } @article {1370, title = {Distributed-Memory Lattice H-Matrix Factorization}, journal = {The International Journal of High Performance Computing Applications}, volume = {33}, year = {2019}, month = {2019-08}, pages = {1046{\textendash}1063}, abstract = {We parallelize the LU factorization of a hierarchical low-rank matrix ($\mathcal{H}$-matrix) on a distributed-memory computer.
This is much more difficult than the $\mathcal{H}$-matrix-vector multiplication due to the dataflow of the factorization, and it is much harder than the parallelization of a dense matrix factorization due to the irregular hierarchical block structure of the matrix. Block low-rank (BLR) format gets rid of the hierarchy and simplifies the parallelization, often increasing concurrency. However, this comes at a price of losing the near-linear complexity of the $\mathcal{H}$-matrix factorization. In this work, we propose to factorize the matrix using a {\textquotedblleft}lattice $\mathcal{H}$-matrix{\textquotedblright} format that generalizes the BLR format by storing each of the blocks (both diagonals and off-diagonals) in the $\mathcal{H}$-matrix format. These blocks stored in the $\mathcal{H}$-matrix format are referred to as lattices. Thus, this lattice format aims to combine the parallel scalability of BLR factorization with the near-linear complexity of $\mathcal{H}$-matrix factorization. We first compare factorization performances using the $\mathcal{H}$-matrix, BLR, and lattice $\mathcal{H}$-matrix formats under various conditions on a shared-memory computer. Our performance results show that the lattice format has storage and computational complexities similar to those of the $\mathcal{H}$-matrix format, and hence a much lower cost of factorization than BLR. We then compare the BLR and lattice $\mathcal{H}$-matrix factorization on distributed-memory computers. Our performance results demonstrate that compared with BLR, the lattice format with the lower cost of factorization may lead to faster factorization on the distributed-memory computer.}, doi = {https://doi.org/10.1177/1094342019861139}, author = {Ichitaro Yamazaki and Akihiro Ida and Rio Yokota and Jack Dongarra} } @article {1387, title = {Does your tool support PAPI SDEs yet?}, year = {2019}, month = {2019-07}, publisher = {13th Scalable Tools Workshop}, address = {Tahoe City, CA}, author = {Anthony Danalis and Heike Jagode and Jack Dongarra} } @techreport {1396, title = {An Empirical View of SLATE Algorithms on Scalable Hybrid System}, journal = {Innovative Computing Laboratory Technical Report}, number = {ICL-UT-19-08}, year = {2019}, month = {2019-09}, publisher = {University of Tennessee, Knoxville}, author = {Asim YarKhan and Jakub Kurzak and Ahmad Abdelfattah and Jack Dongarra} } @article {1269, title = {Evaluation of Directive-Based Performance Portable Programming Models}, journal = {International Journal of High Performance Computing and Networking}, volume = {14}, year = {2019}, month = {2019-07}, pages = {165-182}, abstract = {We present an extended exploration of the performance portability of directives provided by OpenMP 4 and OpenACC to program various types of node architecture with attached accelerators, both self-hosted multicore and offload multicore/GPU. Our goal is to examine how successful OpenACC and the newer offload features of OpenMP 4.5 are for moving codes between architectures, and we document how much tuning might be required and what lessons we can learn from these experiences. To do this, we use examples of algorithms with varying computational intensities for our evaluation, as both compute and data access efficiency are important considerations for overall application performance. To better understand fundamental compute vs. bandwidth bound characteristics, we add the compute-bound Level 3 BLAS GEMM kernel to our linear algebra evaluation.
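As a minimal illustration of the directive-based offload style this entry evaluates (a sketch under our own assumptions, not code from the paper): a bandwidth-bound vector update annotated with OpenMP 4.5 target directives, which an offload-capable compiler can map to an attached accelerator or run on the host.

/* y = a*x + y, offloaded via OpenMP 4.5 target directives.
   Build with an offload-capable compiler, e.g.
   clang -O2 -fopenmp -fopenmp-targets=nvptx64 daxpy.c */
void daxpy_offload(int n, double a, const double *x, double *y) {
    #pragma omp target teams distribute parallel for \
            map(to: x[0:n]) map(tofrom: y[0:n])
    for (int i = 0; i < n; ++i)
        y[i] += a * x[i];
}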
We implement the kernels of interest using various methods provided by newer OpenACC and OpenMP implementations, and we evaluate their performance on various platforms including both x86_64 and Power8 with attached NVIDIA GPUs, x86_64 multicores, self-hosted Intel Xeon Phi KNL, as well as an x86_64 host system with Intel Xeon Phi coprocessors. We update these evaluations with the newest version of the NVIDIA Pascal architecture (P100), Intel KNL 7230, Power8+, and the newest supporting compiler implementations. Furthermore, we present in detail what factors affected the performance portability, including how to pick the right programming model, its programming style, its availability on different platforms, and how well compilers can optimise and target multiple platforms.}, keywords = {OpenACC, OpenMP 4, performance portability, Programming models}, doi = {http://dx.doi.org/10.1504/IJHPCN.2017.10009064}, author = {M. Graham Lopez and Wayne Joubert and Ver{\'o}nica Larrea and Oscar Hernandez and Azzam Haidar and Stanimire Tomov and Jack Dongarra} } @conference {1451, title = {Evaluation of Programming Models to Address Load Imbalance on Distributed Multi-Core CPUs: A Case Study with Block Low-Rank Factorization}, booktitle = {PAW-ATM Workshop at SC19}, year = {2019}, month = {2019-11}, publisher = {ACM}, organization = {ACM}, address = {Denver, CO}, author = {Yu Pei and George Bosilca and Ichitaro Yamazaki and Akihiro Ida and Jack Dongarra} } @conference {1323, title = {Fast Batched Matrix Multiplication for Small Sizes using Half Precision Arithmetic on GPUs}, booktitle = {33rd IEEE International Parallel and Distributed Processing Symposium (IPDPS)}, year = {2019}, month = {2019-05}, publisher = {IEEE}, organization = {IEEE}, address = {Rio de Janeiro, Brazil}, author = {Ahmad Abdelfattah and Stanimire Tomov and Jack Dongarra} } @article {1329, title = {FFT-ECP Fast Fourier Transform}, year = {2019}, month = {2019-01}, publisher = {2019 ECP Annual Meeting (Research Poster)}, address = {Houston, TX}, author = {Stanimire Tomov and Azzam Haidar and Alan Ayala and Daniel Schultz and Jack Dongarra} } @techreport {1401, title = {FFT-ECP Implementation Optimizations and Features Phase}, journal = {Innovative Computing Laboratory Technical Report}, number = {ICL-UT-19-12}, year = {2019}, month = {2019-10}, publisher = {University of Tennessee}, author = {Stanimire Tomov and Azzam Haidar and Alan Ayala and Hejer Shaiek and Jack Dongarra} } @conference {1410, title = {Generic Matrix Multiplication for Multi-GPU Accelerated Distributed-Memory Platforms over PaRSEC}, booktitle = {ScalA{\textquoteright}19: 10th Workshop on Latest Advances in Scalable Algorithms for Large-Scale Systems}, year = {2019}, month = {2019-11}, publisher = {IEEE}, organization = {IEEE}, address = {Denver, CO}, author = {Thomas Herault and Yves Robert and George Bosilca and Jack Dongarra} } @article {1385, title = {GPUDirect MPI Communications and Optimizations to Accelerate FFTs on Exascale Systems}, journal = {EuroMPI{\textquoteright}19 Posters, Zurich, Switzerland}, number = {icl-ut-19-06}, year = {2019}, month = {2019-09}, publisher = {ICL}, type = {Extended Abstract}, abstract = {Fast Fourier transforms (FFTs) are used in applications ranging from molecular dynamics and spectrum estimation to machine learning, fast convolution and correlation, signal modulation, wireless multimedia applications, and others.
However, FFTs are memory bound, and therefore, to accelerate them, it is crucial to avoid and optimize the FFTs{\textquoteright} communications. To this end, we present a 3-D FFT design for distributed graphics processing unit (GPU) systems that: (1) efficiently uses GPUs{\textquoteright} high bandwidth, (2) reduces global communications algorithmically, when possible, and (3) employs GPUDirect technologies as well as MPI optimizations in the development of high-performance FFTs for large-scale GPU-accelerated systems. We show that these developments and optimizations lead to very good strong scalability and a performance that is close to 90\% of the theoretical peak.}, keywords = {CUDA-Aware MPI, ECP, FFT, FFT-ECP, gpu, GPUDirect}, author = {Hejer Shaiek and Stanimire Tomov and Alan Ayala and Azzam Haidar and Jack Dongarra} } @conference {1325, title = {Hands-on Research and Training in High-Performance Data Sciences, Data Analytics, and Machine Learning for Emerging Environments}, booktitle = {ISC High Performance}, year = {2019}, month = {2019-06}, publisher = {Springer International Publishing}, organization = {Springer International Publishing}, address = {Frankfurt, Germany}, author = {Kwai Wong and Stanimire Tomov and Jack Dongarra} } @conference {1403, title = {Impacts of Multi-GPU MPI Collective Communications on Large FFT Computation}, booktitle = {Workshop on Exascale MPI (ExaMPI) at SC19}, year = {2019}, month = {2019-11}, address = {Denver, CO}, keywords = {Collective MPI, Exascale applications, FFT, Heterogeneous systems, scalable}, author = {Alan Ayala and Stanimire Tomov and Xi Luo and Hejer Shaiek and Azzam Haidar and George Bosilca and Jack Dongarra} } @conference {1441, title = {Increasing Accuracy of Iterative Refinement in Limited Floating-Point Arithmetic on Half-Precision Accelerators}, booktitle = {IEEE High Performance Extreme Computing Conference (HPEC 2019), Best Paper Finalist}, year = {2019}, month = {2019-09}, publisher = {IEEE}, organization = {IEEE}, address = {Waltham, MA}, abstract = {The emergence of deep learning as a leading computational workload for machine learning tasks on large-scale cloud infrastructure installations has led to a plethora of accelerator hardware releases. However, the reduced precision and range of the floating-point numbers on these new platforms make it a non-trivial task to leverage these unprecedented advances in computational power for numerical linear algebra operations that come with a guarantee of robust error bounds. In order to address these concerns, we present a number of strategies that can be used to increase the accuracy of limited-precision iterative refinement. By limited precision, we mean 16-bit floating-point formats that are implemented in modern hardware accelerators and are not necessarily compliant with the IEEE half-precision specification. We include the explanation of a broader context and connections to established IEEE floating-point standards and existing high-performance computing (HPC) benchmarks. We also present a new formulation of LU factorization that we call signed square root LU, which produces more numerically balanced L and U factors that directly address the problems of limited range of the low-precision storage formats. The experimental results indicate that it is possible to recover substantial amounts of the accuracy in the system solution that would otherwise be lost. Previously, this could only be achieved by using iterative refinement based on single-precision floating-point arithmetic.
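The refinement loop at the heart of this line of work is short enough to sketch. The toy C program below is an illustration under our own assumptions, not the paper's code: the "factorization" is held in low precision (FP32 here, standing in for FP16), while residuals are computed in double precision and used to correct the solution.

#include <stdio.h>

/* Toy mixed-precision iterative refinement for a 2x2 diagonal system A x = b.
   The low-precision "factorization" is a float inverse of A; residuals are
   accumulated in double precision, and corrections update the double solution. */
int main(void) {
    const int n = 2;
    const double A[2] = {3.0, 7.0};
    const double b[2] = {1.0, 1.0};
    const float Ainv_low[2] = {1.0f / 3.0f, 1.0f / 7.0f}; /* FP32 stands in for FP16 */
    double x[2] = {0.0, 0.0};

    for (int iter = 0; iter < 5; ++iter) {
        double rnorm = 0.0;
        for (int i = 0; i < n; ++i) {
            double r = b[i] - A[i] * x[i];            /* residual in double precision */
            x[i] += (double)((float)r * Ainv_low[i]); /* low-precision correction */
            rnorm += r * r;
        }
        printf("iter %d: ||r||^2 = %.3e\n", iter, rnorm);
    }
    return 0;
}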
The discussion will also explore the numerical stability issues that are important for robust linear solvers on these new hardware platforms.}, author = {Piotr Luszczek and Ichitaro Yamazaki and Jack Dongarra} } @inproceedings {1404, title = {Least Squares Solvers for Distributed-Memory Machines with GPU Accelerators}, journal = {ACM International Conference on Supercomputing (ICS {\textquoteright}19)}, year = {2019}, month = {2019-06}, pages = {117{\textendash}126}, publisher = {ACM}, address = {Phoenix, Arizona}, isbn = {9781450360791}, doi = {https://dl.acm.org/doi/abs/10.1145/3330345.3330356}, author = {Jakub Kurzak and Mark Gates and Ali Charara and Asim YarKhan and Jack Dongarra} } @inproceedings {1405, title = {Linear Systems Solvers for Distributed-Memory Machines with GPU Accelerators}, journal = {Euro-Par 2019: Parallel Processing}, volume = {11725}, year = {2019}, month = {2019-08}, pages = {495{\textendash}506}, publisher = {Springer}, isbn = {978-3-030-29399-4}, doi = {https://doi.org/10.1007/978-3-030-29400-7_35}, url = {https://link.springer.com/chapter/10.1007/978-3-030-29400-7_35}, author = {Kurzak, Jakub and Mark Gates and Charara, Ali and Asim YarKhan and Yamazaki, Ichitaro and Jack Dongarra}, editor = {Yahyapour, Ramin} } @article {1366, title = {MagmaDNN 0.2 High-Performance Data Analytics for Manycore GPUs and CPUs}, year = {2019}, month = {2019-01}, publisher = {University of Tennessee}, doi = {10.13140/RG.2.2.14906.64961}, author = {Lucien Ng and Sihan Chen and Alex Gessinger and Daniel Nichols and Sophia Cheng and Anu Meenasorna and Kwai Wong and Stanimire Tomov and Azzam Haidar and Eduardo D{\textquoteright}Azevedo and Jack Dongarra} } @conference {1324, title = {MagmaDNN: Towards High-Performance Data Analytics and Machine Learning for Data-Driven Scientific Computing}, booktitle = {ISC High Performance}, year = {2019}, month = {2019-06}, publisher = {Springer International Publishing}, organization = {Springer International Publishing}, address = {Frankfurt, Germany}, abstract = {In this paper, we present work towards the development of a new data analytics and machine learning (ML) framework, called MagmaDNN. Our main goal is to provide scalable, high-performance data analytics and ML solutions for scientific applications running on current and upcoming heterogeneous many-core GPU-accelerated architectures. To this end, since many of the functionalities needed are based on standard linear algebra (LA) routines, we designed MagmaDNN to derive its performance power from the MAGMA library. The close integration provides the fundamental (scalable high-performance) LA routines available in MAGMA as a backend to MagmaDNN. We present some design issues for performance and scalability that are specific to ML using Deep Neural Networks (DNN), as well as the MagmaDNN designs towards overcoming them. In particular, MagmaDNN uses well established HPC techniques from the area of dense LA, including task-based parallelization, DAG representations, scheduling, mixed-precision algorithms, asynchronous solvers, and autotuned hyperparameter optimization. 
We illustrate these techniques and how they can be incorporated and used to outperform other currently available frameworks.}, doi = {https://doi.org/10.1007/978-3-030-34356-9_37}, author = {Daniel Nichols and Natalie-Sofia Tomov and Frank Betancourt and Stanimire Tomov and Kwai Wong and Jack Dongarra} } @conference {1373, title = {Massively Parallel Automated Software Tuning}, booktitle = {48th International Conference on Parallel Processing (ICPP 2019)}, year = {2019}, month = {2019-08}, publisher = {ACM Press}, organization = {ACM Press}, address = {Kyoto, Japan}, abstract = {This article presents an implementation of a distributed autotuning engine developed as part of the Bench-testing OpenN Software Autotuning Infrastructure project. The system is geared towards performance optimization of computational kernels for graphics processing units, and allows for the deployment of vast autotuning sweeps to massively parallel machines. The software implements dynamic work scheduling to distributed-memory resources, takes advantage of multithreading for parallel compilation, and dispatches kernel launches to multiple accelerators. This paper lays out the main design principles of the system and discusses the basic mechanics of the initial implementation. Preliminary performance results are presented, encountered challenges are discussed, and future directions are outlined.}, doi = {https://doi.org/10.1145/3337821.3337908}, author = {Jakub Kurzak and Yaohung Tsai and Mark Gates and Ahmad Abdelfattah and Jack Dongarra} } @conference {1303, title = {Matrix Powers Kernels for Thick-Restart Lanczos with Explicit External Deflation}, booktitle = {International Parallel and Distributed Processing Symposium (IPDPS)}, year = {2019}, month = {2019-05}, publisher = {IEEE}, organization = {IEEE}, address = {Rio de Janeiro, Brazil}, abstract = {Some scientific and engineering applications need to compute a large number of eigenpairs of a large Hermitian matrix. Though the Lanczos method is effective for computing a few eigenvalues, it can be expensive for computing a large number of eigenpairs (e.g., in terms of computation and communication). To improve the performance of the method, in this paper, we study an s-step variant of thick-restart Lanczos (TRLan) combined with explicit external deflation (EED). The s-step method generates a set of s basis vectors at a time and reduces the communication costs of generating the basis vectors. We then design a specialized matrix powers kernel (MPK) that reduces both the communication and computational costs by taking advantage of the special properties of the deflation matrix. We conducted numerical experiments with the new TRLan eigensolver using synthetic matrices and matrices from electronic structure calculations. The performance results on the Cori supercomputer at the National Energy Research Scientific Computing Center (NERSC) demonstrate the potential of the specialized MPK to significantly reduce the execution time of the TRLan eigensolver.
Speedups of up to 3.1{\texttimes} and 5.3{\texttimes} were obtained in our sequential and parallel runs, respectively.}, author = {Zhaojun Bai and Jack Dongarra and Ding Lu and Ichitaro Yamazaki} } @article {1374, title = {Optimizing Batch HGEMM on Small Sizes Using Tensor Cores}, year = {2019}, month = {2019-03}, publisher = {GPU Technology Conference (GTC)}, address = {San Jose, CA}, author = {Ahmad Abdelfattah and Stanimire Tomov and Jack Dongarra} } @article {1377, title = {PAPI Software-Defined Events for in-Depth Performance Analysis}, journal = {The International Journal of High Performance Computing Applications}, volume = {33}, year = {2019}, month = {2019-11}, pages = {1113-1127}, abstract = {The methodology and standardization layer provided by the Performance Application Programming Interface (PAPI) has played a vital role in application profiling for almost two decades. It has enabled sophisticated performance analysis tool designers and performance-conscious scientists to gain insights into their applications by simply instrumenting their code using a handful of PAPI functions that {\textquotedblleft}just work{\textquotedblright} across different hardware components. In the past, PAPI development had focused primarily on hardware-specific performance metrics. However, the rapidly increasing complexity of software infrastructure poses new measurement and analysis challenges for the developers of large-scale applications. In particular, acquiring information regarding the behavior of libraries and runtimes{\textemdash}used by scientific applications{\textemdash}requires low-level binary instrumentation, or APIs specific to each library and runtime. No uniform API for monitoring events that originate from inside the software stack has emerged. In this article, we present our efforts to extend PAPI{\textquoteright}s role so that it becomes the de facto standard for exposing performance-critical events, which we refer to as software-defined events (SDEs), from different software layers. Upgrading PAPI with SDEs enables monitoring of both types of performance events{\textemdash}hardware- and software-related events{\textemdash}in a uniform way, through the same consistent PAPI interface. The goal of this article is threefold. First, we motivate the need for SDEs and describe our design decisions regarding the functionality we offer through PAPI{\textquoteright}s new SDE interface. Second, we illustrate how SDEs can be utilized by different software packages, specifically, by showcasing their use in the numerical linear algebra library MAGMA-Sparse, the tensor algebra library TAMM that is part of the NWChem suite, and the compiler-based performance analysis tool Byfl. Third, we provide a performance analysis of the overhead that results from monitoring SDEs and discuss the trade-offs between overhead and functionality.}, url = {https://doi.org/10.1177/1094342019846287}, author = {Heike Jagode and Anthony Danalis and Hartwig Anzt and Jack Dongarra} } @article {1386, title = {PAPI{\textquoteright}s new Software-Defined Events for in-depth Performance Analysis}, year = {2019}, month = {2019-09}, publisher = {13th Parallel Tools Workshop}, address = {Dresden, Germany}, abstract = {One of the most recent developments of the Performance API (PAPI) is the addition of Software-Defined Events (SDE). PAPI has successfully served the role of the abstraction and unification layer for hardware performance counters for the past two decades.
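For context, the baseline hardware-counter workflow that PAPI standardizes takes only a handful of calls; the C sketch below is a minimal example (the preset event PAPI_TOT_INS may be unavailable on some systems, and error handling is elided).

#include <stdio.h>
#include <papi.h>

int main(void) {
    int eventset = PAPI_NULL;
    long long count[1];

    if (PAPI_library_init(PAPI_VER_CURRENT) != PAPI_VER_CURRENT)
        return 1;
    PAPI_create_eventset(&eventset);
    PAPI_add_named_event(eventset, "PAPI_TOT_INS"); /* total-instructions preset */

    PAPI_start(eventset);
    volatile double s = 0.0;            /* region of interest */
    for (int i = 0; i < 1000000; ++i)
        s += 0.5 * i;
    PAPI_stop(eventset, count);

    printf("PAPI_TOT_INS = %lld\n", count[0]);
    PAPI_shutdown();
    return 0;
}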
This talk presents our effort to extend this role to encompass performance critical information that does not originate in hardware, but rather in critical software layers, such as libraries and runtime systems. Our overall objective is to enable monitoring of both types of performance events, hardware- and software-related events, in a uniform way, through one consistent PAPI interface. Performance analysts will be able to form a complete picture of the entire application performance without learning new instrumentation primitives. In this talk, we outline PAPI{\textquoteright}s new SDE API and showcase the usefulness of SDE through its employment in software layers as diverse as the math library MAGMA, the dataflow runtime PaRSEC, and the state-of-the-art chemistry application NWChem. We outline the process of instrumenting these software packages and highlight the performance information that can be acquired with SDEs.}, author = {Anthony Danalis and Heike Jagode and Jack Dongarra} } @conference {1436, title = {ParILUT {\textendash} A Parallel Threshold ILU for GPUs}, booktitle = {IEEE International Parallel and Distributed Processing Symposium (IPDPS)}, year = {2019}, month = {2019-05}, publisher = {IEEE}, organization = {IEEE}, address = {Rio de Janeiro, Brazil}, abstract = {In this paper, we present the first algorithm for computing threshold ILU factorizations on GPU architectures. The proposed ParILUT-GPU algorithm is based on interleaving parallel fixed-point iterations that approximate the incomplete factors for an existing nonzero pattern with a strategy that dynamically adapts the nonzero pattern to the problem characteristics. This requires the efficient selection of thresholds that separate the values to be dropped from the incomplete factors, and we design a novel selection algorithm tailored towards GPUs. All components of the ParILUT-GPU algorithm make heavy use of the features available in the latest NVIDIA GPU generations, and outperform existing multithreaded CPU implementations.}, doi = {https://doi.org/10.1109/IPDPS.2019.00033}, author = {Hartwig Anzt and Tobias Ribizel and Goran Flegar and Edmond Chow and Jack Dongarra} } @conference {1452, title = {Performance Analysis of Tile Low-Rank Cholesky Factorization Using PaRSEC Instrumentation Tools}, booktitle = {Workshop on Programming and Performance Visualization Tools (ProTools 19) at SC19}, year = {2019}, month = {2019-11}, publisher = {ACM}, organization = {ACM}, address = {Denver, CO}, author = {Qinglei Cao and Yu Pei and Thomas Herault and Kadir Akbudak and Aleksandr Mikhalev and George Bosilca and Hatem Ltaief and David Keyes and Jack Dongarra} } @article {1311, title = {Performance of Asynchronous Optimized Schwarz with One-sided Communication}, journal = {Parallel Computing}, volume = {86}, year = {2019}, month = {2019-08}, pages = {66-81}, abstract = {In asynchronous iterative methods on distributed-memory computers, processes update their local solutions using data from other processes without an implicit or explicit global synchronization that corresponds to advancing the global iteration counter. In this work, we test the asynchronous optimized Schwarz domain-decomposition iterative method using various one-sided (remote direct memory access) communication schemes with passive target completion. 
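To make the one-sided, passive-target scheme concrete, here is an illustrative MPI-3 sketch (our own example, not the paper's code): each rank exposes a buffer in a window, and a neighbor deposits data with MPI_Put inside a lock/unlock epoch, with no matching receive on the target.

#include <mpi.h>
#include <stdio.h>

/* Each rank exposes one double in a window; rank r writes its ID into the
   buffer of rank (r+1) % size using passive-target one-sided communication. */
int main(int argc, char **argv) {
    MPI_Init(&argc, &argv);
    int rank, size;
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &size);

    double recvbuf = -1.0, sendval = (double)rank;
    MPI_Win win;
    MPI_Win_create(&recvbuf, sizeof(double), sizeof(double),
                   MPI_INFO_NULL, MPI_COMM_WORLD, &win);

    int right = (rank + 1) % size;
    MPI_Win_lock(MPI_LOCK_SHARED, right, 0, win); /* passive target: no action there */
    MPI_Put(&sendval, 1, MPI_DOUBLE, right, 0, 1, MPI_DOUBLE, win);
    MPI_Win_unlock(right, win);                   /* completes the put */

    MPI_Barrier(MPI_COMM_WORLD);                  /* only so the print is well-defined */
    printf("rank %d received %.0f\n", rank, recvbuf);

    MPI_Win_free(&win);
    MPI_Finalize();
    return 0;
}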
The results show that when one-sided communication is well-supported, the asynchronous version of optimized Schwarz can outperform the synchronous version even for perfectly balanced partitionings of the problem on a supercomputer with uniform nodes.}, issn = {0167-8191}, doi = {https://doi.org/10.1016/j.parco.2019.05.004}, url = {http://www.sciencedirect.com/science/article/pii/S0167819118301261}, author = {Ichitaro Yamazaki and Edmond Chow and Aurelien Bouteiller and Jack Dongarra} } @article {1270, title = {PLASMA: Parallel Linear Algebra Software for Multicore Using OpenMP}, journal = {ACM Transactions on Mathematical Software}, volume = {45}, year = {2019}, month = {2019-06}, doi = {https://doi.org/10.1145/3264491}, author = {Jack Dongarra and Mark Gates and Azzam Haidar and Jakub Kurzak and Piotr Luszczek and Panruo Wu and Ichitaro Yamazaki and Asim YarKhan and Maksims Abalenkovs and Negin Bagherpour and Sven Hammarling and Jakub Sistek} } @conference {1376, title = {Progressive Optimization of Batched LU Factorization on GPUs}, booktitle = { IEEE High Performance Extreme Computing Conference (HPEC{\textquoteright}19)}, year = {2019}, month = {2019-09}, publisher = {IEEE}, organization = {IEEE}, address = {Waltham, MA}, author = {Ahmad Abdelfattah and Stanimire Tomov and Jack Dongarra} } @article {1440, title = {Race to Exascale}, journal = {Computing in Science and Engineering}, volume = {21}, year = {2019}, month = {2019-03}, pages = {4-5}, abstract = {Whether called leadership computing, flagship computing, or just plain exascale, over the next few years, governments around the world are planning to spend over 10 billion dollars on a handful of new computer systems that will strive to reach an exascale level of performance. These systems and projects reflect the widespread and expanding recognition that almost all science and engineering endeavors now are intrinsically reliant on computing power not just for modeling and simulation but for data analysis, big data, and machine learning. Scientists and engineers consider computers as {\textquotedblleft}universal instruments{\textquotedblright} of insight.}, issn = {1558-366X}, doi = {https://doi.org/10.1109/MCSE.2018.2882574}, author = {Jack Dongarra and Steven Gottlieb and William T. Kramer} } @article {1463, title = {SLATE: Design of a Modern Distributed and Accelerated Linear Algebra Library}, year = {2019}, month = {2019-11}, publisher = {International Conference for High Performance Computing, Networking, Storage and Analysis (SC19)}, address = {Denver, CO}, author = {Mark Gates and Jakub Kurzak and Ali Charara and Asim YarKhan and Jack Dongarra} } @conference {1450, title = {SLATE: Design of a Modern Distributed and Accelerated Linear Algebra Library}, booktitle = {International Conference for High Performance Computing, Networking, Storage and Analysis (SC19)}, year = {2019}, month = {2019-11}, publisher = {ACM}, organization = {ACM}, address = {Denver, CO}, abstract = {The SLATE (Software for Linear Algebra Targeting Exascale) library is being developed to provide fundamental dense linear algebra capabilities for current and upcoming distributed high-performance systems, both accelerated CPU-GPU based and CPU based. SLATE will provide coverage of existing ScaLAPACK functionality, including the parallel BLAS; linear systems using LU and Cholesky; least squares problems using QR; and eigenvalue and singular value problems. 
In this respect, it will serve as a replacement for ScaLAPACK, which, after two decades of operation, cannot be adequately retrofitted for modern accelerated architectures. SLATE uses modern techniques such as communication-avoiding algorithms, lookahead panels to overlap communication and computation, and task-based scheduling, along with a modern C++ framework. Here we present the design of SLATE and initial reports of several of its components.}, doi = {https://doi.org/10.1145/3295500.3356223}, author = {Mark Gates and Jakub Kurzak and Ali Charara and Asim YarKhan and Jack Dongarra} } @techreport {1279, title = {SLATE Developers{\textquoteright} Guide}, journal = {SLATE Working Notes}, number = {11, ICL-UT-19-02}, year = {2019}, month = {2019-12}, publisher = {Innovative Computing Laboratory, University of Tennessee}, type = {SLATE Working Notes}, author = {Ali Charara and Mark Gates and Jakub Kurzak and Asim YarKhan and Jack Dongarra} } @techreport {1304, title = {SLATE Mixed Precision Performance Report}, journal = {Innovative Computing Laboratory Technical Report}, number = {ICL-UT-19-03}, year = {2019}, month = {2019-04}, publisher = {University of Tennessee}, author = {Ali Charara and Jack Dongarra and Mark Gates and Jakub Kurzak and Asim YarKhan} } @techreport {1321, title = {SLATE Working Note 12: Implementing Matrix Inversions}, journal = {SLATE Working Notes}, number = {12, ICL-UT-19-04}, year = {2019}, month = {2019-06}, publisher = {Innovative Computing Laboratory, University of Tennessee}, author = {Jakub Kurzak and Mark Gates and Ali Charara and Asim YarKhan and Jack Dongarra} } @techreport {1394, title = {SLATE Working Note 13: Implementing Singular Value and Symmetric/Hermitian Eigenvalue Solvers}, journal = {SLATE Working Notes}, number = {13, ICL-UT-19-07}, year = {2019}, note = {revision 06-2023}, month = {2019-09}, publisher = {Innovative Computing Laboratory, University of Tennessee}, type = {SLATE Working Notes}, author = {Mark Gates and Mohammed Al Farhan and Ali Charara and Jakub Kurzak and Dalal Sukkari and Asim YarKhan and Jack Dongarra} } @conference {1378, title = {Software-Defined Events through PAPI}, booktitle = {2019 IEEE International Parallel and Distributed Processing Symposium Workshops (IPDPSW)}, year = {2019}, month = {2019-05}, publisher = {IEEE}, organization = {IEEE}, address = {Rio de Janeiro, Brazil}, abstract = {PAPI has been used for almost two decades as an abstraction and standardization layer for profiling hardware-specific performance metrics. However, application developers (and profiling software packages) are quite often interested in information beyond hardware counters, such as the behavior of libraries used by the software that is being profiled. So far, accessing this information has required interfacing directly with the libraries on a case-by-case basis, or low-level binary instrumentation. In this paper, we introduce the new Software-Defined Event (SDE) component of PAPI which aims to enable PAPI to serve as an abstraction and standardization layer for events that originate in software layers as well. Extending PAPI to include SDEs enables monitoring of both types of performance events (hardware- and software-related events) in a uniform way, through the same consistent PAPI interface. Furthermore, implementing SDE as a PAPI component means that the new API is aimed only at the library developers who wish to export events from within their libraries.
The API for reading PAPI events (both hardware and software) remains the same, so all legacy codes and tools that use PAPI will not only continue to work, but they will automatically be able to read SDEs wherever those are available. The goal of this paper is threefold. First, we outline our design decisions regarding the functionality we offer through the new SDE interface, and offer simple examples of usage. Second, we illustrate how those events can be utilized by different software packages, specifically, by showcasing their use in the task-based runtime PaRSEC, and the HPCG supercomputing benchmark. Third, we provide a thorough performance analysis of the overhead that results from monitoring different types of SDEs, and showcase the negligible overhead of using PAPI SDE even in cases of extremely heavy use.}, doi = {https://doi.org/10.1109/IPDPSW.2019.00069}, author = {Anthony Danalis and Heike Jagode and Thomas Herault and Piotr Luszczek and Jack Dongarra} } @article {1237, title = {Solving Linear Diophantine Systems on Parallel Architectures}, journal = {IEEE Transactions on Parallel and Distributed Systems}, volume = {30}, year = {2019}, month = {2019-05}, pages = {1158-1169}, abstract = {Solving linear Diophantine systems of equations has applications in discrete-event systems, model checking, formal languages and automata, logic programming, cryptography, networking, signal processing, and chemistry. For modeling discrete systems with Petri nets, a solution in non-negative integer numbers is required, which represents an intractable problem. For this reason, any significant speedup in solving such tasks is highly valuable. In this paper we design a new solver of linear Diophantine systems based on the parallel-sequential composition of the system clans. The solver is studied and implemented to run on parallel architectures using a two-level parallelization concept based on MPI and OpenMP. A decomposable system is usually represented by a sparse matrix; the minimal clan size of the decomposition restricts the granularity of the technique. MPI is applied for solving systems for clans using a parallel-sequential composition on distributed-memory computing nodes, while OpenMP is applied in solving a single indecomposable system on a single node using multiple cores. A dynamic task-dispatching subsystem is developed for distributing systems on nodes in the process of compositional solution. Computational speedups are obtained on a series of test examples; the best case reaches a 45-fold speedup on 5 nodes with 20 cores each.}, keywords = {Mathematical model, Matrix decomposition, Parallel architectures, Petri nets, Software algorithms, Sparse matrices, Task analysis}, doi = {http://dx.doi.org/10.1109/TPDS.2018.2873354}, url = {https://ieeexplore.ieee.org/document/8482295}, author = {Dmitry Zaitsev and Stanimire Tomov and Jack Dongarra} } @conference {1318, title = {Towards Continuous Benchmarking}, booktitle = {Platform for Advanced Scientific Computing Conference (PASC 2019)}, year = {2019}, month = {2019-06}, publisher = {ACM Press}, organization = {ACM Press}, address = {Zurich, Switzerland}, abstract = {We present an automated performance evaluation framework that enables a sustainable workflow for testing and performance evaluation of software libraries.
Integrating this component into an ecosystem enables sustainable software development, as a community effort, via a web application for interactively evaluating the performance of individual software components. The performance evaluation tool is based exclusively on web technologies, which removes the burden of downloading performance data or installing additional software. We employ this framework for the Ginkgo software ecosystem, but the framework can be used with essentially any software project, including the comparison between different software libraries. The Continuous Integration (CI) framework of Ginkgo is also extended to automatically run a benchmark suite on predetermined HPC systems, store the state of the machine and the environment along with the compiled binaries, and collect results in a publicly accessible performance data repository based on Git. The Ginkgo performance explorer (GPE) can be used to retrieve the performance data from the repository, and visualizes it in a web browser. GPE also implements an interface that allows users to write scripts, archived in a Git repository, to extract particular data, compute particular metrics, and visualize them in many different formats (as specified by the script). The combination of these approaches creates a workflow which enables performance reproducibility and software sustainability of scientific software. In this paper, we present example scripts that extract and visualize performance data for Ginkgo{\textquoteright}s SpMV kernels that allow users to identify the optimal kernel for specific problem characteristics.}, isbn = {9781450367707}, doi = {https://doi.org/10.1145/3324989.3325719}, author = {Hartwig Anzt and Yen Chen Chen and Terry Cojean and Jack Dongarra and Goran Flegar and Pratik Nayak and Enrique S. Quintana-Orti and Yuhsiang M. Tsai and Weichung Wang} } @conference {1435, title = {Towards Half-Precision Computation for Complex Matrices: A Case Study for Mixed Precision Solvers on GPUs}, booktitle = {ScalA19: 10th Workshop on Latest Advances in Scalable Algorithms for Large-Scale Systems}, year = {2019}, month = {2019-11}, publisher = {IEEE}, organization = {IEEE}, address = {Denver, CO}, keywords = {Half precision, mixed-precision solvers, Tensor cores FP16 arithmetic}, author = {Ahmad Abdelfattah and Stanimire Tomov and Jack Dongarra} } @article {1390, title = {Understanding Native Event Semantics}, year = {2019}, month = {2019-04}, publisher = {9th JLESC Workshop}, address = {Knoxville, TN}, author = {Anthony Danalis and Heike Jagode and Daniel Barry and Jack Dongarra} } @article {1220, title = {Variable-Size Batched Gauss-Jordan Elimination for Block-Jacobi Preconditioning on Graphics Processors}, journal = {Parallel Computing}, volume = {81}, year = {2019}, month = {2019-01}, pages = {131-146}, abstract = {In this work, we address the efficient realization of block-Jacobi preconditioning on graphics processing units (GPUs). This task requires the solution of a collection of small and independent linear systems. To fully realize this implementation, we develop a variable-size batched matrix inversion kernel that uses Gauss-Jordan elimination (GJE) along with a variable-size batched matrix{\textendash}vector multiplication kernel that transforms the linear systems{\textquoteright} right-hand sides into the solution vectors. Our kernels make heavy use of the increased register count and the warp-local communication associated with newer GPU architectures. 
Moreover, in the matrix inversion, we employ an implicit pivoting strategy that migrates the workload (i.e., operations) to the place where the data resides instead of moving the data to the executing cores. We complement the matrix inversion with extraction and insertion strategies that allow the block-Jacobi preconditioner to be set up rapidly. The experiments on NVIDIA{\textquoteright}s K40 and P100 architectures reveal that our variable-size batched matrix inversion routine outperforms the CUDA basic linear algebra subroutine (cuBLAS) library functions that provide the same (or even less) functionality. We also show that the preconditioner setup and preconditioner application cost can be somewhat offset by the faster convergence of the iterative solver.}, keywords = {Batched algorithms, Block-Jacobi, Gauss{\textendash}Jordan elimination, Graphics processor, matrix inversion, sparse linear systems}, doi = {https://doi.org/10.1016/j.parco.2017.12.006}, author = {Hartwig Anzt and Jack Dongarra and Goran Flegar and Enrique S. Quintana-Orti} } @conference {1380, title = {What it Takes to keep PAPI Instrumental for the HPC Community}, booktitle = {1st Workshop on Sustainable Scientific Software (CW3S19)}, year = {2019}, month = {2019-07}, address = {Collegeville, Minnesota}, url = {https://collegeville.github.io/CW3S19/WorkshopResources/WhitePapers/JagodeHeike_CW3S19_papi.pdf}, author = {Heike Jagode and Anthony Danalis and Jack Dongarra} } @article {1388, title = {What it Takes to keep PAPI Instrumental for the HPC Community}, year = {2019}, month = {2019-07}, publisher = {The 2019 Collegeville Workshop on Sustainable Scientific Software (CW3S19)}, address = {Collegeville, MN}, author = {Heike Jagode and Anthony Danalis and Jack Dongarra} } @article {1389, title = {Is your scheduling good? How would you know?}, year = {2019}, month = {2019-06}, publisher = {14th Scheduling for Large Scale Systems Workshop}, address = {Bordeaux, France}, abstract = {Optimal scheduling is a goal that can rarely be achieved, even in purely theoretical contexts where the nuanced behavior of complex hardware and software systems can be abstracted away, and simplified assumptions can be made. In real runtime systems, task schedulers are usually designed based on intuitions about optimal design and heuristics such as minimizing idle time and load imbalance, as well as maximizing data locality and reuse. This harsh reality is due in part to the very crude tools designers of task scheduling systems have at their disposal for assessing the quality of their assumptions. Examining hardware behavior{\textemdash}such as cache reuse{\textemdash}through counters rarely leads to improvement in scheduler design, and quite often the runtime designers are left with total execution time as their only guiding mechanism. In this talk we will discuss new methods for illuminating the dark corners of task scheduling on real hardware. We will present our work on extending PAPI{\textemdash}which has long been the de facto standard for accessing hardware events{\textemdash}so that it can be used to access software events. 
We will focus specifically on the impact this work can have on runtime systems with dynamic schedulers, and discuss illustrative examples.}, author = {Anthony Danalis and Heike Jagode and Jack Dongarra} } @article {1272, title = {The 30th Anniversary of the Supercomputing Conference: Bringing the Future Closer{\textemdash}Supercomputing History and the Immortality of Now}, journal = {Computer}, volume = {51}, year = {2018}, month = {2018-11}, pages = {74{\textendash}85}, abstract = {A panel of experts{\textemdash}including Gordon Bell, Jack Dongarra, William E. (Bill) Johnston, Horst Simon, Erich Strohmaier, and Mateo Valero{\textemdash}discuss historical reflections on the past 30 years of the Supercomputing (SC) conference, its leading role for the professional community and some exciting future challenges.}, keywords = {High-performance computing, history of computing, SC, Scientific computing, supercomputing, Virtual Roundtable}, doi = {10.1109/MC.2018.3971352}, author = {Jack Dongarra and Vladimir Getov and Kevin Walsh} } @article {1212, title = {Accelerating NWChem Coupled Cluster through dataflow-based Execution}, journal = {The International Journal of High Performance Computing Applications}, volume = {32}, year = {2018}, month = {2018-07}, pages = {540{\textendash}551}, type = {Journal Article}, chapter = {540}, abstract = {Numerical techniques used for describing many-body systems, such as the Coupled Cluster methods (CC) of the quantum chemistry package NWCHEM, are of extreme interest to the computational chemistry community in fields such as catalytic reactions, solar energy, and bio-mass conversion. In spite of their importance, many of these computationally intensive algorithms have traditionally been thought of in a fairly linear fashion, or are parallelized in coarse chunks. In this paper, we present our effort to convert NWCHEM{\textquoteright}s CC code into a dataflow-based form that is capable of utilizing the task scheduling system PARSEC (Parallel Runtime Scheduling and Execution Controller): a software package designed to enable high-performance computing at scale. We discuss the modularity of our approach and explain how the PARSEC-enabled dataflow version of the subroutines seamlessly integrates into the NWCHEM codebase. Furthermore, we argue how the CC algorithms can be easily decomposed into finer-grained tasks (compared with the original version of NWCHEM), and how data distribution and load balancing are decoupled and can be tuned independently. We demonstrate performance acceleration by more than a factor of two in the execution of the entire CC component of NWCHEM, concluding that the utilization of dataflow-based execution for CC methods enables more efficient and scalable computation.}, keywords = {CCSD, dag, dataflow, NWChem, parsec, ptg, tasks}, doi = {10.1177/1094342016672543}, url = {http://journals.sagepub.com/doi/10.1177/1094342016672543}, author = {Heike Jagode and Anthony Danalis and Jack Dongarra} } @article {1266, title = {Accelerating the SVD Bi-Diagonalization of a Batch of Small Matrices using GPUs}, journal = {Journal of Computational Science}, volume = {26}, year = {2018}, month = {2018-05}, pages = {237{\textendash}245}, abstract = {The acceleration of many small-sized linear algebra problems has become extremely challenging for current many-core architectures, and in particular GPUs.
Standard interfaces have been proposed for some of these problems, called batched problems, so that they can be targeted for optimization and used in a standard way in applications, which call them directly from highly optimized standard numerical libraries like (batched) BLAS and LAPACK. While most of the developments have been for one-sided factorizations and solvers, many important applications {\textendash} from big data analytics to information retrieval, low-rank approximations for solvers and preconditioners {\textendash} require two-sided factorizations, and most notably the SVD factorization. To address these needs and the related parallelization challenges, we developed a number of new batched computing techniques and designed batched Basic Linear Algebra Subroutines (BLAS) routines, in particular the Level-2 BLAS GEMV and the Level-3 BLAS GEMM routines. We propose a device functions-based methodology and big-tile setting techniques in our batched BLAS design. The different optimization techniques result in many software versions that must be tuned, for which we adopt an auto-tuning strategy to automatically derive the optimized instances of the routines. We illustrate our batched BLAS approach to optimize batched SVD bi-diagonalization progressively on GPUs. The progression is illustrated on an NVIDIA K40c GPU and also ported to and presented on an AMD Fiji Nano GPU, using AMD{\textquoteright}s Heterogeneous-Compute Interface for Portability (HIP) C++ runtime API. We demonstrate achieving 80\% of the theoretically achievable peak performance for the overall algorithm, and significant acceleration of the Level-2 BLAS GEMV and Level-3 BLAS GEMM routines needed, compared to vendor-optimized libraries on GPUs and multicore CPUs. The optimization techniques in this paper are applicable to the other two-sided factorizations as well.}, keywords = {Batched, Eigenvalue and singular value problems, hardware accelerators, numerical linear algebra, Two-sided factorization algorithms}, doi = {https://doi.org/10.1016/j.jocs.2018.01.007}, author = {Tingxing Dong and Azzam Haidar and Stanimire Tomov and Jack Dongarra} } @article {1161, title = {Accelerating the SVD Two Stage Bidiagonal Reduction and Divide and Conquer Using GPUs}, journal = {Parallel Computing}, volume = {74}, year = {2018}, month = {2018-05}, pages = {3{\textendash}18}, abstract = {The increasing gap between memory bandwidth and computation speed motivates the choice of algorithms to take full advantage of today{\textquoteright}s high performance computers. For dense matrices, the classic algorithm for the singular value decomposition (SVD) uses a one stage reduction to bidiagonal form, which is limited in performance by the memory bandwidth. To overcome this limitation, a two stage reduction to bidiagonal form has been gaining popularity. It first reduces the matrix to band form using high performance Level 3 BLAS, then reduces the band matrix to bidiagonal form. As accelerators such as GPUs and co-processors are becoming increasingly widespread in high-performance computing, a question of great interest to many SVD users is how much the employment of a two stage reduction, as well as other current best practices in GPU computing, can accelerate this important routine. To fulfill this interest, we have developed an accelerated SVD employing a two stage reduction to bidiagonal form and a number of other algorithms that are highly optimized for GPUs.
Notably, we also parallelize and accelerate the divide and conquer algorithm used to solve the subsequent bidiagonal SVD. By accelerating all phases of the SVD algorithm, we provide a significant speedup compared to existing multi-core and GPU-based SVD implementations. In particular, using a P100 GPU, we illustrate a performance of up to 804 Gflop/s in double precision arithmetic to compute the full SVD of a 20k {\texttimes} 20k matrix in 90 seconds, which is 8.9 {\texttimes} faster than MKL on two 10-core Intel Haswell E5-2650 v3 CPUs, 3.7 {\texttimes} over the multi-core PLASMA two stage version, and 2.6 {\texttimes} over the previously accelerated one stage MAGMA version.}, keywords = {2-stage, accelerator, Divide and conquer, gpu, Singular value decomposition, SVD}, issn = {01678191}, doi = {10.1016/j.parco.2017.10.004}, url = {https://www.sciencedirect.com/science/article/pii/S0167819117301758}, author = {Mark Gates and Stanimire Tomov and Jack Dongarra} } @conference {1205, title = {ADAPT: An Event-Based Adaptive Collective Communication Framework}, booktitle = {The 27th International Symposium on High-Performance Parallel and Distributed Computing (HPDC {\textquoteright}18)}, year = {2018}, month = {2018-06}, publisher = {ACM Press}, organization = {ACM Press}, address = {Tempe, Arizona}, abstract = {The increase in scale and heterogeneity of high-performance computing (HPC) systems predisposes the performance of Message Passing Interface (MPI) collective communications to be susceptible to noise, and to adapt to a complex mix of hardware capabilities. The designs of state-of-the-art MPI collectives heavily rely on synchronizations; these designs magnify noise across the participating processes, resulting in significant performance slowdown. Therefore, such design philosophy must be reconsidered to efficiently and robustly run on large-scale heterogeneous platforms. In this paper, we present ADAPT, a new collective communication framework in Open MPI, using event-driven techniques to morph collective algorithms to heterogeneous environments. The core concept of ADAPT is to relax synchronizations, while maintaining the minimal data dependencies of MPI collectives. To fully exploit the different bandwidths of data movement lanes in heterogeneous systems, we extend the ADAPT collective framework with a topology-aware communication tree. This removes the boundaries of different hardware topologies while maximizing the speed of data movements. We evaluate our framework with two popular collective operations: broadcast and reduce on both CPU and GPU clusters. Our results demonstrate drastic performance improvements and a strong resistance against noise compared to other state-of-the-art MPI libraries.
In particular, we demonstrate at least 1.3X and 1.5X speedup for CPU data and 2X and 10X speedup for GPU data using ADAPT event-based broadcast and reduce operations.}, isbn = {9781450357852}, doi = {10.1145/3208040.3208054}, author = {Xi Luo and Wei Wu and George Bosilca and Thananon Patinyasakdikul and Linnan Wang and Jack Dongarra} } @techreport {1229, title = {Algorithms and Optimization Techniques for High-Performance Matrix-Matrix Multiplications of Very Small Matrices}, journal = {Innovative Computing Laboratory Technical Report}, number = {ICL-UT-18-09}, year = {2018}, month = {2018-09}, publisher = {Innovative Computing Laboratory, University of Tennessee}, abstract = {Expressing scientific computations in terms of BLAS, and in particular the general dense matrix-matrix multiplication (GEMM), is of fundamental importance for obtaining high performance portability across architectures. However, GEMMs for small matrices of sizes smaller than 32 are not sufficiently optimized in existing libraries. We consider the computation of many small GEMMs and its performance portability for a wide range of computer architectures, including Intel CPUs, ARM, IBM, Intel Xeon Phi, and GPUs. These computations often occur in applications like big data analytics, machine learning, high-order finite element methods (FEM), and others. The GEMMs are grouped together in a single batched routine. For these cases, we present algorithms and their optimization techniques that are specialized for the matrix sizes and architectures of interest. We derive a performance model and show that the new developments can be tuned to obtain performance that is within 90\% of the optimal for any of the architectures of interest. For example, on a V100 GPU for square matrices of size 32, we achieve an execution rate of about 1600 gigaFLOP/s in double-precision arithmetic, which is 95\% of the theoretically derived peak for this computation on a V100 GPU. We also show that these results outperform currently available state-of-the-art implementations such as vendor-tuned math libraries, including Intel MKL and NVIDIA CUBLAS, as well as open-source libraries like OpenBLAS and Eigen.}, author = {Ian Masliah and Ahmad Abdelfattah and Azzam Haidar and Stanimire Tomov and Marc Baboulin and Jo{\"e}l Falcou and Jack Dongarra} } @article {1260, title = {Analysis and Design Techniques towards High-Performance and Energy-Efficient Dense Linear Solvers on GPUs}, journal = {IEEE Transactions on Parallel and Distributed Systems}, volume = {29}, year = {2018}, month = {2018-12}, pages = {2700{\textendash}2712}, abstract = {Graphics Processing Units (GPUs) are widely used in accelerating dense linear solvers. The matrix factorizations, which dominate the runtime for these solvers, are often designed using a hybrid scheme, where GPUs perform trailing matrix updates, while the CPUs perform the panel factorizations. Consequently, hybrid solutions require high-end CPUs and optimized CPU software in order to deliver high performance. Furthermore, they lack the energy efficiency inherent in GPUs due to the use of less energy-efficient CPUs, as well as CPU-GPU communications. This paper presents analysis and design techniques that overcome the shortcomings of the hybrid algorithms, and allow the design of high-performance and energy-efficient dense LU and Cholesky factorizations that use GPUs only. The full GPU solution eliminates the need for a high-end CPU and optimized CPU software, which leads to a better energy efficiency.
We discuss different design choices, and introduce optimized GPU kernels for panel factorizations. The developed solutions achieve 90+ percent of the performance of optimized hybrid solutions, while improving the energy efficiency by 50 percent. They outperform the vendor library by 30-50 percent in single precision, and 15-50 percent in double precision. We also show that hybrid designs trail the proposed solutions in performance when optimized CPU software is not available.}, keywords = {Dense linear solvers, energy efficiency, GPU computing}, doi = {10.1109/TPDS.2018.2842785}, author = {Ahmad Abdelfattah and Azzam Haidar and Stanimire Tomov and Jack Dongarra} } @conference {1195, title = {Analyzing Performance of BiCGStab with Hierarchical Matrix on GPU Clusters}, booktitle = {IEEE International Parallel and Distributed Processing Symposium (IPDPS)}, year = {2018}, month = {2018-05}, publisher = {IEEE}, organization = {IEEE}, address = {Vancouver, BC, Canada}, abstract = {ppohBEM is an open-source software package implementing the boundary element method. One of its main software tasks is the solution of the dense linear system of equations, for which ppohBEM relies on another software package called HACApK. To reduce the cost of solving the linear system, HACApK hierarchically compresses the coefficient matrix using adaptive cross approximation. This hierarchical compression greatly reduces the storage and time complexities of the solver and enables the solution of large-scale boundary value problems. To extend the capability of ppohBEM, in this paper, we carefully port HACApK{\textquoteright}s linear solver onto GPU clusters. Though the potential of the GPUs has been widely accepted in high-performance computing, it is still a challenge to utilize the GPUs for a solver, like HACApK{\textquoteright}s, that requires fine-grained computation and global communication. First, to utilize the GPUs, we integrate the batched GPU kernel that was recently released in the MAGMA software package. We discuss several techniques to improve the performance of the batched kernel. We then study various techniques to address the inter-GPU communication and study their effects on state-of-the-art GPU clusters. We believe that the techniques studied in this paper are of interest to a wide range of software packages running on GPUs, especially with the increasingly complex node architectures and the growing costs of communication. We also hope that our efforts to integrate the GPU kernel or to set up the inter-GPU communication will influence the design of future-generation batched kernels or the communication layer within a software stack.}, author = {Ichitaro Yamazaki and Ahmad Abdelfattah and Akihiro Ida and Satoshi Ohshima and Stanimire Tomov and Rio Yokota and Jack Dongarra} } @article {1268, title = {Autotuning in High-Performance Computing Applications}, journal = {Proceedings of the IEEE}, volume = {106}, year = {2018}, month = {2018-11}, pages = {2068{\textendash}2083}, abstract = {Autotuning refers to the automatic generation of a search space of possible implementations of a computation that are evaluated through models and/or empirical measurement to identify the most desirable implementation. Autotuning has the potential to dramatically improve the performance portability of petascale and exascale applications.
To date, autotuning has been used primarily in high-performance applications through tunable libraries or previously tuned application code that is integrated directly into the application. This paper draws on the authors{\textquoteright} extensive experience applying autotuning to high-performance applications, describing both successes and future challenges. If autotuning is to be widely used in the HPC community, researchers must address the software engineering challenges, manage configuration overheads, and continue to demonstrate significant performance gains and portability across architectures. In particular, tools that configure the application must be integrated into the application build process so that tuning can be reapplied as the application and target architectures evolve.}, keywords = {High-performance computing, performance tuning programming systems}, doi = {10.1109/JPROC.2018.2841200}, author = {Prasanna Balaprakash and Jack Dongarra and Todd Gamblin and Mary Hall and Jeffrey Hollingsworth and Boyana Norris and Richard Vuduc} } @article {1271, title = {Autotuning Numerical Dense Linear Algebra for Batched Computation With GPU Hardware Accelerators}, journal = {Proceedings of the IEEE}, volume = {106}, year = {2018}, month = {2018-11}, pages = {2040{\textendash}2055}, abstract = {Computational problems in engineering and scientific disciplines often rely on the solution of many instances of small systems of linear equations, which are called batched solves. In this paper, we focus on the important variants of both batch Cholesky factorization and subsequent substitution. The former requires the linear system matrices to be symmetric positive definite (SPD). We describe the implementation and automated performance engineering of these kernels that implement the factorization and the two substitutions. Our target platforms are graphics processing units (GPUs), which over the past decade have become an attractive high-performance computing (HPC) target for solvers of linear systems of equations. Due to their throughput-oriented design, GPUs exhibit the highest processing rates among the available processors. However, without careful design and coding, this speed is mostly restricted to large matrix sizes. We show an automated exploration of the implementation space as well as a new data layout for the batched class of SPD solvers. Our tests involve the solution of many thousands of linear SPD systems of exactly the same size. The primary focus of our techniques is on the individual matrices in the batch that have dimensions ranging from 5-by-5 up to 100-by-100. We compare our autotuned solvers against the state-of-the-art solvers such as those provided through NVIDIA channels and publicly available in the optimized MAGMA library. The observed performance is competitive and many times superior for many practical cases. 
The advantage of the presented methodology lies in achieving these results in a portable manner across matrix storage formats and GPU hardware architecture platforms.}, keywords = {Dense numerical linear algebra, performance autotuning}, doi = {10.1109/JPROC.2018.2868961}, author = {Jack Dongarra and Mark Gates and Jakub Kurzak and Piotr Luszczek and Yaohung Tsai} } @article {1277, title = {Autotuning Techniques for Performance-Portable Point Set Registration in 3D}, journal = {Supercomputing Frontiers and Innovations}, volume = {5}, number = {4}, year = {2018}, month = {2018-12}, chapter = {42}, abstract = {We present an autotuning approach applied to exhaustive performance engineering of the EM-ICP algorithm for the point set registration problem with a known reference. We were able to achieve progressively higher performance levels through a variety of code transformations and an automated procedure of generating a large number of implementation variants. Furthermore, we managed to exploit code patterns that are not common when only attempting manual optimization but which, in our tests, yielded better performance for the chosen registration algorithm. Finally, we also show how we maintained high levels of performance in a portable fashion across a wide range of hardware platforms including multicore, manycore coprocessors, and accelerators. Each of these hardware classes is much different from the others and, consequently, cannot reliably be mastered by a single developer in the short time required to deliver a close-to-optimal implementation. We assert in our concluding remarks that our methodology as well as the presented tools provide a valid automation system for software optimization tasks on modern HPC hardware.}, doi = {10.14529/jsfi180404}, author = {Piotr Luszczek and Jakub Kurzak and Ichitaro Yamazaki and David Keffer and Vasileios Maroulas and Jack Dongarra} } @article {1300, title = {Batched BLAS (Basic Linear Algebra Subprograms) 2018 Specification}, year = {2018}, month = {2018-07}, abstract = {This document describes an API for Batch Basic Linear Algebra Subprograms (Batched BLAS or BBLAS). We focus on many independent BLAS operations on small matrices that are grouped together and processed by a single routine, called a Batched BLAS routine. We consider extensions beyond the original BLAS standard that specify a programming interface not only for routines with uniformly-sized matrices and/or vectors but also for the situation where the sizes vary. The aim is to provide more efficient, but portable, implementations of algorithms on high-performance manycore platforms. These include multicore and many-core CPU processors; GPUs and coprocessors; as well as other hardware accelerators with floating-point compute facility.}, author = {Jack Dongarra and Iain Duff and Mark Gates and Azzam Haidar and Sven Hammarling and Nicholas J. Higham and Jonathan Hogg and Pedro Valero Lara and Piotr Luszczek and Mawussi Zounon and Samuel D. Relton and Stanimire Tomov and Timothy Costa and Sarah Knepper} } @article {1209, title = {Batched One-Sided Factorizations of Tiny Matrices Using GPUs: Challenges and Countermeasures}, journal = {Journal of Computational Science}, volume = {26}, year = {2018}, month = {2018-05}, pages = {226{\textendash}236}, abstract = {The use of batched matrix computations has recently gained a lot of interest for applications where the same operation is applied to many small independent matrices.
The batched computational pattern is frequently encountered in applications of data analytics, direct/iterative solvers and preconditioners, computer vision, astrophysics, and more, and often requires specific designs for vectorization and extreme parallelism to map well on today{\textquoteright}s high-end many-core architectures. This has led to the development of optimized software for batch computations, and to an ongoing community effort to develop standard interfaces for batched linear algebra software. Furthering these developments, we present GPU design and optimization techniques for high-performance batched one-sided factorizations of millions of tiny matrices (of size 32 and less). We quantify the effects and relevance of different techniques in order to select the best-performing LU, QR, and Cholesky factorization designs. While we adapt common optimization techniques, such as optimal memory traffic, register blocking, and concurrency control, we also show that a different mindset and techniques are needed when matrices are tiny, and in particular, sub-vector/warp in size. The proposed routines are part of the MAGMA library and deliver significant speedups compared to their counterparts in currently available vendor-optimized libraries. Notably, we tune the developments for the newest V100 GPU from NVIDIA to show speedups of up to 11.8{\texttimes}.}, keywords = {batch computation, GPU computing, matrix factorization}, doi = {https://doi.org/10.1016/j.jocs.2018.01.005}, author = {Ahmad Abdelfattah and Azzam Haidar and Stanimire Tomov and Jack Dongarra} } @article {1211, title = {Big Data and Extreme-Scale Computing: Pathways to Convergence - Toward a Shaping Strategy for a Future Software and Data Ecosystem for Scientific Inquiry}, journal = {The International Journal of High Performance Computing Applications}, volume = {32}, year = {2018}, month = {2018-07}, pages = {435{\textendash}479}, abstract = {Over the past four years, the Big Data and Exascale Computing (BDEC) project organized a series of five international workshops that aimed to explore the ways in which the new forms of data-centric discovery introduced by the ongoing revolution in high-end data analysis (HDA) might be integrated with the established, simulation-centric paradigm of the high-performance computing (HPC) community. Based on those meetings, we argue that the rapid proliferation of digital data generators, the unprecedented growth in the volume and diversity of the data they generate, and the intense evolution of the methods for analyzing and using that data are radically reshaping the landscape of scientific computing. The most critical problems involve the logistics of wide-area, multistage workflows that will move back and forth across the computing continuum, between the multitude of distributed sensors, instruments and other devices at the network{\textquoteright}s edge, and the centralized resources of commercial clouds and HPC centers. We suggest that the prospects for the future integration of technological infrastructures and research ecosystems need to be considered at three different levels. First, we discuss the convergence of research applications and workflows that establish a research paradigm that combines both HPC and HDA, where ongoing progress is already motivating efforts at the other two levels.
Second, we offer an account of some of the problems involved with creating a converged infrastructure for peripheral environments, that is, a shared infrastructure that can be deployed throughout the network in a scalable manner to meet the highly diverse requirements for processing, communication, and buffering/storage of massive data workflows of many different scientific domains. Third, we focus on some opportunities for software ecosystem convergence in big, logically centralized facilities that execute large-scale simulations and models and/or perform large-scale data analytics. We close by offering some conclusions and recommendations for future investment and policy review.}, doi = {https://doi.org/10.1177/1094342018778123}, author = {Mark Asch and Terry Moore and Rosa M. Badia and Micah Beck and Pete Beckman and Thierry Bidot and Fran{\c c}ois Bodin and Franck Cappello and Alok Choudhary and Bronis R. de Supinski and Ewa Deelman and Jack Dongarra and Anshu Dubey and Geoffrey Fox and Haohuan Fu and Sergi Girona and Michael Heroux and Yutaka Ishikawa and Kate Keahey and David Keyes and William T. Kramer and Jean-Fran{\c c}ois Lavignon and Yutong Lu and Satoshi Matsuoka and Bernd Mohr and St{\'e}phane Requena and Joel Saltz and Thomas Schulthess and Rick Stevens and Martin Swany and Alexander Szalay and William Tang and Ga{\"e}l Varoquaux and Jean-Pierre Vilotte and Robert W. Wisniewski and Zhiwei Xu and Igor Zacharov} } @article {1263, title = {Computational Benefit of GPU Optimization for Atmospheric Chemistry Modeling}, journal = {Journal of Advances in Modeling Earth Systems}, volume = {10}, year = {2018}, month = {2018-08}, pages = {1952{\textendash}1969}, abstract = {Global chemistry-climate models are computationally burdened as the chemical mechanisms become more complex and realistic. Optimization for graphics processing units (GPUs) may make longer global simulations with regional detail possible, but limited study has been done to explore the potential benefit for atmospheric chemistry modeling. Hence, in this study, the second-order Rosenbrock solver of the chemistry module of CAM4-Chem is ported to the GPU to gauge potential speed-up. We find that on the CPU, the fastest performance is achieved using the Intel compiler with a block interleaved memory layout. Different combinations of compiler and memory layout lead to ~11.02{\texttimes} difference in the computational time. In contrast, the GPU version performs the best when using a combination of fully interleaved memory layout with block size equal to the warp size, CUDA streams for independent kernels, and constant memory. Moreover, the most efficient data transfer between CPU and GPU is gained by allocating the memory contiguously during the data initialization on the GPU. Compared to one CPU core, the speed-up of using one GPU alone reaches a factor of ~11.7{\texttimes} for the computation alone and ~3.82{\texttimes} when the data transfer between CPU and GPU is considered. Using one GPU alone is also generally faster than the multithreaded implementation for 16 CPU cores in a compute node and the single-source solution (OpenACC).
The best performance is achieved by the implementation of the hybrid CPU/GPU version, but rescheduling the workload among the CPU cores is required before practical CAM4-Chem simulations.}, keywords = {compiler, CUDA, data transfer, gpu, hybrid, memory layout}, doi = {https://doi.org/10.1029/2018MS001276}, author = {Jian Sun and Joshua Fu and John Drake and Qingzhao Zhu and Azzam Haidar and Mark Gates and Stanimire Tomov and Jack Dongarra} } @techreport {1200, title = {Data Movement Interfaces to Support Dataflow Runtimes}, journal = {Innovative Computing Laboratory Technical Report}, number = {ICL-UT-18-03}, year = {2018}, month = {2018-05}, publisher = {University of Tennessee}, abstract = {This document presents the design study and reports on the implementation of portable hosted accelerator device support in the PaRSEC Dataflow Tasking at Exascale runtime, undertaken as part of the ECP contract 17-SC-20-SC. The document discusses different technological approaches to transfer data to/from hosted accelerators, issues recommendations for technology providers, and presents the design of an OpenMP-based accelerator support in PaRSEC.}, author = {Aurelien Bouteiller and George Bosilca and Thomas Herault and Jack Dongarra} } @inproceedings {1259, title = {The Design of Fast and Energy-Efficient Linear Solvers: On the Potential of Half-Precision Arithmetic and Iterative Refinement Techniques}, journal = {International Conference on Computational Science (ICCS 2018)}, volume = {10860}, year = {2018}, month = {2018-06}, pages = {586{\textendash}600}, publisher = {Springer}, address = {Wuxi, China}, abstract = {As parallel computers approach exascale, power efficiency in high-performance computing (HPC) systems is of increasing concern. Exploiting both the hardware features and algorithms is an effective solution to achieve power efficiency, and to address the energy constraints in modern and future HPC systems. In this work, we present a novel design and implementation of an energy-efficient solution for dense linear systems of equations, which are at the heart of large-scale HPC applications. The proposed energy-efficient linear system solvers are based on two main components: (1) iterative refinement techniques, and (2) reduced-precision computing features in modern accelerators and coprocessors. While most of the energy efficiency approaches aim to reduce the consumption with a minimal performance penalty, our method improves both the performance and the energy efficiency. Compared to highly-optimized linear system solvers, our kernels deliver a solution of the same accuracy up to 2{\texttimes} faster and reduce the energy consumption by up to half on Intel Knights Landing (KNL) architectures. By efficiently using the Tensor Cores available in the NVIDIA V100 PCIe GPUs, the speedups can be up to 4{\texttimes}, with more than 80\% reduction in the energy consumption.}, doi = {https://doi.org/10.1007/978-3-319-93698-7_45}, url = {https://rdcu.be/bcKSC}, author = {Azzam Haidar and Ahmad Abdelfattah and Mawussi Zounon and Panruo Wu and Srikara Pranesh and Stanimire Tomov and Jack Dongarra} } @techreport {1320, title = {Distributed Termination Detection for HPC Task-Based Environments}, journal = {Innovative Computing Laboratory Technical Report}, number = {ICL-UT-18-14}, year = {2018}, month = {2018-06}, publisher = {University of Tennessee}, abstract = {This paper revisits distributed termination detection algorithms in the context of high-performance computing applications in task systems.
We first outline the need to efficiently detect termination in workflows for which the total number of tasks is data dependent and therefore not known statically but only revealed dynamically during execution. We introduce an efficient variant of the Credit Distribution Algorithm (CDA) and compare it to the original algorithm (HCDA) as well as to its two primary competitors: the Four Counters algorithm (4C) and the Efficient Delay-Optimal Distributed algorithm (EDOD). On the theoretical side, we analyze the behavior of each algorithm for some simplified task-based kernels and show the superiority of CDA in terms of the number of control messages. On the practical side, we provide a highly tuned implementation of each termination detection algorithm within PaRSEC and compare their performance for a variety of benchmarks, extracted from scientific applications that exhibit dynamic behaviors.}, author = {George Bosilca and Aurelien Bouteiller and Thomas Herault and Valentin Le F{\`e}vre and Yves Robert and Jack Dongarra} } @conference {1214, title = {Do moldable applications perform better on failure-prone HPC platforms?}, booktitle = {11th Workshop on Resiliency in High Performance Computing in Clusters, Clouds, and Grids}, series = {LNCS}, year = {2018}, month = {2018-08}, publisher = {Springer Verlag}, organization = {Springer Verlag}, address = {Turin, Italy}, abstract = {This paper compares the performance of different approaches to tolerate failures using checkpoint/restart when executed on large-scale failure-prone platforms. We study (i) Rigid applications, which use a constant number of processors throughout execution; (ii) Moldable applications, which can use a different number of processors after each restart following a fail-stop error; and (iii) GridShaped applications, which are moldable applications restricted to use rectangular processor grids (such as many dense linear algebra kernels). For each application type, we compute the optimal number of failures to tolerate before relinquishing the current allocation and waiting until a new resource can be allocated, and we determine the optimal yield that can be achieved. We instantiate our performance model with a realistic application scenario and make it publicly available for further usage.}, author = {Valentin Le F{\`e}vre and George Bosilca and Aurelien Bouteiller and Thomas Herault and Atsushi Hori and Yves Robert and Jack Dongarra} } @techreport {1232, title = {Evaluation and Design of FFT for Distributed Accelerated Systems}, journal = {ECP WBS 2.3.3.09 Milestone Report}, number = {FFT-ECP ST-MS-10-1216}, year = {2018}, month = {2018-10}, publisher = {Innovative Computing Laboratory, University of Tennessee}, author = {Stanimire Tomov and Azzam Haidar and Daniel Schultz and Jack Dongarra} } @article {1201, title = {Evaluation of Dataflow Programming Models for Electronic Structure Theory}, journal = {Concurrency and Computation: Practice and Experience: Special Issue on Parallel and Distributed Algorithms}, volume = {2018}, year = {2018}, month = {2018-05}, pages = {1{\textendash}20}, abstract = {Dataflow programming models have been growing in popularity as a means to deliver a good balance between performance and portability in the post-petascale era. In this paper, we evaluate different dataflow programming models for electronic structure methods and compare them in terms of programmability, resource utilization, and scalability.
In particular, we evaluate two programming paradigms for expressing scientific applications in a dataflow form: (1) explicit dataflow, where the dataflow is specified explicitly by the developer, and (2) implicit dataflow, where a task scheduling runtime derives the dataflow using per-task data-access information embedded in a serial program. We discuss our findings and present a thorough experimental analysis using methods from the NWChem quantum chemistry application as our case study, and OpenMP, StarPU, and PaRSEC as the task-based runtimes that enable the different forms of dataflow execution. Furthermore, we derive an abstract model to explore the limits of the different dataflow programming paradigms.}, keywords = {CCSD, coupled cluster methods, dataflow, NWChem, OpenMP, parsec, StarPU, task-based runtime}, doi = {https://doi.org/10.1002/cpe.4490}, author = {Heike Jagode and Anthony Danalis and Reazul Hoque and Mathieu Faverge and Jack Dongarra} } @article {1089, title = {A Failure Detector for HPC Platforms}, journal = {The International Journal of High Performance Computing Applications}, volume = {32}, year = {2018}, month = {2018-01}, pages = {139{\textendash}158}, abstract = {Building an infrastructure for exascale applications requires, in addition to many other key components, a stable and efficient failure detector. This article describes the design and evaluation of a robust failure detector that can maintain and distribute the correct list of alive resources within proven and scalable bounds. The detection and distribution of the fault information follow different overlay topologies that together guarantee minimal disturbance to the applications. A virtual observation ring minimizes the overhead by allowing each node to be observed by another single node, providing an unobtrusive behavior. The propagation stage uses a nonuniform variant of a reliable broadcast over a circulant graph overlay network and guarantees a logarithmic fault propagation. Extensive simulations, together with experiments on the Titan Oak Ridge National Laboratory supercomputer, show that the algorithm performs extremely well and exhibits all the desired properties of an exascale-ready algorithm.}, keywords = {failure detection, Fault tolerance, MPI}, doi = {https://doi.org/10.1177/1094342017711505}, author = {George Bosilca and Aurelien Bouteiller and Amina Guermouche and Thomas Herault and Yves Robert and Pierre Sens and Jack Dongarra} } @article {1208, title = {A Guide for Achieving High Performance with Very Small Matrices on GPUs: A Case Study of Batched LU and Cholesky Factorizations}, journal = {IEEE Transactions on Parallel and Distributed Systems}, volume = {29}, year = {2018}, month = {2018-05}, pages = {973{\textendash}984}, abstract = {We present a high-performance GPU kernel with a substantial speedup over vendor libraries for very small matrix computations. In addition, we discuss most of the challenges that hinder the design of efficient GPU kernels for small matrix algorithms. We propose relevant algorithm analysis to harness the full power of a GPU, and strategies for predicting the performance, before introducing a proper implementation. We develop a theoretical analysis and a methodology for high-performance linear solvers for very small matrices. As test cases, we take the Cholesky and LU factorizations and show how the proposed methodology enables us to achieve a performance close to the theoretical upper bound of the hardware. 
This work investigates and proposes novel algorithms for designing highly optimized GPU kernels for solving batches of hundreds of thousands of small-size Cholesky and LU factorizations. Our focus on efficient batched Cholesky and batched LU kernels is motivated by the increasing need for these kernels in scientific simulations (e.g., astrophysics applications). Techniques for optimal memory traffic, register blocking, and tunable concurrency are incorporated in our proposed design. The proposed GPU kernels achieve performance speedups versus CUBLAS of up to 6x for the factorizations, using double precision arithmetic on an NVIDIA Pascal P100 GPU.}, doi = {10.1109/TPDS.2017.2783929}, author = {Azzam Haidar and Ahmad Abdelfattah and Mawussi Zounon and Stanimire Tomov and Jack Dongarra} } @conference {1264, title = {Harnessing GPU Tensor Cores for Fast FP16 Arithmetic to Speed up Mixed-Precision Iterative Refinement Solvers}, booktitle = {The International Conference for High Performance Computing, Networking, Storage, and Analysis (SC18)}, year = {2018}, month = {2018-11}, publisher = {IEEE}, organization = {IEEE}, address = {Dallas, TX}, abstract = {Low-precision floating-point arithmetic is a powerful tool for accelerating scientific computing applications, especially those in artificial intelligence. Here, we present an investigation showing that other high-performance computing (HPC) applications can also harness this power. Specifically, we use the general HPC problem, Ax = b, where A is a large dense matrix, and a double precision (FP64) solution is needed for accuracy. Our approach is based on mixed-precision (FP16-FP64) iterative refinement, and we generalize and extend prior advances into a framework, for which we develop architecture-specific algorithms and highly tuned implementations. These new methods show how using half-precision Tensor Cores (FP16-TC) for the arithmetic can provide up to 4{\texttimes} speedup. This is due to the performance boost that the FP16-TC provide as well as to the improved accuracy over the classical FP16 arithmetic that is obtained because the GEMM accumulation occurs in FP32 arithmetic.}, doi = {https://doi.org/10.1109/SC.2018.00050}, author = {Azzam Haidar and Stanimire Tomov and Jack Dongarra and Nicholas J. Higham} } @article {1335, title = {Harnessing GPU{\textquoteright}s Tensor Cores Fast FP16 Arithmetic to Speedup Mixed-Precision Iterative Refinement Solvers and Achieve 74 Gflops/Watt on Nvidia V100}, year = {2018}, month = {2018-03}, publisher = {GPU Technology Conference (GTC), Poster}, address = {San Jose, CA}, author = {Azzam Haidar and Ahmad Abdelfattah and Stanimire Tomov and Jack Dongarra} } @techreport {1203, title = {Implementation of the C++ API for Batch BLAS}, journal = {SLATE Working Notes}, number = {07, ICL-UT-18-04}, year = {2018}, month = {2018-06}, publisher = {Innovative Computing Laboratory, University of Tennessee}, author = {Ahmad Abdelfattah and Mark Gates and Jakub Kurzak and Piotr Luszczek and Jack Dongarra} } @article {1158, title = {Incomplete Sparse Approximate Inverses for Parallel Preconditioning}, journal = {Parallel Computing}, volume = {71}, year = {2018}, month = {2018-01}, pages = {1{\textendash}22}, abstract = {In this paper, we propose a new preconditioning method that can be seen as a generalization of block-Jacobi methods, or as a simplification of the sparse approximate inverse (SAI) preconditioners. 
The {\textquotedblleft}Incomplete Sparse Approximate Inverses{\textquotedblright} (ISAI) is particularly efficient for the solution of sparse triangular linear systems of equations. Those arise, for example, in the context of incomplete factorization preconditioning. ISAI preconditioners can be generated via an algorithm providing fine-grained parallelism, which makes them attractive for hardware with a high concurrency level. In a study covering a large number of matrices, we identify the ISAI preconditioner as an attractive alternative to exact triangular solves in the context of incomplete factorization preconditioning.}, issn = {01678191}, doi = {10.1016/j.parco.2017.10.003}, url = {http://www.sciencedirect.com/science/article/pii/S016781911730176X}, author = {Hartwig Anzt and Thomas Huckle and J{\"u}rgen Br{\"a}ckle and Jack Dongarra} } @techreport {1274, title = {Initial Integration and Evaluation of SLATE and STRUMPACK}, journal = {Innovative Computing Laboratory Technical Report}, number = {ICL-UT-18-11}, year = {2018}, month = {2018-12}, publisher = {University of Tennessee}, author = {Pieter Ghysels and Sherry Li and Asim YarKhan and Jack Dongarra} } @techreport {1207, title = {Initial Integration and Evaluation of SLATE Parallel BLAS in LATTE}, journal = {Innovative Computing Laboratory Technical Report}, number = {ICL-UT-18-07}, year = {2018}, month = {2018-06}, publisher = {Innovative Computing Laboratory, University of Tennessee}, author = {Asim YarKhan and Gerald Ragghianti and Jack Dongarra and Marc Cawkwell and Danny Perez and Arthur Voter} } @article {1199, title = {Investigating Power Capping toward Energy-Efficient Scientific Applications}, journal = {Concurrency Computation: Practice and Experience}, volume = {2018}, year = {2018}, month = {2018-04}, pages = {1-14}, abstract = {The emergence of power efficiency as a primary constraint in processor and system design poses new challenges concerning power and energy awareness for numerical libraries and scientific applications. Power consumption also plays a major role in the design of data centers, which may house petascale or exascale-level computing systems. At these extreme scales, understanding and improving the energy efficiency of numerical libraries and their related applications becomes a crucial part of the successful implementation and operation of the computing system. In this paper, we study and investigate the practice of controlling a compute system{\textquoteright}s power usage, and we explore how different power caps affect the performance of numerical algorithms with different computational intensities. Further, we determine the impact, in terms of performance and energy usage, that these caps have on a system running scientific applications. This analysis will enable us to characterize the types of algorithms that benefit most from these power management schemes. Our experiments are performed using a set of representative kernels and several popular scientific benchmarks.
We quantify a number of power and performance measurements and draw observations and conclusions that can be viewed as a roadmap to achieving energy efficiency in the design and execution of scientific algorithms.}, keywords = {energy efficiency, High Performance Computing, Intel Xeon Phi, Knights landing, papi, performance analysis, Performance Counters, power efficiency}, doi = {https://doi.org/10.1002/cpe.4485}, url = {https://onlinelibrary.wiley.com/doi/abs/10.1002/cpe.4485}, author = {Azzam Haidar and Heike Jagode and Phil Vaccaro and Asim YarKhan and Stanimire Tomov and Jack Dongarra} } @conference {1233, title = {A Jaccard Weights Kernel Leveraging Independent Thread Scheduling on GPUs}, booktitle = {SBAC-PAD}, year = {2018}, publisher = {IEEE}, organization = {IEEE}, address = {Lyon, France}, url = {https://ieeexplore.ieee.org/document/8645946}, author = {Hartwig Anzt and Jack Dongarra} } @techreport {1273, title = {Least Squares Performance Report}, journal = {SLATE Working Notes}, number = {09, ICL-UT-18-10}, year = {2018}, month = {2018-12}, publisher = {Innovative Computing Laboratory, University of Tennessee}, type = {SLATE Working Notes}, author = {Mark Gates and Ali Charara and Jakub Kurzak and Asim YarKhan and Ichitaro Yamazaki and Jack Dongarra} } @techreport {1228, title = {Linear Systems Performance Report}, journal = {SLATE Working Notes}, number = {08, ICL-UT-18-08}, year = {2018}, month = {2018-09}, publisher = {Innovative Computing Laboratory, University of Tennessee}, type = {SLATE Working Notes}, author = {Jakub Kurzak and Mark Gates and Ichitaro Yamazaki and Ali Charara and Asim YarKhan and Jamie Finney and Gerald Ragghianti and Piotr Luszczek and Jack Dongarra} } @article {1330, title = {MATEDOR: MAtrix, TEnsor, and Deep-learning Optimized Routines}, year = {2018}, month = {2018-11}, publisher = {The International Conference for High Performance Computing, Networking, Storage, and Analysis (SC18), Research Poster}, address = {Dallas, TX}, author = {Ahmad Abdelfattah and Jack Dongarra and Azzam Haidar and Stanimire Tomov and Ichitaro Yamazaki} } @article {1333, title = {MAtrix, TEnsor, and Deep-learning Optimized Routines (MATEDOR)}, year = {2018}, month = {2018-04}, publisher = {NSF PI Meeting, Poster}, address = {Washington, DC}, doi = {https://doi.org/10.6084/m9.figshare.6174143.v3}, author = {Azzam Haidar and Stanimire Tomov and Ahmad Abdelfattah and Ichitaro Yamazaki and Jack Dongarra} } @conference {1196, title = {Optimal Cooperative Checkpointing for Shared High-Performance Computing Platforms}, booktitle = {2018 IEEE International Parallel and Distributed Processing Symposium Workshops (IPDPSW), Best Paper Award}, year = {2018}, month = {2018-05}, publisher = {IEEE}, organization = {IEEE}, address = {Vancouver, BC, Canada}, abstract = {In high-performance computing environments, input/output (I/O) from various sources often contends for scarce available bandwidth. Adding to the I/O operations inherent to the failure-free execution of an application, I/O from checkpoint/restart (CR) operations (used to ensure progress in the presence of failures) places an additional burden as it increases I/O contention, leading to degraded performance. In this work, we consider a cooperative scheduling policy that optimizes the overall performance of concurrently executing CR-based applications which share valuable I/O resources. First, we provide a theoretical model and then derive a set of necessary constraints needed to minimize the global waste on the platform.
Our results demonstrate that the optimal checkpoint interval, as defined by Young/Daly, despite providing a sensible metric for a single application, is not sufficient to optimally address resource contention at the platform scale. We therefore show that combining optimal checkpointing periods with I/O scheduling strategies can provide a significant improvement in the overall application performance, thereby maximizing platform throughput. Overall, these results provide critical analysis and direct guidance on checkpointing large-scale workloads in the presence of competing I/O while minimizing the impact on application performance.}, doi = {10.1109/IPDPSW.2018.00127}, author = {Thomas Herault and Yves Robert and Aurelien Bouteiller and Dorian Arnold and Kurt Ferreira and George Bosilca and Jack Dongarra} } @article {1219, title = {Optimization and Performance Evaluation of the IDR Iterative Krylov Solver on GPUs}, journal = {The International Journal of High Performance Computing Applications}, volume = {32}, number = {2}, year = {2018}, month = {2018-03}, pages = {220{\textendash}230}, abstract = {In this paper, we present an optimized GPU implementation for the induced dimension reduction algorithm. We improve data locality, combine it with an efficient sparse matrix vector kernel, and investigate the potential of overlapping computation with communication as well as the possibility of concurrent kernel execution. A comprehensive performance evaluation is conducted using a suitable performance model. The analysis reveals an efficiency of up to 90\%, which indicates that the implementation achieves performance close to the theoretically attainable bound.}, keywords = {co-design, gpu, Induced dimension reduction (IDR), kernel fusion, kernel overlap, roofline performance model}, doi = {https://doi.org/10.1177/1094342016646844}, author = {Hartwig Anzt and Moritz Kreutzer and Eduardo Ponce and Gregory D. Peterson and Gerhard Wellein and Jack Dongarra} } @conference {1210, title = {Optimizing GPU Kernels for Irregular Batch Workloads: A Case Study for Cholesky Factorization}, booktitle = {IEEE High Performance Extreme Computing Conference (HPEC{\textquoteright}18)}, year = {2018}, month = {2018-09}, publisher = {IEEE}, organization = {IEEE}, address = {Waltham, MA}, abstract = {This paper introduces several frameworks for the design and implementation of high-performance GPU kernels that target batch workloads with irregular sizes. Such workloads are ubiquitous in many scientific applications, including sparse direct solvers, astrophysics, and quantum chemistry. The paper addresses two main categories of frameworks, taking the Cholesky factorization as a case study. The first uses host-side kernel launches, and the second uses device-side launches. Within each category, different design options are introduced, with an emphasis on the advantages and the disadvantages of each approach.
Our best performing design outperforms the state-of-the-art CPU implementation, scoring up to 4.7{\texttimes} speedup in double precision on a Pascal P100 GPU.}, author = {Ahmad Abdelfattah and Azzam Haidar and Stanimire Tomov and Jack Dongarra} } @article {1393, title = {PAPI: Counting outside the Box}, year = {2018}, month = {2018-04}, publisher = {8th JLESC Meeting}, address = {Barcelona, Spain}, author = {Anthony Danalis and Heike Jagode and Jack Dongarra} } @article {1391, title = {PAPI{\textquoteright}s New Software-Defined Events for In-Depth Performance Analysis}, year = {2018}, month = {2018-09}, publisher = {CCDSC 2018: Workshop on Clusters, Clouds, and Data for Scientific Computing}, address = {Lyon, France}, abstract = {One of the most recent developments of the Performance API (PAPI) is the addition of Software-Defined Events (SDE). PAPI has successfully served the role of the abstraction and unification layer for hardware performance counters for over a decade. This talk presents our effort to extend this role to encompass performance critical information that does not originate in hardware, but rather in critical software layers, such as libraries and runtime systems. Our overall objective is to enable monitoring of both types of performance events, hardware- and software-related events, in a uniform way, through one consistent PAPI interface. Performance analysts will be able to form a complete picture of the entire application performance without learning new instrumentation primitives. In this talk, we outline PAPI{\textquoteright}s new SDE API and showcase the usefulness of SDE through its employment in software layers as diverse as the math library MAGMA, the dataflow runtime PaRSEC, and the state-of-the-art chemistry application NWChem. We outline the process of instrumenting these software packages and highlight the performance information that can be acquired with SDEs.}, author = {Heike Jagode and Anthony Danalis and Jack Dongarra} } @techreport {1191, title = {Parallel BLAS Performance Report}, journal = {SLATE Working Notes}, number = {05, ICL-UT-18-01}, year = {2018}, month = {2018-04}, publisher = {University of Tennessee}, author = {Jakub Kurzak and Mark Gates and Asim YarKhan and Ichitaro Yamazaki and Panruo Wu and Piotr Luszczek and Jamie Finney and Jack Dongarra} } @techreport {1206, title = {Parallel Norms Performance Report}, journal = {SLATE Working Notes}, number = {06, ICL-UT-18-06}, year = {2018}, month = {2018-06}, publisher = {Innovative Computing Laboratory, University of Tennessee}, author = {Jakub Kurzak and Mark Gates and Asim YarKhan and Ichitaro Yamazaki and Piotr Luszczek and Jamie Finney and Jack Dongarra} } @article {1190, title = {ParILUT - A New Parallel Threshold ILU}, journal = {SIAM Journal on Scientific Computing}, volume = {40}, year = {2018}, month = {2018-07}, pages = {C503{\textendash}C519}, publisher = {SIAM}, abstract = {We propose a parallel algorithm for computing a threshold incomplete LU (ILU) factorization. The main idea is to interleave a parallel fixed-point iteration that approximates an incomplete factorization for a given sparsity pattern with a procedure that adjusts the pattern. We describe and test a strategy for identifying nonzeros to be added and nonzeros to be removed from the sparsity pattern. The resulting pattern may be different and more effective than that of existing threshold ILU algorithms. 
Also in contrast to other parallel threshold ILU algorithms, much of the new algorithm has fine-grained parallelism.}, doi = {https://doi.org/10.1137/16M1079506}, author = {Hartwig Anzt and Edmond Chow and Jack Dongarra} } @article {1258, title = {The Singular Value Decomposition: Anatomy of Optimizing an Algorithm for Extreme Scale}, journal = {SIAM Review}, volume = {60}, year = {2018}, month = {2018-11}, pages = {808{\textendash}865}, abstract = {The computation of the singular value decomposition, or SVD, has a long history with many improvements over the years, both in its implementations and algorithmically. Here, we survey the evolution of SVD algorithms for dense matrices, discussing the motivation and performance impacts of changes. There are two main branches of dense SVD methods: bidiagonalization and Jacobi. Bidiagonalization methods started with the implementation by Golub and Reinsch in Algol60, which was subsequently ported to Fortran in the EISPACK library, and was later more efficiently implemented in the LINPACK library, targeting contemporary vector machines. To address cache-based memory hierarchies, the SVD algorithm was reformulated to use Level 3 BLAS in the LAPACK library. To address new architectures, ScaLAPACK was introduced to take advantage of distributed computing, and MAGMA was developed for accelerators such as GPUs. Algorithmically, the divide and conquer and MRRR algorithms were developed to reduce the number of operations. Still, these methods remained memory bound, so two-stage algorithms were developed to reduce memory operations and increase the computational intensity, with efficient implementations in PLASMA, DPLASMA, and MAGMA. Jacobi methods started with the two-sided method of Kogbetliantz and the one-sided method of Hestenes. They have likewise had many developments, including parallel and block versions and preconditioning to improve convergence. In this paper, we investigate the impact of these changes by testing various historical and current implementations on a common, modern multicore machine and a distributed computing platform. 
We show that algorithmic and implementation improvements have increased the speed of the SVD by several orders of magnitude, while using up to 40 times less energy.}, keywords = {bidiagonal matrix, bisection, Divide and conquer, Hestenes method, Jacobi method, Kogbetliantz method, MRRR, QR iteration, Singular value decomposition, SVD}, issn = {0036-1445}, doi = {10.1137/17M1117732}, url = {https://epubs.siam.org/doi/10.1137/17M1117732}, author = {Jack Dongarra and Mark Gates and Azzam Haidar and Jakub Kurzak and Piotr Luszczek and Stanimire Tomov and Ichitaro Yamazaki} } @techreport {1275, title = {Software-Defined Events (SDEs) in MAGMA-Sparse}, journal = {Innovative Computing Laboratory Technical Report}, number = {ICL-UT-18-12}, year = {2018}, month = {2018-12}, publisher = {University of Tennessee}, author = {Heike Jagode and Anthony Danalis and Hartwig Anzt and Ichitaro Yamazaki and Mark Hoemmen and Erik Boman and Stanimire Tomov and Jack Dongarra} } @article {1392, title = {Software-Defined Events through PAPI for In-Depth Analysis of Application Performance}, year = {2018}, month = {2018-07}, publisher = {5th Platform for Advanced Scientific Computing Conference (PASC18)}, address = {Basel, Switzerland}, author = {Anthony Danalis and Heike Jagode and Jack Dongarra} } @techreport {1204, title = {Solver Interface \& Performance on Cori}, journal = {Innovative Computing Laboratory Technical Report}, number = {ICL-UT-18-05}, year = {2018}, month = {2018-06}, publisher = {University of Tennessee}, author = {Hartwig Anzt and Ichitaro Yamazaki and Mark Hoemmen and Erik Boman and Jack Dongarra} } @article {1189, title = {Symmetric Indefinite Linear Solver using OpenMP Task on Multicore Architectures}, journal = {IEEE Transactions on Parallel and Distributed Systems}, volume = {29}, year = {2018}, month = {2018-08}, pages = {1879{\textendash}1892}, abstract = {Recently, the Open Multi-Processing (OpenMP) standard has incorporated task-based programming, where a function call with input and output data is treated as a task. At run time, OpenMP{\textquoteright}s superscalar scheduler tracks the data dependencies among the tasks and executes the tasks as their dependencies are resolved. On a shared-memory architecture with multiple cores, the independent tasks are executed on different cores in parallel, thereby enabling parallel execution of a seemingly sequential code. With the emergence of many-core architectures, this type of programming paradigm is gaining attention-not only because of its simplicity, but also because it breaks the artificial synchronization points of the program and improves its thread-level parallelization. In this paper, we use these new OpenMP features to develop a portable high-performance implementation of a dense symmetric indefinite linear solver. Obtaining high performance from this kind of solver is a challenge because the symmetric pivoting, which is required to maintain numerical stability, leads to data dependencies that prevent us from using some common performance-improving techniques. To fully utilize a large number of cores through tasking, while conforming to the OpenMP standard, we describe several techniques. Our performance results on current many-core architectures-including Intel{\textquoteright}s Broadwell, Intel{\textquoteright}s Knights Landing, IBM{\textquoteright}s Power8, and Arm{\textquoteright}s ARMv8-demonstrate the portable and superior performance of our implementation compared with the Linear Algebra PACKage (LAPACK). 
The resulting solver is now available as a part of the PLASMA software package.}, keywords = {linear algebra, multithreading, runtime, symmetric indefinite matrices}, doi = {10.1109/TPDS.2018.2808964}, author = {Ichitaro Yamazaki and Jakub Kurzak and Panruo Wu and Mawussi Zounon and Jack Dongarra} } @article {1448, title = {Task Based Cholesky Decomposition on Xeon Phi Architectures using OpenMP}, journal = {International Journal of Computational Science and Engineering (IJCSE)}, volume = {17}, number = {3}, year = {2018}, month = {2018-10}, abstract = {The increasing number of computational cores in modern many-core processors, as represented by the Intel Xeon Phi architectures, has created the need for an open-source, high-performance, and scalable task-based dense linear algebra package that can efficiently use this type of many-core hardware. In this paper, we examined the design modifications necessary when porting PLASMA, a task-based dense linear algebra library, to run effectively on two generations of Intel{\textquoteright}s Xeon Phi architecture, known as Knights Corner (KNC) and Knights Landing (KNL). First, we modified PLASMA{\textquoteright}s tiled Cholesky decomposition to use OpenMP tasks for its scheduling mechanism to enable Xeon Phi compatibility. We then compared the performance of our modified code to that of the original dynamic scheduler running on an Intel Xeon Sandy Bridge CPU. Finally, we looked at the performance of the OpenMP tiled Cholesky decomposition on Knights Corner and Knights Landing processors. We detail the optimisations required to obtain performance on these platforms and compare with the highly tuned Intel MKL math library.}, doi = {http://dx.doi.org/10.1504/IJCSE.2018.095851}, author = {Joseph Dorris and Asim YarKhan and Jakub Kurzak and Piotr Luszczek and Jack Dongarra} } @article {1334, title = {Tensor Contractions using Optimized Batch GEMM Routines}, year = {2018}, month = {2018-03}, publisher = {GPU Technology Conference (GTC), Poster}, address = {San Jose, CA}, author = {Ahmad Abdelfattah and Azzam Haidar and Stanimire Tomov and Jack Dongarra} } @article {1332, title = {Using GPU FP16 Tensor Cores Arithmetic to Accelerate Mixed-Precision Iterative Refinement Solvers and Reduce Energy Consumption}, year = {2018}, month = {2018-06}, publisher = {ISC High Performance (ISC18), Best Poster Award}, address = {Frankfurt, Germany}, author = {Azzam Haidar and Stanimire Tomov and Ahmad Abdelfattah and Mawussi Zounon and Jack Dongarra} } @conference {1265, title = {Using GPU FP16 Tensor Cores Arithmetic to Accelerate Mixed-Precision Iterative Refinement Solvers and Reduce Energy Consumption}, booktitle = {ISC High Performance (ISC{\textquoteright}18), Best Poster}, year = {2018}, month = {2018-06}, address = {Frankfurt, Germany}, author = {Azzam Haidar and Stanimire Tomov and Ahmad Abdelfattah and Mawussi Zounon and Jack Dongarra} } @article {1221, title = {Using Jacobi Iterations and Blocking for Solving Sparse Triangular Systems in Incomplete Factorization Preconditioning}, journal = {Journal of Parallel and Distributed Computing}, volume = {119}, year = {2018}, month = {2018-11}, pages = {219{\textendash}230}, abstract = {When using incomplete factorization preconditioners with an iterative method to solve large sparse linear systems, each application of the preconditioner involves solving two sparse triangular systems. These triangular systems are challenging to solve efficiently on computers with high levels of concurrency.
On such computers, it has recently been proposed to use Jacobi iterations, which are highly parallel, to approximately solve the triangular systems from incomplete factorizations. The effectiveness of this approach, however, is problem-dependent: the Jacobi iterations may not always converge quickly enough for all problems. Thus, as a necessary and important step to evaluate this approach, we experimentally test the approach on a large number of realistic symmetric positive definite problems. We also show that by using block Jacobi iterations, we can extend the range of problems for which such an approach can be effective. For block Jacobi iterations, it is essential for the blocking to be cognizant of the matrix structure.}, doi = {https://doi.org/10.1016/j.jpdc.2018.04.017}, author = {Edmond Chow and Hartwig Anzt and Jennifer Scott and Jack Dongarra} } @conference {1234, title = {Variable-Size Batched Condition Number Calculation on GPUs}, booktitle = {SBAC-PAD}, year = {2018}, month = {2018-09}, address = {Lyon, France}, url = {https://ieeexplore.ieee.org/document/8645907}, author = {Hartwig Anzt and Jack Dongarra and Goran Flegar and Thomas Gruetzmacher} } @article {999, title = {Accelerating NWChem Coupled Cluster through Dataflow-Based Execution}, journal = {The International Journal of High Performance Computing Applications}, year = {2017}, month = {2017-01}, pages = {1{\textendash}13}, abstract = {Numerical techniques used for describing many-body systems, such as the Coupled Cluster methods (CC) of the quantum chemistry package NWChem, are of extreme interest to the computational chemistry community in fields such as catalytic reactions, solar energy, and bio-mass conversion. In spite of their importance, many of these computationally intensive algorithms have traditionally been thought of in a fairly linear fashion, or are parallelized in coarse chunks. In this paper, we present our effort of converting the NWChem{\textquoteright}s CC code into a dataflow-based form that is capable of utilizing the task scheduling system PaRSEC (Parallel Runtime Scheduling and Execution Controller): a software package designed to enable high-performance computing at scale. We discuss the modularity of our approach and explain how the PaRSEC-enabled dataflow version of the subroutines seamlessly integrate into the NWChem codebase. Furthermore, we argue how the CC algorithms can be easily decomposed into finer-grained tasks (compared with the original version of NWChem); and how data distribution and load balancing are decoupled and can be tuned independently. 
We demonstrate performance acceleration by more than a factor of two in the execution of the entire CC component of NWChem, concluding that the utilization of dataflow-based execution for CC methods enables more efficient and scalable computation.}, keywords = {CCSD, dag, dataflow, NWChem, parsec, ptg, tasks}, doi = {10.1177/1094342016672543}, url = {http://journals.sagepub.com/doi/10.1177/1094342016672543}, author = {Heike Jagode and Anthony Danalis and Jack Dongarra} } @article {1341, title = {Accelerating Tensor Contractions in High-Order FEM with MAGMA Batched}, year = {2017}, month = {2017-03}, publisher = {SIAM Conference on Computer Science and Engineering (SIAM CSE17), Presentation}, address = {Atlanta, GA}, author = {Ahmad Abdelfattah and Marc Baboulin and Veselin Dobrev and Jack Dongarra and Christopher Earl and Jo{\"e}l Falcou and Azzam Haidar and Ian Karlin and Tzanio Kolev and Ian Masliah and Stanimire Tomov} } @conference {1169, title = {Autotuning Batch Cholesky Factorization in CUDA with Interleaved Layout of Matrices}, booktitle = {Parallel and Distributed Processing Symposium Workshops (IPDPSW)}, year = {2017}, month = {2017-06}, publisher = {IEEE}, organization = {IEEE}, address = {Orlando, FL}, abstract = {Batch matrix operations address the case of solving the same linear algebra problem for a very large number of very small matrices. In this paper, we focus on implementing the batch Cholesky factorization in CUDA, in single precision arithmetic, for NVIDIA GPUs. Specifically, we look into the benefits of using noncanonical data layouts, where consecutive memory locations store elements with the same row and column index in a set of consecutive matrices. We discuss a number of different implementation options and tuning parameters. We demonstrate superior performance to traditional implementations for the case of very small matrices.}, keywords = {batch computation, Cholesky Factorization, data layout, GPU computing, numerical linear algebra}, doi = {10.1109/IPDPSW.2017.18}, author = {Mark Gates and Jakub Kurzak and Piotr Luszczek and Yu Pei and Jack Dongarra} } @inproceedings {998, title = {Batched Gauss-Jordan Elimination for Block-Jacobi Preconditioner Generation on GPUs}, journal = {Proceedings of the 8th International Workshop on Programming Models and Applications for Multicores and Manycores}, year = {2017}, month = {2017-02}, pages = {1{\textendash}10}, publisher = {ACM}, address = {New York, NY, USA}, abstract = {In this paper, we design and evaluate a routine for the efficient generation of block-Jacobi preconditioners on graphics processing units (GPUs). Concretely, to exploit the architecture of the graphics accelerator, we develop a batched Gauss-Jordan elimination CUDA kernel for matrix inversion that embeds an implicit pivoting technique and handles the entire inversion process in the GPU registers. In addition, we integrate extraction and insertion CUDA kernels to rapidly set up the block-Jacobi preconditioner. Our experiments compare the performance of our implementation against a sequence of batched routines from the MAGMA library realizing the inversion via the LU factorization with partial pivoting. Furthermore, we evaluate the costs of different strategies for the block-Jacobi extraction and insertion steps, using a variety of sparse matrices from the SuiteSparse matrix collection. 
Finally, we assess the efficiency of the complete block-Jacobi preconditioner generation in the context of an iterative solver applied to a set of computational science problems, and quantify its benefits over a scalar Jacobi preconditioner.}, keywords = {block-Jacobi preconditioner, Gauss-Jordan elimination, graphics processing units (GPUs), iterative methods, matrix inversion, sparse linear systems}, isbn = {978-1-4503-4883-6}, doi = {10.1145/3026937.3026940}, url = {http://doi.acm.org/10.1145/3026937.3026940}, author = {Hartwig Anzt and Jack Dongarra and Goran Flegar and Enrique S. Quintana-Orti} } @conference {1093, title = {Bidiagonalization and R-Bidiagonalization: Parallel Tiled Algorithms, Critical Paths and Distributed-Memory Implementation}, booktitle = {IEEE International Parallel and Distributed Processing Symposium (IPDPS)}, year = {2017}, month = {2017-05}, publisher = {IEEE}, organization = {IEEE}, address = {Orlando, FL}, abstract = {We study tiled algorithms for going from a {\textquotedblleft}full{\textquotedblright} matrix to a condensed {\textquotedblleft}band bidiagonal{\textquotedblright} form using orthogonal transformations: (i) the tiled bidiagonalization algorithm BIDIAG, which is a tiled version of the standard scalar bidiagonalization algorithm; and (ii) the R-bidiagonalization algorithm R-BIDIAG, which is a tiled version of the algorithm which consists of first performing the QR factorization of the initial matrix, then performing the band-bidiagonalization of the R-factor. For both BIDIAG and R-BIDIAG, we use four main types of reduction trees, namely FLATTS, FLATTT, GREEDY, and a newly introduced auto-adaptive tree, AUTO. We provide a study of critical path lengths for these tiled algorithms, which shows that (i) R-BIDIAG has a shorter critical path length than BIDIAG for tall and skinny matrices, and (ii) GREEDY-based schemes are much better than earlier proposed algorithms with unbounded resources.
We provide experiments on a single multicore node, and on a few multicore nodes of a parallel distributed shared-memory system, to show the superiority of the new algorithms on a variety of matrix sizes, matrix shapes and core counts.}, keywords = {Algorithm design and analysis, Approximation algorithms, Kernel, Multicore processing, Shape, Software algorithms, Transforms}, doi = {10.1109/IPDPS.2017.46}, author = {Mathieu Faverge and Julien Langou and Yves Robert and Jack Dongarra} } @inbook {1167, title = {Bringing High Performance Computing to Big Data Algorithms}, booktitle = {Handbook of Big Data Technologies}, year = {2017}, publisher = {Springer}, organization = {Springer}, isbn = {978-3-319-49339-8}, doi = {10.1007/978-3-319-49340-4}, author = {Hartwig Anzt and Jack Dongarra and Mark Gates and Jakub Kurzak and Piotr Luszczek and Stanimire Tomov and Ichitaro Yamazaki} } @techreport {1175, title = {C++ API for Batch BLAS}, journal = {SLATE Working Notes}, number = {04, ICL-UT-17-12}, year = {2017}, month = {2017-12}, publisher = {University of Tennessee}, author = {Ahmad Abdelfattah and Konstantin Arturov and Cris Cecka and Jack Dongarra and Chip Freitag and Mark Gates and Azzam Haidar and Jakub Kurzak and Piotr Luszczek and Stanimire Tomov and Panruo Wu} } @techreport {1081, title = {C++ API for BLAS and LAPACK}, journal = {SLATE Working Notes}, number = {02, ICL-UT-17-03}, year = {2017}, note = {Revision 02-21-2018}, month = {2017-06}, publisher = {Innovative Computing Laboratory, University of Tennessee}, author = {Mark Gates and Piotr Luszczek and Ahmad Abdelfattah and Jakub Kurzak and Jack Dongarra and Konstantin Arturov and Cris Cecka and Chip Freitag} } @techreport {1138, title = {The Case for Directive Programming for Accelerator Autotuner Optimization}, journal = {Innovative Computing Laboratory Technical Report}, number = {ICL-UT-17-07}, year = {2017}, month = {2017-10}, publisher = {University of Tennessee}, abstract = {In this work, we present the use of compiler pragma directives for parallelizing autotuning of specialized compute kernels for hardware accelerators. We describe a set of constructs that parallelize the source code and prune a generated search space with a large number of constraints for an autotuning infrastructure. For better performance, we studied optimizations aimed at minimizing the run time. We also studied the behavior of the parallel load balance and the speedup on four different machines: x86, Xeon Phi, ARMv8, and POWER8.}, author = {Diana Fayad and Jakub Kurzak and Piotr Luszczek and Panruo Wu and Jack Dongarra} } @article {1131, title = {Comparing performance of s-step and pipelined GMRES on distributed-memory multicore CPUs}, year = {2017}, month = {2017-07}, publisher = {SIAM Annual Meeting}, address = {Pittsburgh, Pennsylvania}, author = {Ichitaro Yamazaki and Mark Hoemmen and Piotr Luszczek and Jack Dongarra} } @article {1165, title = {Design and Implementation of the PULSAR Programming System for Large Scale Computing}, journal = {Supercomputing Frontiers and Innovations}, volume = {4}, year = {2017}, abstract = {The objective of the PULSAR project was to design a programming model suitable for large scale machines with complex memory hierarchies, and to deliver a prototype implementation of a runtime system supporting that model. PULSAR tackled the challenge by proposing a programming model based on systolic processing and virtualization.
The PULSAR programming model is quite simple, with point-to-point channels as the main communication abstraction. The runtime implementation is very lightweight and fully distributed, and provides multithreading, message-passing and multi-GPU offload capabilities. Performance evaluation shows good scalability up to one thousand nodes with one thousand GPU accelerators.}, doi = {10.14529/jsfi170101}, url = {http://superfri.org/superfri/article/view/121/210}, author = {Jakub Kurzak and Piotr Luszczek and Ichitaro Yamazaki and Yves Robert and Jack Dongarra} } @conference {1168, title = {The Design and Performance of Batched BLAS on Modern High-Performance Computing Systems}, booktitle = {International Conference on Computational Science (ICCS 2017)}, year = {2017}, month = {2017-06}, publisher = {Elsevier}, organization = {Elsevier}, address = {Z{\"u}rich, Switzerland}, abstract = {A current trend in high-performance computing is to decompose a large linear algebra problem into batches containing thousands of smaller problems that can be solved independently, before collating the results. To standardize the interface to these routines, the community is developing an extension to the BLAS standard (the batched BLAS), enabling users to perform thousands of small BLAS operations in parallel whilst making efficient use of their hardware. We discuss the benefits and drawbacks of the current batched BLAS proposals and perform a number of experiments, focusing on a general matrix-matrix multiplication (GEMM), to explore their effect on performance. In particular we analyze the effect of novel data layouts which, for example, interleave the matrices in memory to aid vectorization and prefetching of data. Utilizing these modifications our code outperforms both MKL and cuBLAS by up to 6 times on the self-hosted Intel KNL (codenamed Knights Landing) and Kepler GPU architectures, for large numbers of double precision GEMM operations using matrices of size 2 {\texttimes} 2 to 20 {\texttimes} 20.}, keywords = {Batched BLAS, BLAS, High-performance computing, Memory management, Parallel processing, Scientific computing}, doi = {10.1016/j.procs.2017.05.138}, author = {Jack Dongarra and Sven Hammarling and Nicholas J. Higham and Samuel Relton and Pedro Valero-Lara and Mawussi Zounon} } @techreport {1133, title = {Designing SLATE: Software for Linear Algebra Targeting Exascale}, journal = {SLATE Working Notes}, number = {03, ICL-UT-17-06}, year = {2017}, month = {2017-10}, publisher = {Innovative Computing Laboratory, University of Tennessee}, type = {SLATE Working Notes}, author = {Jakub Kurzak and Panruo Wu and Mark Gates and Ichitaro Yamazaki and Piotr Luszczek and Gerald Ragghianti and Jack Dongarra} } @inproceedings {1183, title = {Dynamic Task Discovery in PaRSEC: A Data-flow Task-based Runtime}, journal = {ScalA17}, year = {2017}, month = {2017-09}, publisher = {ACM}, address = {Denver}, abstract = {Successfully exploiting distributed collections of heterogeneous many-core architectures with complex memory hierarchies through a portable programming model is a challenge for application developers. The literature is not short of proposals addressing this problem, including many evolutionary solutions that seek to extend the capabilities of current message passing paradigms with intranode features (MPI+X). A different, more revolutionary, solution explores data-flow task-based runtime systems as a substitute for both local and distributed data dependency management.
The solution explored in this paper, PaRSEC, is based on such a programming paradigm, supported by a highly efficient task-based runtime. This paper compares two programming paradigms present in PaRSEC, Parameterized Task Graph (PTG) and Dynamic Task Discovery (DTD), in terms of capabilities, overhead and potential benefits.}, keywords = {data-flow, dynamic task-graph, parsec, task-based runtime}, isbn = {978-1-4503-5125-6}, doi = {10.1145/3148226.3148233}, url = {https://dl.acm.org/citation.cfm?doid=3148226.3148233}, author = {Reazul Hoque and Thomas Herault and George Bosilca and Jack Dongarra} } @article {1103, title = {Factorization and Inversion of a Million Matrices using GPUs: Challenges and Countermeasures}, journal = {Procedia Computer Science}, volume = {108}, year = {2017}, month = {2017-06}, pages = {606{\textendash}615}, abstract = {This paper presents new algorithmic approaches and optimization techniques for LU factorization and matrix inversion of millions of very small matrices using GPUs. These problems appear in many scientific applications including astrophysics and generation of block-Jacobi preconditioners. We show that, for very small problem sizes, design and optimization of GPU kernels require a mindset different from the one usually used when designing LAPACK algorithms for GPUs. Techniques for optimal memory traffic, register blocking, and tunable concurrency are incorporated in our proposed design. We also take advantage of the small matrix sizes to eliminate the intermediate row interchanges in both the factorization and inversion kernels. The proposed GPU kernels achieve performance speedups vs. CUBLAS of up to 6{\texttimes} for the factorization, and 14{\texttimes} for the inversion, using double precision arithmetic on a Pascal P100 GPU.}, doi = {https://doi.org/10.1016/j.procs.2017.05.250}, author = {Ahmad Abdelfattah and Azzam Haidar and Stanimire Tomov and Jack Dongarra} } @article {1102, title = {Fast Cholesky Factorization on GPUs for Batch and Native Modes in MAGMA}, journal = {Journal of Computational Science}, volume = {20}, year = {2017}, month = {2017-05}, pages = {85{\textendash}93}, abstract = {This paper presents a GPU-accelerated Cholesky factorization for two different modes of operation. The first one is the batch mode, where many independent factorizations on small matrices can be performed concurrently. This mode supports fixed size and variable size problems, and is found in many scientific applications. The second mode is the native mode, where one factorization is performed on a large matrix without any CPU involvement, which allows the CPU to do other useful work. We show that, despite the different workloads, both modes of operation share a common code-base that uses the GPU only. We also show that the developed routines achieve significant speedups against a multicore CPU using the MKL library, and against a GPU implementation by cuSOLVER. This work is part of the MAGMA library.}, keywords = {GPU computing; Cholesky factorization; Batched execution}, doi = {https://doi.org/10.1016/j.jocs.2016.12.009}, author = {Ahmad Abdelfattah and Azzam Haidar and Stanimire Tomov and Jack Dongarra} } @article {1163, title = {Flexible Batched Sparse Matrix Vector Product on GPUs}, year = {2017}, month = {2017-11}, publisher = {ScalA{\textquoteright}17: 8th Workshop on Latest Advances in Scalable Algorithms for Large-Scale Systems}, address = {Denver, Colorado}, author = {Hartwig Anzt and Gary Collins and Jack Dongarra and Goran Flegar and Enrique S.
Quintana-Orti} } @conference {, title = {Flexible Batched Sparse Matrix-Vector Product on GPUs}, booktitle = {8th Workshop on Latest Advances in Scalable Algorithms for Large-Scale Systems (ScalA {\textquoteright}17)}, year = {2017}, month = {2017-11}, publisher = {ACM Press}, organization = {ACM Press}, address = {Denver, CO}, abstract = {We propose a variety of batched routines for concurrently processing a large collection of small-size, independent sparse matrix-vector products (SpMV) on graphics processing units (GPUs). These batched SpMV kernels are designed to be flexible in order to handle a batch of matrices which differ in size, nonzero count, and nonzero distribution. Furthermore, they support the three most commonly used sparse storage formats: CSR, COO and ELL. Our experimental results on a state-of-the-art GPU reveal performance improvements of up to 25X compared to non-batched SpMV routines.}, doi = {http://dx.doi.org/10.1145/3148226.3148230}, author = {Hartwig Anzt and Gary Collins and Jack Dongarra and Goran Flegar and Enrique S. Quintana-Orti} } @article {1083, title = {A Framework for Out of Memory SVD Algorithms}, journal = {ISC High Performance 2017}, year = {2017}, month = {2017-06}, pages = {158{\textendash}178}, abstract = {Many important applications {\textendash} from big data analytics to information retrieval, gene expression analysis, and numerical weather prediction {\textendash} require the solution of large dense singular value decompositions (SVD). In many cases the problems are too large to fit into the computer{\textquoteright}s main memory, and thus require specialized out-of-core algorithms that use disk storage. In this paper, we analyze the SVD communications, as related to hierarchical memories, and design a class of algorithms that minimizes them. This class includes out-of-core SVDs but can also be applied between other consecutive levels of the memory hierarchy, e.g., GPU SVD using the CPU memory for large problems. We call these out-of-memory (OOM) algorithms. To design OOM SVDs, we first study the communications for both classical one-stage blocked SVD and two-stage tiled SVD. We present the theoretical analysis and strategies to design, as well as implement, these communication-avoiding OOM SVD algorithms. We show performance results for a multicore architecture that illustrate our theoretical findings and match our performance models.}, doi = {https://doi.org/10.1007/978-3-319-58667-0_9}, author = {Khairul Kabir and Azzam Haidar and Stanimire Tomov and Aurelien Bouteiller and Jack Dongarra} } @conference {1142, title = {High-performance Cholesky Factorization for GPU-only Execution}, booktitle = {Proceedings of the General Purpose GPUs (GPGPU-10)}, year = {2017}, month = {2017-02}, publisher = {ACM}, organization = {ACM}, address = {Austin, TX}, abstract = {We present our performance analysis, algorithm designs, and the optimizations needed for the development of high-performance GPU-only algorithms, and in particular, for the dense Cholesky factorization. In contrast to currently promoted designs that solve parallelism challenges on multicore architectures by representing algorithms as Directed Acyclic Graphs (DAGs), where nodes are tasks of fine granularity and edges are the dependencies between the tasks, our designs explicitly target manycore architectures like GPUs and feature coarse granularity tasks (that can be hierarchically split into fine grain data-parallel subtasks).
Furthermore, in contrast to hybrid algorithms that schedule difficult-to-parallelize tasks on CPUs, we develop highly efficient code for execution entirely on GPUs. GPU-only codes remove the expensive CPU-to-GPU communications and the tuning challenges related to slow CPU and/or low CPU-to-GPU bandwidth. We show that on the latest GPUs, like the P100, this becomes so important that the GPU-only code even outperforms the hybrid MAGMA algorithms when the CPU tasks and communications cannot be entirely overlapped with GPU computations. We achieve up to 4,300 GFlop/s in double precision on a P100 GPU, which is about 7-8{\texttimes} faster than high-end multicore CPUs, e.g., two 10-core Intel Xeon E5-2650 v3 Haswell CPUs, where MKL runs up to about 500-600 GFlop/s. The new algorithm also significantly outperforms the GPU-only implementation currently available in the NVIDIA cuSOLVER library.}, doi = {https://doi.org/10.1145/3038228.3038237}, author = {Azzam Haidar and Ahmad Abdelfattah and Stanimire Tomov and Jack Dongarra} } @inproceedings {1011, title = {Improving Performance of GMRES by Reducing Communication and Pipelining Global Collectives}, journal = {Proceedings of The 18th IEEE International Workshop on Parallel and Distributed Scientific and Engineering Computing (PDSEC 2017), Best Paper Award}, year = {2017}, month = {2017-06}, address = {Orlando, FL}, abstract = {We compare the performance of pipelined and s-step GMRES, respectively referred to as l-GMRES and s-GMRES, on distributed multicore CPUs. Compared to standard GMRES, s-GMRES requires fewer all-reduces, while l-GMRES overlaps the all-reduces with computation. To combine the best features of the two algorithms, we propose another variant, (l, t)-GMRES, that not only does fewer global all-reduces than standard GMRES, but also overlaps those all-reduces with other work. We implemented the thread-parallelism and communication-overlap in two different ways. The first uses nonblocking MPI collectives with thread-parallel computational kernels. The second relies on a shared-memory task scheduler. In our experiments, (l, t)-GMRES performed better than l-GMRES by factors of up to 1.67{\texttimes}. In addition, though we only used 50 nodes, when the latency cost became significant, our variant performed up to 1.22{\texttimes} better than s-GMRES by hiding all-reduces.}, doi = {https://doi.org/10.1109/IPDPSW.2017.65}, author = {Ichitaro Yamazaki and Mark Hoemmen and Piotr Luszczek and Jack Dongarra} } @conference {1140, title = {Investigating Half Precision Arithmetic to Accelerate Dense Linear System Solvers}, booktitle = {ScalA17: 8th Workshop on Latest Advances in Scalable Algorithms for Large-Scale Systems}, year = {2017}, month = {2017-11}, publisher = {ACM}, organization = {ACM}, address = {Denver, CO}, abstract = {The use of low-precision arithmetic in mixed-precision computing methods has been a powerful tool to accelerate numerous scientific computing applications. Artificial intelligence (AI) in particular has pushed this to current extremes, making use of half-precision floating-point arithmetic (FP16) in approaches based on neural networks. The appeal of FP16 is in the high performance that can be achieved using it on today{\textquoteright}s powerful manycore GPU accelerators, e.g., the NVIDIA V100, which can provide 120 teraFLOPS in FP16 alone.
We present an investigation showing that other HPC applications can harness this power too, and in particular, the general HPC problem of solving Ax = b, where A is a large dense matrix, and the solution is needed in FP32 or FP64 accuracy. Our approach is based on the mixed-precision iterative refinement technique {\textendash} we generalize and extend prior advances into a framework, for which we develop architecture-specific algorithms and highly-tuned implementations that resolve the main computational challenges of efficiently parallelizing, scaling, and using FP16 arithmetic in the approach on high-end GPUs. Subsequently, we show for the first time how the use of FP16 arithmetic can significantly accelerate, as well as make more energy efficient, FP32 or FP64-precision Ax = b solvers. Our results are reproducible and the developments will be made available through the MAGMA library. We quantify in practice the performance and limitations of the approach.}, author = {Azzam Haidar and Panruo Wu and Stanimire Tomov and Jack Dongarra} } @techreport {1182, title = {LAWN 294: Aasen{\textquoteright}s Symmetric Indefinite Linear Solvers in LAPACK}, journal = {LAPACK Working Note}, number = {LAWN 294, ICL-UT-17-13}, year = {2017}, month = {2017-12}, publisher = {University of Tennessee}, abstract = {Recently, we released two LAPACK subroutines that implement Aasen{\textquoteright}s algorithms for solving a symmetric indefinite linear system of equations. The first implementation is based on a partitioned right-looking variant of Aasen{\textquoteright}s algorithm (the column-wise left-looking panel factorization, followed by the right-looking trailing submatrix update using the panel). The second implements the two-stage left-looking variant of the algorithm (the block-wise left-looking algorithm that reduces the matrix to the symmetric band form, followed by the band LU factorization). In this report, we discuss our implementations and present our experimental results to compare the stability and performance of these two new solvers with those of the other two symmetric indefinite solvers in LAPACK (i.e., the Bunch-Kaufman and rook pivoting algorithms).}, author = {Ichitaro Yamazaki and Jack Dongarra} } @article {1166, title = {A Look Back on 30 Years of the Gordon Bell Prize}, journal = {International Journal of High Performance Computing and Networking}, volume = {31}, year = {2017}, pages = {469{\textendash}484}, abstract = {The Gordon Bell Prize is awarded each year by the Association for Computing Machinery to recognize outstanding achievement in high-performance computing (HPC). The purpose of the award is to track the progress of parallel computing with particular emphasis on rewarding innovation in applying HPC to applications in science, engineering, and large-scale data analytics. Prizes may be awarded for peak performance or special achievements in scalability and time-to-solution on important science and engineering problems. Financial support for the US$10,000 award is provided through an endowment by Gordon Bell, a pioneer in high-performance and parallel computing. This article examines the evolution of the Gordon Bell Prize and the impact it has had on the field.}, keywords = {benchmarks, Computational Science, Gordon Bell Prize, High Performance Computing, HPC Cost-Performance, HPC Progress, HPC Recognition, HPC special hardware, HPC Award,
HPC Prize, Technical Computing}, url = {http://journals.sagepub.com/doi/10.1177/1094342017738610}, author = {Gordon Bell and David Bailey and Alan H. Karp and Jack Dongarra and Kevin Walsh} } @article {1337, title = {MagmaDNN {\textendash} High-Performance Data Analytics for Manycore GPUs and CPUs}, year = {2017}, month = {2017-12}, publisher = {2017 Summer Research Experiences for Undergraduates (REU), Presentation}, address = {Knoxville, TN}, author = {Lucien Ng and Kwai Wong and Azzam Haidar and Stanimire Tomov and Jack Dongarra} } @techreport {1130, title = {MAGMA-sparse Interface Design Whitepaper}, journal = {Innovative Computing Laboratory Technical Report}, number = {ICL-UT-17-05}, year = {2017}, month = {2017-09}, type = {Technical Report}, abstract = {In this report we describe the logic and interface we develop for the MAGMA-sparse library to allow for easy integration as a third-party library into a top-level software ecosystem. The design choices are based on extensive consultation with other software library developers, in particular the Trilinos software development team. The interface documentation is at this point not exhaustive, but a first proposal for setting a standard. Although the interface description targets the MAGMA-sparse software module, we hope that the design choices carry beyond this specific library, and are attractive for adoption in other packages. This report is not intended as a static document, but will be updated over time to reflect the agile software development in the ECP 1.3.3.11 STMS11-PEEKS project.}, author = {Hartwig Anzt and Erik Boman and Jack Dongarra and Goran Flegar and Mark Gates and Mike Heroux and Mark Hoemmen and Jakub Kurzak and Piotr Luszczek and Sivasankaran Rajamanickam and Stanimire Tomov and Stephen Wood and Ichitaro Yamazaki} } @conference {1084, title = {Novel HPC Techniques to Batch Execution of Many Variable Size BLAS Computations on GPUs}, booktitle = {International Conference on Supercomputing (ICS {\textquoteright}17)}, year = {2017}, month = {2017-06}, publisher = {ACM}, organization = {ACM}, address = {Chicago, Illinois}, doi = {10.1145/3079079.3079103}, url = {http://dl.acm.org/citation.cfm?id=3079103}, author = {Ahmad Abdelfattah and Azzam Haidar and Stanimire Tomov and Jack Dongarra} } @conference {1170, title = {Optimized Batched Linear Algebra for Modern Architectures}, booktitle = {Euro-Par 2017}, year = {2017}, month = {2017-08}, publisher = {Springer}, organization = {Springer}, address = {Santiago de Compostela, Spain}, abstract = {Solving large numbers of small linear algebra problems simultaneously is becoming increasingly important in many application areas. Whilst many researchers have investigated the design of efficient batch linear algebra kernels for GPU architectures, the common approach for many/multi-core CPUs is to use one core per subproblem in the batch. When solving batches of very small matrices, 2 {\texttimes} 2 for example, this design exhibits two main issues: it fails to fully utilize the vector units and the cache of modern architectures, since the matrices are too small. Our approach to resolve this is as follows: given a batch of small matrices spread throughout the primary memory, we first reorganize the elements of the matrices into a contiguous array, using a block interleaved memory format, which allows us to process the small independent problems as a single large matrix problem and enables cross-matrix vectorization.
The large problem is solved using blocking strategies that attempt to optimize the use of the cache. The solution is then converted back to the original storage format. To explain our approach, we focus on two BLAS routines: general matrix-matrix multiplication (GEMM) and the triangular solve (TRSM). We extend this idea to LAPACK routines using the Cholesky factorization and solve (POSV). Our focus is primarily on very small matrices ranging in size from 2 {\texttimes} 2 to 32 {\texttimes} 32. Compared to both MKL and OpenMP implementations, our approach can be up to 4 times faster for GEMM, up to 14 times faster for TRSM, and up to 40 times faster for POSV on the new Intel Xeon Phi processor, code-named Knights Landing (KNL). Furthermore, we discuss strategies to avoid data movement between sockets when using our interleaved approach on a NUMA node.}, doi = {https://doi.org/10.1007/978-3-319-64203-1_37}, author = {Jack Dongarra and Sven Hammarling and Nicholas J. Higham and Samuel Relton and Mawussi Zounon} } @conference {1085, title = {Optimizing the SVD Bidiagonalization Process for a Batch of Small Matrices}, booktitle = {International Conference on Computational Science (ICCS 2017)}, year = {2017}, month = {2017-06}, publisher = {Procedia Computer Science}, organization = {Procedia Computer Science}, address = {Zurich, Switzerland}, abstract = {A challenging class of problems arising in many GPU applications, called batched problems, involves linear algebra operations on many small-sized matrices. We designed batched BLAS (Basic Linear Algebra Subroutines) routines, and in particular the Level-2 BLAS GEMV and the Level-3 BLAS GEMM routines, to solve them. We proposed device functions and big-tile settings in our batched BLAS design. We adopted auto-tuning to optimize different instances of GEMV routines. We illustrated our batched BLAS approach to optimize batched bi-diagonalization progressively on a K40c GPU. The optimization techniques in this paper are applicable to the other two-sided factorizations as well.}, doi = {https://doi.org/10.1016/j.procs.2017.05.237}, url = {http://www.sciencedirect.com/science/article/pii/S1877050917308645}, author = {Tingxing Dong and Azzam Haidar and Stanimire Tomov and Jack Dongarra} } @conference {1141, title = {Out of Memory SVD Solver for Big Data}, booktitle = {2017 IEEE High Performance Extreme Computing Conference (HPEC{\textquoteright}17)}, year = {2017}, month = {2017-09}, publisher = {IEEE}, organization = {IEEE}, address = {Waltham, MA}, abstract = {Many applications {\textendash} from data compression to numerical weather prediction and information retrieval {\textendash} need to compute large dense singular value decompositions (SVD). When the problems are too large to fit into the computer{\textquoteright}s main memory, specialized out-of-core algorithms that use disk storage are required. A typical example is when trying to analyze a large data set through tools like MATLAB or Octave, but the data is just too large to be loaded. To overcome this, we designed a class of out-of-memory (OOM) algorithms to reduce communication, as well as overlap it with computation. Of particular interest are OOM algorithms for matrices of size m{\texttimes}n, where m >> n or m << n, e.g., corresponding to cases of too many variables, or too many observations. To design OOM SVDs, we first study the communications cost for the SVD techniques as well as for the QR/LQ factorization followed by SVD.
We present a theoretical analysis of the data movement cost and strategies for designing OOM SVD algorithms. We show performance results for a multicore architecture that illustrate our theoretical findings and match our performance models. Moreover, our experimental results show the feasibility and superiority of the OOM SVD.}, author = {Azzam Haidar and Khairul Kabir and Diana Fayad and Stanimire Tomov and Jack Dongarra} } @techreport {1173, title = {PLASMA 17 Performance Report}, journal = {Innovative Computing Laboratory Technical Report}, number = {ICL-UT-17-11}, year = {2017}, month = {2017-06}, publisher = {University of Tennessee}, abstract = {PLASMA (Parallel Linear Algebra for Multicore Architectures) is a dense linear algebra package at the forefront of multicore computing. PLASMA is designed to deliver the highest possible performance from a system with multiple sockets of multicore processors. PLASMA achieves this objective by combining state-of-the-art solutions in parallel algorithms, scheduling, and software engineering. PLASMA currently offers a collection of routines for solving linear systems of equations and least squares problems.}, author = {Maksims Abalenkovs and Negin Bagherpour and Jack Dongarra and Mark Gates and Azzam Haidar and Jakub Kurzak and Piotr Luszczek and Samuel Relton and Jakub Sistek and David Stevens and Panruo Wu and Ichitaro Yamazaki and Asim YarKhan and Mawussi Zounon} } @techreport {1172, title = {PLASMA 17.1 Functionality Report}, journal = {Innovative Computing Laboratory Technical Report}, number = {ICL-UT-17-10}, year = {2017}, month = {2017-06}, publisher = {University of Tennessee}, abstract = {PLASMA (Parallel Linear Algebra for Multicore Architectures) is a dense linear algebra package at the forefront of multicore computing. PLASMA is designed to deliver the highest possible performance from a system with multiple sockets of multicore processors. PLASMA achieves this objective by combining state-of-the-art solutions in parallel algorithms, scheduling, and software engineering. PLASMA currently offers a collection of routines for solving linear systems of equations and least squares problems.}, author = {Maksims Abalenkovs and Negin Bagherpour and Jack Dongarra and Mark Gates and Azzam Haidar and Jakub Kurzak and Piotr Luszczek and Samuel Relton and Jakub Sistek and David Stevens and Panruo Wu and Ichitaro Yamazaki and Asim YarKhan and Mawussi Zounon} } @techreport {1171, title = {POMPEI: Programming with OpenMP4 for Exascale Investigations}, journal = {Innovative Computing Laboratory Technical Report}, number = {ICL-UT-17-09}, year = {2017}, month = {2017-12}, publisher = {University of Tennessee}, abstract = {The objective of the Programming with OpenMP4 for Exascale Investigations (POMPEI) project is to explore new task-based programming techniques together with data structure centric programming for scientific applications to harness the potential of extreme-scale systems. Tasking is by now a well-established approach on such systems, as it has been used successfully to handle their large-scale parallelism and heterogeneity, which are leading challenges on the way to exascale computing. The approach is to harness the latest features of OpenMP4.5 and OpenACC2.5 to design abstractions shared among tasks and mapped efficiently to data-structure driven programming paradigms.
This technical report describes the approach, along with its reference implementation and results for dense linear algebra algorithms.}, author = {Jack Dongarra and Azzam Haidar and Oscar Hernandez and Stanimire Tomov and Manjunath Gorentla Venkata} } @conference {1134, title = {Power-aware Computing: Measurement, Control, and Performance Analysis for Intel Xeon Phi}, booktitle = {2017 IEEE High Performance Extreme Computing Conference (HPEC{\textquoteright}17), Best Paper Finalist}, year = {2017}, month = {2017-09}, publisher = {IEEE}, organization = {IEEE}, address = {Waltham, MA}, abstract = {The emergence of power efficiency as a primary constraint in processor and system designs poses new challenges concerning power and energy awareness for numerical libraries and scientific applications. Power consumption also plays a major role in the design of data centers, in particular for peta- and exascale systems. Understanding and improving the energy efficiency of numerical simulation thus becomes crucial. We present a detailed study and investigation toward controlling power usage and exploring how different power caps affect the performance of numerical algorithms with different computational intensities, and determine the impact and correlation with the performance of scientific applications. Our analysis is performed using a set of representative kernels, as well as many highly used scientific benchmarks. We quantify a number of power and performance measurements, and draw observations and conclusions that can be viewed as a roadmap toward achieving energy-efficient computing algorithms.}, doi = {https://doi.org/10.1109/HPEC.2017.8091085}, author = {Azzam Haidar and Heike Jagode and Asim YarKhan and Phil Vaccaro and Stanimire Tomov and Jack Dongarra} } @article {1338, title = {Power-Aware HPC on Intel Xeon Phi KNL Processors}, year = {2017}, month = {2017-06}, publisher = {ISC High Performance (ISC17), Intel Booth Presentation}, address = {Frankfurt, Germany}, author = {Azzam Haidar and Heike Jagode and Asim YarKhan and Phil Vaccaro and Stanimire Tomov and Jack Dongarra} } @article {1067, title = {Preconditioned Krylov Solvers on GPUs}, journal = {Parallel Computing}, year = {2017}, month = {2017-06}, abstract = {In this paper, we study the effect of enhancing GPU-accelerated Krylov solvers with preconditioners. We consider the BiCGSTAB, CGS, QMR, and IDR(s) Krylov solvers. For a large set of test matrices, we assess the impact of Jacobi and incomplete factorization preconditioning on the solvers{\textquoteright} numerical stability and time-to-solution performance. We also analyze how the use of a preconditioner impacts the choice of the fastest solver.}, keywords = {gpu, ILU, Jacobi, Krylov solvers, Preconditioning}, issn = {01678191}, doi = {10.1016/j.parco.2017.05.006}, url = {http://www.sciencedirect.com/science/article/pii/S0167819117300777}, author = {Hartwig Anzt and Mark Gates and Jack Dongarra and Moritz Kreutzer and Gerhard Wellein and Martin Kohler} } @techreport {1129, title = {Report on the TianHe-2A System}, journal = {Innovative Computing Laboratory Technical Report}, number = {ICL-UT-17-04}, year = {2017}, month = {2017-09}, publisher = {University of Tennessee}, abstract = {The TianHe-2A (TH-2A) compute system, designed by China{\textquoteright}s National University of Defense Technology (NUDT), is an upgrade of the TianHe-2 (TH-2) system.
TianHe is sometimes referred to as {\textquotedblleft}Milkyway,{\textquotedblright} and the latest iteration of this system is currently undergoing assembly and testing at China{\textquoteright}s National Supercomputer Center in Guangzhou (NSCC-GZ). At the time of this report, the system is 25\% complete and should be fully functional by November 2017. The most significant enhancement to the system is the upgrade to the TianHe-2 nodes; the old Intel Xeon Phi Knights Corner (KNC) accelerators will be replaced with a proprietary accelerator called the Matrix-2000. In addition, the network has been enhanced, the memory increased, and the number of cabinets expanded. The completed system, when fully integrated with 4,981,760 cores and 3.4 PB of primary memory, will have a theoretical peak performance of 94.97 Pflop/s, which is roughly double the performance of the existing TianHe-2 system. NUDT also developed the heterogeneous programming environment for the Matrix-2000 with support for OpenMP and OpenCL.}, author = {Jack Dongarra} } @techreport {1080, title = {Roadmap for the Development of a Linear Algebra Library for Exascale Computing: SLATE: Software for Linear Algebra Targeting Exascale}, journal = {SLATE Working Notes}, number = {01, ICL-UT-17-02}, year = {2017}, month = {2017-06}, publisher = {Innovative Computing Laboratory, University of Tennessee}, type = {SLATE Working Notes}, author = {Ahmad Abdelfattah and Hartwig Anzt and Aurelien Bouteiller and Anthony Danalis and Jack Dongarra and Mark Gates and Azzam Haidar and Jakub Kurzak and Piotr Luszczek and Stanimire Tomov and Stephen Wood and Panruo Wu and Ichitaro Yamazaki and Asim YarKhan} } @conference {1164, title = {Sampling Algorithms to Update Truncated SVD}, booktitle = {IEEE International Conference on Big Data}, year = {2017}, month = {2017-12}, publisher = {IEEE}, organization = {IEEE}, address = {Boston, MA}, author = {Ichitaro Yamazaki and Stanimire Tomov and Jack Dongarra} } @conference {, title = {Scaling Point Set Registration in 3D Across Thread Counts on Multicore and Hardware Accelerator Platforms through Autotuning for Large Scale Analysis of Scientific Point Clouds}, booktitle = {IEEE International Workshop on Benchmarking, Performance Tuning and Optimization for Big Data Applications (BPOD 2017)}, year = {2017}, month = {2017-12}, publisher = {IEEE}, organization = {IEEE}, address = {Boston, MA}, abstract = {In this article, we present an autotuning approach applied to systematic performance engineering of the EM-ICP (Expectation-Maximization Iterative Closest Point) algorithm for the point set registration problem. We show how we were able to exceed the performance achieved by the reference code through multiple dependence transformations and an automated procedure for generating and evaluating numerous implementation variants. Furthermore, we also managed to exploit code transformations that are uncommon during manual optimization but yielded better performance in our tests for the EM-ICP algorithm. Finally, we maintained high levels of performance in a portable fashion across a wide range of HPC hardware platforms including multicore, many-core, and GPU-based accelerators.
More importantly, the results indicate a consistently high performance level and the ability to move the task of data analysis through point-set registration to any modern compute platform without concern for inferior asymptotic efficiency.}, doi = {https://doi.org/10.1109/BigData.2017.8258258}, author = {Piotr Luszczek and Jakub Kurzak and Ichitaro Yamazaki and David Keffer and Jack Dongarra} } @techreport {1082, title = {Small Tensor Operations on Advanced Architectures for High-Order Applications}, journal = {University of Tennessee Computer Science Technical Report}, number = {UT-EECS-17-749}, year = {2017}, month = {2017-04}, publisher = {Innovative Computing Laboratory, University of Tennessee}, author = {Ahmad Abdelfattah and Marc Baboulin and Veselin Dobrev and Jack Dongarra and Azzam Haidar and Ian Karlin and Tzanio Kolev and Ian Masliah and Stanimire Tomov} } @article {1019, title = {Solving Dense Symmetric Indefinite Systems using GPUs}, journal = {Concurrency and Computation: Practice and Experience}, volume = {29}, year = {2017}, month = {2017-03}, abstract = {This paper studies the performance of different algorithms for solving a dense symmetric indefinite linear system of equations on multicore CPUs with a Graphics Processing Unit (GPU). To ensure the numerical stability of the factorization, pivoting is required. Obtaining high performance of such algorithms on the GPU is difficult because all the existing pivoting strategies lead to frequent synchronizations and irregular data accesses. Until recently, there has not been any implementation of these algorithms on a hybrid CPU/GPU architecture. To improve their performance on the hybrid architecture, we explore different techniques to reduce the expensive data transfer and synchronization between the CPU and GPU, or on the GPU (e.g., factorizing the matrix entirely on the GPU or in a communication-avoiding fashion). We also study the performance of the solver using iterative refinement along with the factorization without pivoting combined with the preprocessing technique based on random butterfly transformations, or with the mixed-precision algorithm where the matrix is factorized in single precision. This randomization algorithm only has a probabilistic proof of numerical stability, and for this paper, we only focused on the mixed-precision algorithm without pivoting. However, these approaches demonstrate that we can obtain good performance on the GPU by avoiding the pivoting and using lower-precision arithmetic, respectively. As illustrated with the application in acoustics studied in this paper, in many practical cases, the matrices can be factorized without pivoting.
Because the componentwise backward error computed in the iterative refinement signals when the algorithm failed to obtain the desired accuracy, the user can use these potentially unstable but efficient algorithms in most cases and fall back to a more stable algorithm with pivoting only in the case of failure.}, doi = {10.1002/cpe.4055}, url = {http://onlinelibrary.wiley.com/doi/10.1002/cpe.4055/full}, author = {Marc Baboulin and Jack Dongarra and Adrien Remy and Stanimire Tomov and Ichitaro Yamazaki} } @article {1086, title = {Structure-aware Linear Solver for Realtime Convex Optimization for Embedded Systems}, journal = {IEEE Embedded Systems Letters}, volume = {9}, year = {2017}, month = {2017-05}, pages = {61{\textendash}64}, abstract = {With the increasing sophistication in the use of optimization algorithms such as deep learning on embedded systems, the convex optimization solvers on embedded systems have found widespread use. This letter presents a novel linear solver technique to reduce the run-time of a convex optimization solver by using the property that some parameters are fixed during the solution iterations of a solve instance. Our experimental results show that the run-time can be reduced by two orders of magnitude.}, keywords = {Karush Kuhn Tucker (KKT), Realtime embedded convex optimization solver}, doi = {10.1109/LES.2017.2700401}, url = {http://ieeexplore.ieee.org/document/7917357/}, author = {Ichitaro Yamazaki and Saeid Nooshabadi and Stanimire Tomov and Jack Dongarra} } @conference {, title = {Towards Numerical Benchmark for Half-Precision Floating Point Arithmetic}, booktitle = {2017 IEEE High Performance Extreme Computing Conference (HPEC)}, year = {2017}, month = {2017-09}, publisher = {IEEE}, organization = {IEEE}, address = {Waltham, MA}, abstract = {With the NVIDIA Tegra Jetson X1 and Pascal P100 GPUs, NVIDIA introduced hardware-based computation on FP16 numbers, also called half-precision arithmetic. In this talk, we will introduce the steps required to build a viable benchmark for this new arithmetic format. This will include the connections to established IEEE floating point standards and existing HPC benchmarks. The discussion will focus on performance and numerical stability issues that are important for this kind of benchmarking and how they relate to NVIDIA platforms.}, doi = {https://doi.org/10.1109/HPEC.2017.8091031}, author = {Piotr Luszczek and Jakub Kurzak and Ichitaro Yamazaki and Jack Dongarra} } @inproceedings {1088, title = {Variable-Size Batched Gauss-Huard for Block-Jacobi Preconditioning}, journal = {International Conference on Computational Science (ICCS 2017)}, volume = {108}, year = {2017}, month = {2017-06}, pages = {1783-1792}, publisher = {Procedia Computer Science}, address = {Zurich, Switzerland}, abstract = {In this work we present new kernels for the generation and application of block-Jacobi preconditioners that accelerate the iterative solution of sparse linear systems on graphics processing units (GPUs). Our approach departs from the conventional LU factorization and decomposes the diagonal blocks of the matrix using the Gauss-Huard method. When enhanced with column pivoting, this method is as stable as LU with partial/row pivoting. Due to extensive use of GPU registers and integration of implicit pivoting, our variable-size batched Gauss-Huard implementation outperforms the batched version of LU factorization.
In addition, the application kernel combines the conventional two-stage triangular solve procedure, consisting of a backward solve followed by a forward solve, into a single stage that performs both operations simultaneously.}, doi = {https://doi.org/10.1016/j.procs.2017.05.186}, author = {Hartwig Anzt and Jack Dongarra and Goran Flegar and Enrique S. Quintana-Orti and Andres E. Thomas} } @conference {1160, title = {Variable-Size Batched LU for Small Matrices and Its Integration into Block-Jacobi Preconditioning}, booktitle = {46th International Conference on Parallel Processing (ICPP)}, year = {2017}, month = {2017-08}, publisher = {IEEE}, organization = {IEEE}, address = {Bristol, United Kingdom}, abstract = {We present a set of new batched CUDA kernels for the LU factorization of a large collection of independent problems of different sizes, and the subsequent triangular solves. All kernels heavily exploit the registers of the graphics processing unit (GPU) in order to deliver high performance for small problems. The development of these kernels is motivated by the need for tackling this embarrassingly parallel scenario in the context of block-Jacobi preconditioning that is relevant for the iterative solution of sparse linear systems.}, keywords = {graphics processing units, Jacobian matrices, Kernel, linear systems, Parallel processing, Sparse matrices}, doi = {10.1109/ICPP.2017.18}, url = {http://ieeexplore.ieee.org/abstract/document/8025283/?reload=true}, author = {Hartwig Anzt and Jack Dongarra and Goran Flegar and Enrique S. Quintana-Orti} } @article {, title = {With Extreme Computing, the Rules Have Changed}, journal = {Computing in Science \& Engineering}, volume = {19}, year = {2017}, month = {2017-05}, pages = {52-62}, abstract = {On the eve of exascale computing, traditional wisdom no longer applies. High-performance computing is gone as we know it. This article discusses a range of new algorithmic techniques emerging in the context of exascale computing, many of which defy the common wisdom of high-performance computing and are considered unorthodox, but could turn out to be a necessity in the near future.}, doi = {https://doi.org/10.1109/MCSE.2017.48}, author = {Jack Dongarra and Stanimire Tomov and Piotr Luszczek and Jakub Kurzak and Mark Gates and Ichitaro Yamazaki and Hartwig Anzt and Azzam Haidar and Ahmad Abdelfattah} } @techreport {1174, title = {2016 Dense Linear Algebra Software Packages Survey}, journal = {University of Tennessee Computer Science Technical Report}, number = {UT-EECS-16-744 / LAWN 290}, year = {2016}, month = {2016-09}, publisher = {University of Tennessee}, abstract = {The 2016 Dense Linear Algebra Software Packages Survey was administered from January 1, 2016 to April 12, 2016. 234 respondents answered the survey. The survey was advertised directly to the Linear Algebra community via our LAPACK/ScaLAPACK forum and NA Digest, and we also directly contacted vendors and linear algebra experts. The breakdown of respondents was: 74\% researchers or scientists, 25\% Principal Investigators, and 25\% software maintainers or system administrators. The goal of the survey was to gather the Linear Algebra community{\textquoteright}s opinions and input on dense linear algebra software packages, in particular LAPACK, ScaLAPACK, PLASMA and MAGMA. The ultimate purpose of the survey was to improve these libraries to benefit our user community. The survey would allow the team to prioritize the many possible improvements that could be made.
We also asked for input from users accessing these libraries via third-party interfaces, for example MATLAB, Intel{\textquoteright}s MKL, Python{\textquoteright}s NumPy, AMD{\textquoteright}s ACML, and many others.}, author = {Jack Dongarra and Jim Demmel and Julien Langou and Julie Langou} } @article {1342, title = {Accelerating Tensor Contractions for High-Order FEM on CPUs, GPUs, and KNLs}, year = {2016}, month = {2016-09}, publisher = {Smoky Mountains Computational Sciences and Engineering Conference (SMC16), Poster}, address = {Gatlinburg, TN}, author = {Azzam Haidar and Ahmad Abdelfattah and Veselin Dobrev and Ian Karlin and Tzanio Kolev and Stanimire Tomov and Jack Dongarra} } @article {994, title = {Accelerating the Conjugate Gradient Algorithm with GPU in CFD Simulations}, journal = {VECPAR}, year = {2016}, abstract = {This paper illustrates how GPU computing can be used to accelerate computational fluid dynamics (CFD) simulations. For sparse linear systems arising from finite volume discretization, we evaluate and optimize the performance of Conjugate Gradient (CG) routines designed for manycore accelerators and compare against an industrial CPU-based implementation. We also investigate how the recent advances in preconditioning, such as iterative Incomplete Cholesky (IC, as the symmetric case of ILU) preconditioning, match the requirements for solving real-world problems.}, url = {http://hgpu.org/?p=16264}, author = {Hartwig Anzt and Marc Baboulin and Jack Dongarra and Yvan Fournier and Frank Hulsemann and Amal Khabou and Yushan Wang} } @article {924, title = {Assessing the Cost of Redistribution followed by a Computational Kernel: Complexity and Performance Results}, journal = {Parallel Computing}, volume = {52}, year = {2016}, month = {2016-02}, pages = {22-41}, abstract = {The classical redistribution problem aims at optimally scheduling communications when reshuffling from an initial data distribution to a target data distribution. This target data distribution is usually chosen to optimize some objective for the algorithmic kernel under study (good computational balance or low communication volume or cost), and therefore to provide high efficiency for that kernel. However, the choice of a distribution minimizing the target objective is not unique. This leads to generalizing the redistribution problem as follows: find a re-mapping of data items onto processors such that the data redistribution cost is minimal, and the operation remains as efficient. This paper studies the complexity of this generalized problem. We compute optimal solutions and evaluate, through simulations, their gain over classical redistribution. We also show the NP-hardness of the problem to find the optimal data partition and processor permutation (defined by new subsets) that minimize the cost of redistribution followed by a simple computational kernel.
Finally, experimental validation of the new redistribution algorithms is conducted on a multicore cluster, for both a 1D-stencil kernel and a more compute-intensive dense linear algebra routine.}, keywords = {Data partition, linear algebra, parsec, QR factorization, Redistribution, Stencil}, doi = {10.1016/j.parco.2015.09.005}, author = {Julien Herrmann and George Bosilca and Thomas Herault and Loris Marchal and Yves Robert and Jack Dongarra} } @inproceedings {991, title = {Batched Generation of Incomplete Sparse Approximate Inverses on GPUs}, journal = {Proceedings of the 7th Workshop on Latest Advances in Scalable Algorithms for Large-Scale Systems}, year = {2016}, month = {2016-11}, pages = {49{\textendash}56}, abstract = {Incomplete Sparse Approximate Inverses (ISAI) have recently been shown to be an attractive alternative to exact sparse triangular solves in the context of incomplete factorization preconditioning. In this paper we propose a batched GPU-kernel for the efficient generation of ISAI matrices. Utilizing only thread-local memory allows for computing the ISAI matrix with a very small memory footprint. We demonstrate that this strategy is faster than the existing strategy for generating ISAI matrices, and use a large number of test matrices to assess the algorithm{\textquoteright}s efficiency in an iterative solver setting.}, isbn = {978-1-5090-5222-6}, doi = {10.1109/ScalA.2016.11}, author = {Hartwig Anzt and Edmond Chow and Thomas Huckle and Jack Dongarra} } @techreport {988, title = {On block-asynchronous execution on GPUs}, journal = {LAPACK Working Note}, number = {291}, year = {2016}, month = {2016-11}, abstract = {This paper experimentally investigates how GPUs execute instructions when used for general purpose computing (GPGPU). We use a light-weight kernel realizing a vector operation to analyze which vector entries are updated subsequently, and identify regions where parallel execution can be expected. The results help us to understand how GPUs operate, and map this operation mode to the mathematical concept of asynchronism. In particular it helps to understand the effects that can occur when implementing a fixed-point method using in-place updates on GPU hardware.}, url = {http://www.netlib.org/lapack/lawnspdf/lawn291.pdf}, author = {Hartwig Anzt and Edmond Chow and Jack Dongarra} } @article {1344, title = {Cholesky Factorization on Batches of Matrices with Fixed and Variable Sizes}, year = {2016}, month = {2016-04}, publisher = {GPU Technology Conference (GTC16), Poster}, address = {San Jose, CA}, author = {Ahmad Abdelfattah and Azzam Haidar and Stanimire Tomov and Jack Dongarra} } @techreport {978, title = {Context Identifier Allocation in Open MPI}, journal = {University of Tennessee Computer Science Technical Report}, number = {ICL-UT-16-01}, year = {2016}, month = {2016-01}, publisher = {Innovative Computing Laboratory, University of Tennessee}, abstract = {The concept of communicators is a central notion in the Message Passing Interface, allowing on one side the MPI implementation to specialize its matching and deliver messages in the right context, and on the other side the library developers to contextualize their message exchanges, and scope different algorithms to well-defined groups of processes. More precisely, all communication objects in MPI are derived from a communicator at some point.
All MPI functions allowing the creation of new communicators have a collective meaning, either over the group of processes from the parent communicator or those from the target communicator. Thus, the performance of the communicator creation is tied to the collective communication performance, as well as the amount of data that needs to be exchanged in order to consistently create this new communicator. We introduce several communicator creation algorithms, and present their implementation in the context of Open MPI. We explore the performance of these new algorithms and compare them with state-of-the-art algorithms available in other MPI implementations.}, author = {George Bosilca and Thomas Herault and Jack Dongarra} } @inbook {883, title = {Dense Symmetric Indefinite Factorization on GPU Accelerated Architectures}, booktitle = {Lecture Notes in Computer Science}, series = {11th International Conference, PPAM 2015, Krakow, Poland, September 6-9, 2015. Revised Selected Papers, Part I}, volume = {9573}, year = {2016}, month = {2015-09}, pages = {86-95}, publisher = {Springer International Publishing}, organization = {Springer International Publishing}, chapter = {Parallel Processing and Applied Mathematics}, abstract = {We study the performance of dense symmetric indefinite factorizations (Bunch-Kaufman and Aasen{\textquoteright}s algorithms) on multicore CPUs with a Graphics Processing Unit (GPU). Though such algorithms are needed in many scientific and engineering simulations, obtaining high performance of the factorization on the GPU is difficult because the pivoting that is required to ensure the numerical stability of the factorization leads to frequent synchronizations and irregular data accesses. As a result, until recently, there has not been any implementation of these algorithms on hybrid CPU/GPU architectures. To improve their performance on the hybrid architecture, we explore different techniques to reduce the expensive communication and synchronization between the CPU and GPU, or on the GPU. We also study the performance of an LDL^T factorization with no pivoting combined with the preprocessing technique based on Random Butterfly Transformations. Though such transformations only have probabilistic guarantees of numerical stability, they avoid the pivoting and achieve high performance on the GPU.}, keywords = {Communication-avoiding, Dense symmetric indefinite factorization, gpu computation, randomization}, isbn = {978-3-319-32149-3}, doi = {10.1007/978-3-319-32149-3_9}, author = {Marc Baboulin and Jack Dongarra and Adrien Remy and Stanimire Tomov and Ichitaro Yamazaki}, editor = {Roman Wyrzykowski and Ewa Deelman and Konrad Karczewski and Jacek Kitowski and Kazimierz Wiatr} } @conference {940, title = {On the Development of Variable Size Batched Computation for Heterogeneous Parallel Architectures}, booktitle = {The 17th IEEE International Workshop on Parallel and Distributed Scientific and Engineering Computing (PDSEC 2016), IPDPS 2016}, year = {2016}, month = {2016-05}, publisher = {IEEE}, organization = {IEEE}, address = {Chicago, IL}, abstract = {

Many scientific applications, ranging from national security to medical advances, require solving a number of relatively small-size independent problems. As the size of each individual problem does not provide sufficient parallelism for the underlying hardware, especially accelerators, these problems must be solved concurrently as a batch in order to saturate the hardware with enough work, hence the name batched computation. A possible simplification is to assume a uniform size for all problems. However, real applications do not necessarily satisfy such an assumption. Consequently, an efficient solution for variable-size batched computations is required.

This paper proposes a foundation for high performance variable-size batched matrix computation based on Graphics Processing Units (GPUs). Being throughput-oriented processors, GPUs favor regular computation and less divergence among threads, in order to achieve high performance. Therefore, the development of high performance numerical software for this kind of problem is challenging. As a case study, we developed efficient batched Cholesky factorization algorithms for relatively small matrices of different sizes. However, most of the strategies and the software developed, and in particular a set of variable size batched BLAS kernels, can be used in many other dense matrix factorizations, large scale sparse direct multifrontal solvers, and applications. We propose new interfaces and mechanisms to handle the irregular computation pattern on the GPU. To the authors{\textquoteright} knowledge, this is the first attempt to develop high performance software for this class of problems. Using a K40c GPU, our performance tests show speedups of up to 2.5{\texttimes} against two Sandy Bridge CPUs (8-core each) running the Intel MKL library.

}, keywords = {batched computation, GPUs, variable small sizes}, author = {Ahmad Abdelfattah and Azzam Haidar and Stanimire Tomov and Jack Dongarra} } @inproceedings {996, title = {Domain Overlap for Iterative Sparse Triangular Solves on GPUs}, journal = {Software for Exascale Computing - SPPEXA}, volume = {113}, year = {2016}, month = {2016-09}, pages = {527{\textendash}545}, publisher = {Springer International Publishing}, abstract = {Iterative methods for solving sparse triangular systems are an attractive alternative to exact forward and backward substitution if an approximation of the solution is acceptable. On modern hardware, performance benefits are available as iterative methods allow for better parallelization. In this paper, we investigate how block-iterative triangular solves can benefit from using overlap. Because the matrices are triangular, we use {\textquotedblleft}directed{\textquotedblright} overlap, depending on whether the matrix is upper or lower triangular. We enhance a GPU implementation of the block-asynchronous Jacobi method with directed overlap. For GPUs and other cases where the problem must be overdecomposed, i.e., more subdomains and threads than cores, there is a preference in processing or scheduling the subdomains in a specific order, following the dependencies specified by the sparse triangular matrix. For sparse triangular factors from incomplete factorizations, we demonstrate that moderate directed overlap with subdomain scheduling can improve convergence and time-to-solution.}, doi = {10.1007/978-3-319-40528-5_24}, author = {Hartwig Anzt and Edmond Chow and Daniel Szyld and Jack Dongarra}, editor = {Hans-Joachim Bungartz and Philipp Neumann and Wolfgang E. Nagel} } @inproceedings {992, title = {Efficiency of General Krylov Methods on GPUs {\textendash} An Experimental Study}, journal = {2016 IEEE International Parallel and Distributed Processing Symposium Workshops (IPDPSW)}, year = {2016}, month = {2016-05}, pages = {683-691}, abstract = {This paper compares different Krylov methods based on short recurrences with respect to their efficiency when implemented on GPUs. The comparison includes BiCGSTAB, CGS, QMR, and IDR using different shadow space dimensions. These methods are known for their good convergence characteristics. For a large set of test matrices taken from the University of Florida Matrix Collection, we evaluate the methods{\textquoteright} performance against different target metrics: convergence, number of sparse matrix-vector multiplications, and execution time. We also analyze whether the methods are "orthogonal" in terms of problem suitability.
We propose best practices for choosing methods in a "black box" scenario, where no information about the optimal solver is available.}, keywords = {algorithmic bombardment, BiCGSTAB, CGS, Convergence, Electric breakdown, gpu, graphics processing units, Hardware, IDR(s), Krylov solver, Libraries, linear systems, QMR, Sparse matrices}, doi = {10.1109/IPDPSW.2016.45}, author = {Hartwig Anzt and Jack Dongarra and Moritz Kreutzer and Gerhard Wellein and Martin Kohler} } @conference {937, title = {Efficiency of General Krylov Methods on GPUs {\textendash} An Experimental Study}, booktitle = {The Sixth International Workshop on Accelerators and Hybrid Exascale Systems (AsHES)}, year = {2016}, month = {2016-05}, publisher = {IEEE}, organization = {IEEE}, address = {Chicago, IL}, abstract = {This paper compares different Krylov methods based on short recurrences with respect to their efficiency when implemented on GPUs. The comparison includes BiCGSTAB, CGS, QMR, and IDR using different shadow space dimensions. These methods are known for their good convergence characteristics. For a large set of test matrices taken from the University of Florida Matrix Collection, we evaluate the methods{\textquoteright} performance against different target metrics: convergence, number of sparse matrix-vector multiplications, and execution time. We also analyze whether the methods are {\textquotedblleft}orthogonal{\textquotedblright} in terms of problem suitability. We propose best practices for choosing methods in a {\textquotedblleft}black box{\textquotedblright} scenario, where no information about the optimal solver is available.}, keywords = {algorithmic bombardment, BiCGSTAB, CGS, gpu, IDR(s), Krylov solver, QMR}, doi = {10.1109/IPDPSW.2016.45}, author = {Hartwig Anzt and Jack Dongarra and Moritz Kreutzer and Gerhard Wellein and Martin Kohler} } @inproceedings {979, title = {Failure Detection and Propagation in HPC Systems}, journal = {Proceedings of the International Conference for High Performance Computing, Networking, Storage and Analysis (SC{\textquoteright}16)}, year = {2016}, month = {2016-11}, pages = {27:1-27:11}, publisher = {IEEE Press}, address = {Salt Lake City, Utah}, keywords = {failure detection, fault-tolerance, MPI}, isbn = {978-1-4673-8815-3}, url = {http://dl.acm.org/citation.cfm?id=3014904.3014941}, author = {George Bosilca and Aurelien Bouteiller and Amina Guermouche and Thomas Herault and Yves Robert and Pierre Sens and Jack Dongarra} } @article {989, title = {Fine-grained Bit-Flip Protection for Relaxation Methods}, journal = {Journal of Computational Science}, year = {2016}, month = {2016-11}, abstract = {Resilience is considered a challenging, under-addressed issue that the high performance computing (HPC) community will have to face in order to produce reliable Exascale systems by the beginning of the next decade. As part of a push toward a resilient HPC ecosystem, in this paper we propose an error-resilient iterative solver for sparse linear systems based on stationary component-wise relaxation methods. Starting from a plain implementation of the Jacobi iteration, our approach introduces a low-cost component-wise technique that detects bit-flips, rejecting some component updates, and turning the initial synchronized solver into an asynchronous iteration.
Our experimental study with sparse incomplete factorizations from a collection of real-world applications, and a practical GPU implementation, exposes the convergence delay incurred by the fault-tolerant implementation and demonstrates its practical performance.}, keywords = {Bit flips, Fault tolerance, High Performance Computing, iterative solvers, Jacobi method, sparse linear systems}, doi = {10.1016/j.jocs.2016.11.013}, author = {Hartwig Anzt and Jack Dongarra and Enrique S. Quintana-Orti} } @conference {941, title = {GPU-Aware Non-contiguous Data Movement In Open MPI}, booktitle = {25th International Symposium on High-Performance Parallel and Distributed Computing (HPDC{\textquoteright}16)}, year = {2016}, month = {2016-06}, publisher = {ACM}, organization = {ACM}, address = {Kyoto, Japan}, abstract = {

Due to better parallel density and power efficiency, GPUs have become more popular for use in scientific applications. Many of these applications are based on the ubiquitous Message Passing Interface (MPI) programming paradigm, and take advantage of non-contiguous memory layouts to exchange data between processes. However, support for efficient non-contiguous data movements for GPU-resident data is still in its infancy, which has a negative impact on the overall application performance.

To address this shortcoming, we present a solution where we take advantage of the inherent parallelism in the datatype packing and unpacking operations. We developed a close integration between Open MPI{\textquoteright}s stack-based datatype engine, NVIDIA{\textquoteright}s Unified Memory Architecture and GPUDirect capabilities. In this design the datatype packing and unpacking operations are offloaded onto the GPU and handled by specialized GPU kernels, while the CPU remains the driver for data movements between nodes. By incorporating our design into the Open MPI library we have shown significantly better performance for non-contiguous GPU-resident data transfers on both shared and distributed memory machines.

}, keywords = {datatype, gpu, hybrid architecture, MPI, non-contiguous data}, doi = {10.1145/2907294.2907317}, author = {Wei Wu and George Bosilca and Rolf vandeVaart and Sylvain Jeaugey and Jack Dongarra} } @conference {961, title = {Hessenberg Reduction with Transient Error Resilience on GPU-Based Hybrid Architectures}, booktitle = {30th IEEE International Parallel \& Distributed Processing Symposium (IPDPS)}, year = {2016}, month = {2016-05}, publisher = {IEEE}, organization = {IEEE}, address = {Chicago, IL}, abstract = {Graphics Processing Units (GPUs) have been seeing widespread adoption in the field of scientific computing, owing to the performance gains provided on computation-intensive applications. In this paper, we present the design and implementation of a Hessenberg reduction algorithm immune to simultaneous soft-errors, capable of taking advantage of hybrid GPU-CPU platforms. These soft-errors are detected and corrected on the fly, preventing the propagation of the error to the rest of the data. Our design is at the intersection between several fault tolerant techniques and employs the algorithm-based fault tolerance technique, diskless checkpointing, and reverse computation to achieve its goal. By utilizing the idle time of the CPUs, and by overlapping both host-side and GPU-side workloads, we minimize the resilience overhead. Experimental results have validated our design decisions as our algorithm introduced less than 2\% performance overhead compared to the optimized, but fault-prone, hybrid Hessenberg reduction.}, author = {Yulu Jia and Piotr Luszczek and Jack Dongarra} } @conference {939, title = {Heterogeneous Streaming}, booktitle = {The Sixth International Workshop on Accelerators and Hybrid Exascale Systems (AsHES), IPDPS 2016}, year = {2016}, month = {2016-05}, publisher = {IEEE}, organization = {IEEE}, address = {Chicago, IL}, abstract = {This paper introduces a new heterogeneous streaming library called hetero Streams (hStreams). We show how a simple FIFO streaming model can be applied to heterogeneous systems that include manycore coprocessors and multicore CPUs. This model supports concurrency across nodes, among tasks within a node, and between data transfers and computation. We give examples for different approaches, show how the implementation can be layered, analyze overheads among layers, and apply those models to parallelize applications using simple, intuitive interfaces. We compare the features and versatility of hStreams, OpenMP, CUDA Streams, and OmpSs. We show how the use of hStreams makes it easier for scientists to identify tasks and easily expose concurrency among them, and how it enables tuning experts and runtime systems to tailor execution for different heterogeneous targets. Practical application examples are taken from the field of numerical linear algebra, commercial structural simulation software, and a seismic processing application.}, keywords = {plasma}, author = {Chris J.
Newburn and Gaurav Bansal and Michael Wood and Luis Crivelli and Judit Planas and Alejandro Duran and Paulo Souza and Leonardo Borges and Piotr Luszczek and Stanimire Tomov and Jack Dongarra and Hartwig Anzt and Mark Gates and Azzam Haidar and Yulu Jia and Khairul Kabir and Ichitaro Yamazaki and Jesus Labarta} } @article {984, title = {High Performance Conjugate Gradient Benchmark: A New Metric for Ranking High Performance Computing Systems}, journal = {International Journal of High Performance Computing Applications}, volume = {30}, year = {2016}, month = {2016-02}, pages = {3 - 10}, issn = {1094-3420}, doi = {10.1177/1094342015593158}, url = {http://hpc.sagepub.com/cgi/doi/10.1177/1094342015593158}, author = {Jack Dongarra and Michael A. Heroux and Piotr Luszczek} } @techreport {972, title = {High Performance Realtime Convex Solver for Embedded Systems}, journal = {University of Tennessee Computer Science Technical Report}, number = {UT-EECS-16-745}, year = {2016}, month = {2016-10}, abstract = {Convex optimization solvers for embedded systems find widespread use. This letter presents a novel technique to reduce the run-time of the decomposition of the KKT matrix in a convex optimization solver for an embedded system by two orders of magnitude. We use the property that although the KKT matrix changes, some of its block sub-matrices are fixed during the solution iterations and the associated solving instances.}, keywords = {KKT, Realtime embedded convex optimization solver}, author = {Ichitaro Yamazaki and Saeid Nooshabadi and Stanimire Tomov and Jack Dongarra} } @conference {964, title = {High-performance Matrix-matrix Multiplications of Very Small Matrices}, booktitle = {22nd International European Conference on Parallel and Distributed Computing (Euro-Par{\textquoteright}16)}, year = {2016}, month = {2016-08}, publisher = {Springer International Publishing}, organization = {Springer International Publishing}, address = {Grenoble, France}, abstract = {The use of the general dense matrix-matrix multiplication (GEMM) is fundamental for obtaining high performance in many scientific computing applications. GEMMs for small matrices (of sizes less than 32), however, are not sufficiently optimized in existing libraries. In this paper we consider the case of many small GEMMs on either CPU or GPU architectures. This is a case that often occurs in applications like big data analytics, machine learning, high-order FEM, and others. The GEMMs are grouped together in a single batched routine. We present algorithms and optimization techniques specialized for these cases to obtain performance that is within 90\% of the optimal. We show that these results outperform currently available state-of-the-art implementations and vendor-tuned math libraries.}, author = {Ian Masliah and Ahmad Abdelfattah and Azzam Haidar and Stanimire Tomov and Jo{\"e}l Falcou and Jack Dongarra} } @conference {942, title = {High-Performance Tensor Contractions for GPUs}, booktitle = {International Conference on Computational Science (ICCS{\textquoteright}16)}, year = {2016}, month = {2016-06}, address = {San Diego, CA}, abstract = {We present a computational framework for high-performance tensor contractions on GPUs. High performance is difficult to obtain using existing libraries, especially for many independent contractions where each contraction is very small, e.g., sub-vector/warp in size. However, using our framework to batch contractions plus application-specifics, we demonstrate close to peak performance results.
In particular, to accelerate large scale tensor-formulated high-order finite element method (FEM) simulations, which is the main focus and motivation for this work, we represent contractions as tensor index reordering plus matrix-matrix multiplications (GEMMs). This is a key factor to achieve algorithmically many-fold acceleration (vs. not using it) due to possible reuse of data loaded in fast memory. In addition to using this context knowledge, we design tensor data-structures, tensor algebra interfaces, and new tensor contraction algorithms and implementations to achieve 90+\% of a theoretically derived peak on GPUs. On a K40c GPU, for contractions resulting in GEMMs on square matrices of size 8, for example, we are 2.8{\texttimes} faster than CUBLAS, and 8.5{\texttimes} faster than MKL on 16 cores of Intel Xeon E5-2670 (Sandy Bridge) 2.60GHz CPUs. Finally, we apply autotuning and code generation techniques to simplify tuning and provide an architecture-aware, user-friendly interface.}, keywords = {Applications, Batched linear algebra, FEM, gpu, Tensor contractions, Tensor HPC}, author = {Ahmad Abdelfattah and Marc Baboulin and Veselin Dobrev and Jack Dongarra and Christopher Earl and Jo{\"e}l Falcou and Azzam Haidar and Ian Karlin and Tzanio Kolev and Ian Masliah and Stanimire Tomov} } @techreport {929, title = {High-Performance Tensor Contractions for GPUs}, journal = {University of Tennessee Computer Science Technical Report}, number = {UT-EECS-16-738}, year = {2016}, month = {2016-01}, publisher = {University of Tennessee}, abstract = {We present a computational framework for high-performance tensor contractions on GPUs. High performance is difficult to obtain using existing libraries, especially for many independent contractions where each contraction is very small, e.g., sub-vector/warp in size. However, using our framework to batch contractions plus application-specifics, we demonstrate close to peak performance results. In particular, to accelerate large scale tensor-formulated high-order finite element method (FEM) simulations, which is the main focus and motivation for this work, we represent contractions as tensor index reordering plus matrix-matrix multiplications (GEMMs). This is a key factor to achieve algorithmically many-fold acceleration (vs. not using it) due to possible reuse of data loaded in fast memory. In addition to using this context knowledge, we design tensor data-structures, tensor algebra interfaces, and new tensor contraction algorithms and implementations to achieve 90+\% of a theoretically derived peak on GPUs. On a K40c GPU, for contractions resulting in GEMMs on square matrices of size 8, for example, we are 2.8{\texttimes} faster than CUBLAS, and 8.5{\texttimes} faster than MKL on 16 cores of Intel Xeon E5-2670 (Sandy Bridge) 2.60GHz CPUs.
Finally, we apply autotuning and code generation techniques to simplify tuning and provide an architecture-aware, user-friendly interface.}, author = {Ahmad Abdelfattah and Marc Baboulin and Veselin Dobrev and Jack Dongarra and Christopher Earl and Jo{\"e}l Falcou and Azzam Haidar and Ian Karlin and Tzanio Kolev and Ian Masliah and Stanimire Tomov} } @booklet {965, title = {The HPL Benchmark: Past, Present \& Future}, year = {2016}, month = {2016-07}, type = {Conference Presentation}, address = {ISC High Performance, Frankfurt, Germany}, author = {Jack Dongarra} } @article {1472, title = {Linear Algebra Software for Large-Scale Accelerated Multicore Computing}, journal = {Acta Numerica}, volume = {25}, year = {2016}, month = {2016-05}, pages = {1-160}, abstract = {Many crucial scientific computing applications, ranging from national security to medical advances, rely on high-performance linear algebra algorithms and technologies, underscoring their importance and broad impact. Here we present the state-of-the-art design and implementation practices for the acceleration of the predominant linear algebra algorithms on large-scale accelerated multicore systems. Examples are given with fundamental dense linear algebra algorithms {\textendash} from the LU, QR, Cholesky, and LDLT factorizations needed for solving linear systems of equations, to eigenvalue and singular value decomposition (SVD) problems. The implementations presented are readily available via the open-source PLASMA and MAGMA libraries, which represent the next-generation modernization of the popular LAPACK library for accelerated multicore systems. To generate the extreme level of parallelism needed for the efficient use of these systems, algorithms of interest are redesigned and then split into well-chosen computational tasks. The task execution is scheduled over the computational components of a hybrid system of multicore CPUs with GPU accelerators and/or Xeon Phi coprocessors, using either static scheduling or light-weight runtime systems. The use of light-weight runtime systems keeps scheduling overheads low, similar to static scheduling, while enabling the expression of parallelism through sequential-like code. This simplifies the development effort and allows exploration of the unique strengths of the various hardware components. Finally, we emphasize the development of innovative linear algebra algorithms using three technologies {\textendash} mixed precision arithmetic, batched operations, and asynchronous iterations {\textendash} that are currently of high interest for accelerated multicore systems.}, doi = {10.1017/S0962492916000015}, author = {Ahmad Abdelfattah and Hartwig Anzt and Jack Dongarra and Mark Gates and Azzam Haidar and Jakub Kurzak and Piotr Luszczek and Stanimire Tomov and Asim YarKhan} } @conference {963, title = {LU, QR, and Cholesky Factorizations: Programming Model, Performance Analysis and Optimization Techniques for the Intel Knights Landing Xeon Phi}, booktitle = {IEEE High Performance Extreme Computing Conference (HPEC{\textquoteright}16)}, year = {2016}, month = {2016-09}, publisher = {IEEE}, organization = {IEEE}, address = {Waltham, MA}, abstract = {A wide variety of heterogeneous compute resources, ranging from multicore CPUs to GPUs and coprocessors, are available in modern computers, making it challenging to design unified numerical libraries that efficiently and productively use all these varied resources.
For example, in order to efficiently use Intel{\textquoteright}s Knights Landing (KNL) processor, the next generation of Xeon Phi architectures, one must design and schedule an application with multiple degrees of parallelism and task grain sizes in order to obtain efficient performance. We propose a productive and portable programming model that allows us to write a serial-looking code, which, however, achieves parallelism and scalability by using a lightweight runtime environment to manage the resource-specific workload, and to control the dataflow and the parallel execution. This is done through multiple techniques ranging from multi-level data partitioning to adaptive task grain sizes, and dynamic task scheduling. In addition, our task abstractions enable unified algorithmic development across all the heterogeneous resources. Finally, we outline the strengths and the effectiveness of this approach {\textendash} especially in regards to hardware trends and ease of programming high-performance numerical software that current applications need {\textendash} in order to motivate current work and future directions for the next generation of parallel programming models for high-performance linear algebra libraries on heterogeneous systems.}, author = {Azzam Haidar and Stanimire Tomov and Konstantin Arturov and Murat Guney and Shane Story and Jack Dongarra} } @techreport {, title = {MAGMA Batched: A Batched BLAS Approach for Small Matrix Factorizations and Applications on GPUs}, journal = {Innovative Computing Laboratory Technical Report}, number = {ICL-UT-16-02}, year = {2016}, month = {2016-08}, publisher = {University of Tennessee}, abstract = {A particularly challenging class of problems arising in many applications, called batched problems, involves linear algebra operations on many small-sized matrices. We proposed and designed batched BLAS (Basic Linear Algebra Subroutines), Level-2 GEMV and Level-3 GEMM, to solve them. We illustrate how batched GEMV and GEMM are able to assist batched advanced factorizations (e.g., bi-diagonalization) and other BLAS routines (e.g., triangular solve) to achieve optimal performance on GPUs. Our solutions achieved up to 2.8-3{\texttimes} speedups compared to CUBLAS and MKL solutions, wherever possible. We illustrated the batched methodology on a real-world hydrodynamic application by reformulating the tensor operations into batched BLAS GEMV and GEMM operations. A 2.5{\texttimes} speedup and a 1.4{\texttimes} greenup are obtained by changing 10\% of the code. We accelerated and scaled it on the Titan supercomputer to 4,096 nodes.}, author = {Tingxing Dong and Azzam Haidar and Piotr Luszczek and Stanimire Tomov and Ahmad Abdelfattah and Jack Dongarra} } @article {, title = {A New Metric for Ranking High-Performance Computing Systems}, journal = {National Science Review}, volume = {3}, year = {2016}, month = {2016-01}, pages = {30-35}, doi = {10.1093/nsr/nwv084}, author = {Jack Dongarra and Michael A. Heroux and Piotr Luszczek} } @article {971, title = {Non-GPU-resident Dense Symmetric Indefinite Factorization}, journal = {Concurrency and Computation: Practice and Experience}, year = {2016}, month = {2016-11}, abstract = {We study various algorithms to factorize a symmetric indefinite matrix that does not fit in the core memory of a computer. There are two sources of data movement into the memory: one needed for selecting and applying pivots and the other needed to update each column of the matrix for the factorization.
It is a challenge to obtain high performance of such an algorithm when pivoting is required to ensure the numerical stability of the factorization. For example, when factorizing each column of the matrix, a diagonal entry, which ensures the stability, may need to be selected as a pivot among the remaining diagonals, and moved to the leading diagonal by swapping both the corresponding rows and columns of the matrix. If the pivot is not in the core memory, then it must be loaded into the core memory. For updating the matrix, the data locality may be improved by partitioning the matrix. For example, a right-looking partitioned algorithm first factorizes the leading columns, called the panel, and then uses the factorized panel to update the trailing submatrix. This algorithm only accesses the trailing submatrix after each panel factorization (instead of after each column factorization) and performs most of its floating-point operations (flops) using BLAS-3, which can take advantage of the memory hierarchy. However, because the pivots cannot be predetermined, the whole trailing submatrix must be updated before the next panel factorization can start. When the whole submatrix does not fit in the core memory all at once, loading the block columns into the memory can become the performance bottleneck. Similarly, the left-looking variant of the algorithm would require updating each panel with all of the previously factorized columns. This makes it a much greater challenge to implement an efficient out-of-core symmetric indefinite factorization compared with an out-of-core nonsymmetric LU factorization with partial pivoting, which only requires swapping the rows of the matrix and accesses the trailing submatrix after each in-core factorization (instead of after each panel factorization, as in the symmetric factorization). To reduce the amount of data transfer, in this paper we use the recently proposed left-looking communication-avoiding variant of the symmetric factorization algorithm to factorize the columns in the core memory, and then perform the partitioned right-looking out-of-core trailing submatrix updates. This combination may still require loading the pivots into the core memory, but it only updates the trailing submatrix after each in-core factorization, while the previous algorithm updates it after each panel factorization. Although these in-core and out-of-core algorithms can be applied at any level of the memory hierarchy, we apply our designs to the GPU and CPU memory, respectively. We call this specific implementation of the algorithm a non{\textendash}GPU-resident implementation.
Our performance results on the current hybrid CPU/GPU architecture demonstrate that when the matrix is much larger than the GPU memory, the proposed algorithm can obtain significant speedups over the communication-hiding implementations of the previous algorithms.}, doi = {10.1002/cpe.4012}, author = {Ichitaro Yamazaki and Stanimire Tomov and Jack Dongarra} } @conference {968, title = {Performance Analysis and Acceleration of Explicit Integration for Large Kinetic Networks using Batched GPU Computations}, booktitle = {2016 IEEE High Performance Extreme Computing Conference (HPEC {\textquoteleft}16)}, year = {2016}, month = {2016-09}, publisher = {IEEE}, organization = {IEEE}, address = {Waltham, MA}, abstract = {We demonstrate the systematic implementation of recently-developed fast explicit kinetic integration algorithms that efficiently solve N coupled ordinary differential equations (subject to initial conditions) on modern GPUs. We take representative test cases (Type Ia supernova explosions) and demonstrate two or more orders of magnitude increase in efficiency for solving such systems (of realistic thermonuclear networks coupled to fluid dynamics). This implies that important coupled, multiphysics problems in various scientific and technical disciplines that were intractable, or could be simulated only with highly schematic kinetic networks, are now computationally feasible. As examples of such applications we present the computational techniques developed for our ongoing deployment of these new methods on modern GPU accelerators. We show that, similarly to many other scientific applications ranging from national security to medical advances, the computation can be split into many independent computational tasks, each of relatively small size. As the size of each individual task does not provide sufficient parallelism for the underlying hardware, especially for accelerators, these tasks must be computed concurrently as a single routine, which we call a batched routine, in order to saturate the hardware with enough work.}, author = {Azzam Haidar and Benjamin Brock and Stanimire Tomov and Michael Guidry and Jay Jay Billings and Daniel Shyles and Jack Dongarra} } @article {990, title = {On the performance and energy efficiency of sparse linear algebra on GPUs}, journal = {International Journal of High Performance Computing Applications}, year = {2016}, month = {2016-10}, abstract = {In this paper we unveil some performance and energy efficiency frontiers for sparse computations on GPU-based supercomputers. We compare the resource efficiency of different sparse matrix{\textendash}vector products (SpMV) taken from libraries such as cuSPARSE and MAGMA for GPU and Intel{\textquoteright}s MKL for multicore CPUs, and develop a GPU sparse matrix{\textendash}matrix product (SpMM) implementation that handles the simultaneous multiplication of a sparse matrix with a set of vectors in block-wise fashion. While a typical sparse computation such as the SpMV reaches only a fraction of the peak of current GPUs, we show that the SpMM succeeds in exceeding the memory-bound limitations of the SpMV. We integrate this kernel into a GPU-accelerated Locally Optimal Block Preconditioned Conjugate Gradient (LOBPCG) eigensolver. LOBPCG is chosen as a benchmark algorithm for this study as it combines an interesting mix of sparse and dense linear algebra operations that is typical for complex simulation applications, and allows for hardware-aware optimizations.
In a detailed analysis we compare the performance and energy efficiency against a multi-threaded CPU counterpart. The reported performance and energy efficiency results are indicative of sparse computations on supercomputers.}, doi = {10.1177/1094342016672081}, url = {http://hpc.sagepub.com/content/early/2016/10/05/1094342016672081.abstract}, author = {Hartwig Anzt and Stanimire Tomov and Jack Dongarra} } @techreport {934, title = {Performance, Design, and Autotuning of Batched GEMM for GPUs}, journal = {University of Tennessee Computer Science Technical Report}, number = {UT-EECS-16-739}, year = {2016}, month = {2016-02}, publisher = {University of Tennessee}, abstract = {The general matrix-matrix multiplication (GEMM) is the most important numerical kernel in dense linear algebra. It is the key component for obtaining high performance in most LAPACK routines. As batched computations on relatively small problems continue to gain interest in many scientific applications, a need arises for a high performance GEMM kernel for a batch of small matrices. Such a kernel should be well designed and tuned to handle small sizes, and to maintain high performance for realistic test cases found in the higher level LAPACK routines, and scientific computing applications in general. This paper presents a high performance batched GEMM kernel on Graphics Processing Units (GPUs). We address batched problems with both fixed and variable sizes, and show that specialized GEMM designs and a comprehensive autotuning process are needed to handle problems of small sizes. For most performance tests reported in this paper, the proposed kernels outperform state-of-the-art approaches using a K40c GPU.}, keywords = {Autotuning, Batched GEMM, GEMM, GPU computing, HPC}, author = {Ahmad Abdelfattah and Azzam Haidar and Stanimire Tomov and Jack Dongarra} } @conference {944, title = {Performance, Design, and Autotuning of Batched GEMM for GPUs}, booktitle = {The International Supercomputing Conference (ISC High Performance 2016)}, year = {2016}, month = {2016-06}, address = {Frankfurt, Germany}, abstract = {The general matrix-matrix multiplication (GEMM) is the most important numerical kernel in dense linear algebra, and is the key component for obtaining high performance in most LAPACK routines. As batched computations on relatively small problems continue to gain interest in many scientific applications, a need arises for a high performance GEMM kernel for batches of small matrices. Such a kernel should be well designed and tuned to handle small sizes, and to maintain high performance for realistic test cases found in the higher level LAPACK routines, and scientific computing applications in general. This paper presents a high performance batched GEMM kernel on Graphics Processing Units (GPUs). We address batched problems with both fixed and variable sizes, and show that specialized GEMM designs and a comprehensive autotuning process are needed to handle problems of small sizes.
For most performance tests reported in this paper, the proposed kernels outperform state-of-the-art approaches using a K40c GPU.}, keywords = {Autotuning, Batched GEMM, GEMM, GPU computing, HPC}, author = {Ahmad Abdelfattah and Azzam Haidar and Stanimire Tomov and Jack Dongarra} } @inbook {997, title = {Performance, Design, and Autotuning of Batched GEMM for GPUs}, booktitle = {High Performance Computing: 31st International Conference, ISC High Performance 2016, Frankfurt, Germany, June 19-23, 2016, Proceedings}, number = {9697}, year = {2016}, pages = {21{\textendash}38}, publisher = {Springer International Publishing}, organization = {Springer International Publishing}, abstract = {The general matrix-matrix multiplication (GEMM) is the most important numerical kernel in dense linear algebra, and is the key component for obtaining high performance in most LAPACK routines. As batched computations on relatively small problems continue to gain interest in many scientific applications, a need arises for a high performance GEMM kernel for batches of small matrices. Such a kernel should be well designed and tuned to handle small sizes, and to maintain high performance for realistic test cases found in the higher level LAPACK routines, and scientific computing applications in general. This paper presents a high performance batched GEMM kernel on Graphics Processing Units (GPUs). We address batched problems with both fixed and variable sizes, and show that specialized GEMM designs and a comprehensive autotuning process are needed to handle problems of small sizes. For most performance tests reported in this paper, the proposed kernels outperform state-of-the-art approaches using a K40c GPU.}, isbn = {978-3-319-41321-1}, doi = {10.1007/978-3-319-41321-1_2}, url = {http://dx.doi.org/10.1007/978-3-319-41321-1_2}, author = {Ahmad Abdelfattah and Azzam Haidar and Stanimire Tomov and Jack Dongarra}, editor = {Julian M. Kunkel and Pavan Balaji and Jack Dongarra} } @article {985, title = {Performance optimization of Sparse Matrix-Vector Multiplication for multi-component PDE-based applications using GPUs}, journal = {Concurrency and Computation: Practice and Experience}, volume = {28}, year = {2016}, month = {2016-05}, pages = {3447 - 3465}, abstract = {Simulations of many multi-component PDE-based applications, such as petroleum reservoirs or reacting flows, are dominated by the solution, on each time step and within each Newton step, of large sparse linear systems. The standard solver is a preconditioned Krylov method. Along with application of the preconditioner, memory-bound Sparse Matrix-Vector Multiplication (SpMV) is the most time-consuming operation in such solvers. Multi-species models produce Jacobians with a dense block structure, where the block size can be as large as a few dozen. Failing to exploit this dense block structure vastly underutilizes hardware capable of delivering high performance on dense BLAS operations. This paper presents a GPU-accelerated SpMV kernel for block-sparse matrices. Dense matrix-vector multiplications within the sparse-block structure leverage optimization techniques from the KBLAS library, a high performance library for dense BLAS kernels. The design ideas of KBLAS can be applied to block-sparse matrices. Furthermore, a technique is proposed to balance the workload among thread blocks when there are large variations in the lengths of nonzero rows. Multi-GPU performance is highlighted. 
The proposed SpMV kernel outperforms existing state-of-the-art implementations using matrices with real structures from different applications.}, doi = {10.1002/cpe.3874}, url = {http://onlinelibrary.wiley.com/doi/10.1002/cpe.3874/full}, author = {Ahmad Abdelfattah and Hatem Ltaief and David Keyes and Jack Dongarra} } @conference {943, title = {Performance Tuning and Optimization Techniques of Fixed and Variable Size Batched Cholesky Factorization on GPUs}, booktitle = {International Conference on Computational Science (ICCS{\textquoteright}16)}, year = {2016}, month = {2016-06}, address = {San Diego, CA}, abstract = {

Solving a large number of relatively small linear systems has recently drawn more attention in the HPC community, due to the importance of such computational workloads in many scientific applications, including sparse multifrontal solvers. Modern hardware accelerators and their architecture require a set of optimization techniques that are very different from the ones used in solving one relatively large matrix. In order to impose concurrency on such throughput-oriented architectures, a common practice is to batch the solution of these matrices as one task offloaded to the underlying hardware, rather than solving them individually.

This paper presents a high performance batched Cholesky factorization on large sets of relatively small matrices using Graphics Processing Units (GPUs), and addresses both fixed and variable size batched problems. We investigate various algorithm designs and optimization techniques, and show that it is essential to combine kernel design with performance tuning in order to achieve the best possible performance. We compare our approaches against state-of-the-art CPU solutions as well as GPU-based solutions using existing libraries, and show that, on a K40c GPU for example, our kernels are more than 2{\texttimes} faster.

}, keywords = {batched computation, Cholesky Factorization, GPUs, Tuning}, author = {Ahmad Abdelfattah and Azzam Haidar and Stanimire Tomov and Jack Dongarra} } @article {986, title = {Porting the PLASMA Numerical Library to the OpenMP Standard}, journal = {International Journal of Parallel Programming}, year = {2016}, month = {2016-06}, abstract = {PLASMA is a numerical library intended as a successor to LAPACK for solving problems in dense linear algebra on multicore processors. PLASMA relies on the QUARK scheduler for efficient multithreading of algorithms expressed in a serial fashion. QUARK is a superscalar scheduler and implements automatic parallelization by tracking data dependencies and resolving data hazards at runtime. Recently, this type of scheduling has been incorporated in the OpenMP standard, which makes it possible to transition PLASMA from the proprietary solution offered by QUARK to the standard solution offered by OpenMP. This article studies the feasibility of such a transition.}, issn = {0885-7458}, doi = {10.1007/s10766-016-0441-6}, url = {http://link.springer.com/10.1007/s10766-016-0441-6}, author = {Asim YarKhan and Jakub Kurzak and Piotr Luszczek and Jack Dongarra} } @inproceedings {980, title = {Power Management and Event Verification in PAPI}, journal = {Tools for High Performance Computing 2015: Proceedings of the 9th International Workshop on Parallel Tools for High Performance Computing, September 2015, Dresden, Germany}, year = {2016}, pages = {41-51}, publisher = {Springer International Publishing}, address = {Dresden, Germany}, abstract = {For more than a decade, the PAPI performance monitoring library has helped to implement the familiar maxim attributed to Lord Kelvin: {\textquotedblleft}If you cannot measure it, you cannot improve it.{\textquotedblright} Widely deployed and widely used, PAPI provides a generic, portable interface for the hardware performance counters available on all modern CPUs and some other components of interest that are scattered across the chip and system. Recent and radical changes in processor and system design{\textemdash}systems that combine multicore CPUs and accelerators, shared and distributed memory, PCI-Express and other interconnects{\textemdash}as well as the emergence of power efficiency as a primary design constraint, and reduced data movement as a primary programming goal, pose new challenges and bring new opportunities to PAPI. We discuss new developments of PAPI that allow for multiple sources of performance data to be measured simultaneously via a common software interface. Specifically, a new PAPI component that controls power is discussed. We explore the challenges of shared hardware counters that include system-wide measurements in existing multicore architectures. We conclude with an exploration of future directions for the PAPI interface.
}, isbn = {978-3-319-39589-0}, doi = {10.1007/978-3-319-39589-0_4}, author = {Heike Jagode and Asim YarKhan and Anthony Danalis and Jack Dongarra} } @techreport {966, title = {Report on the Sunway TaihuLight System}, journal = {University of Tennessee Computer Science Technical Report}, number = {UT-EECS-16-742}, year = {2016}, month = {2016-06}, publisher = {University of Tennessee}, url = {http://www.netlib.org/utk/people/JackDongarra/PAPERS/sunway-report-2016.pdf}, author = {Jack Dongarra} } @conference {962, title = {Search Space Generation and Pruning System for Autotuners}, booktitle = {30th IEEE International Parallel \& Distributed Processing Symposium (IPDPS)}, year = {2016}, month = {2016-05}, publisher = {IEEE}, organization = {IEEE}, address = {Chicago, IL}, abstract = {This work tackles two simultaneous challenges faced by autotuners: the ease of describing a complex, multidimensional search space, and the speed of evaluating that space, while applying a multitude of pruning constraints. This article presents a declarative notation for describing a search space and a translation system for conversion to a standard C code for fast and multithreaded, as necessary, evaluation. The notation is Python-based and thus simple in syntax and easy to assimilate by the user interested in tuning rather than learning a new programming language. A large number of dimensions and a large number of pruning constraints may be expressed with little effort. The system is discussed in the context of autotuning the canonical matrix multiplication kernel for NVIDIA GPUs, where the search space has 15 dimensions and involves application of 10 complex pruning constraints. The speed of evaluation is compared against generators created using imperative programming style in various scripting and compiled languages.}, author = {Piotr Luszczek and Mark Gates and Jakub Kurzak and Anthony Danalis and Jack Dongarra} } @article {935, title = {Stability and Performance of Various Singular Value QR Implementations on Multicore CPU with a GPU}, journal = {ACM Transactions on Mathematical Software (TOMS)}, volume = {43}, year = {2016}, month = {2016-10}, abstract = {To orthonormalize a set of dense vectors, Singular Value QR (SVQR) requires only one global reduction between the parallel processing units, and uses BLAS-3 kernels to perform most of its local computation. As a result, compared to other orthogonalization schemes, SVQR obtains superior performance on many of the current computers. In this paper, we study the stability and performance of various SVQR implementations on multicore CPUs with a GPU, focusing on the dense triangular solve, which performs half of the total floating-point operations in SVQR. As a part of this study, we examine its adaptive mixed-precision variant that decides if a lower-precision arithmetic can be used for the triangular solution at runtime without increasing the order of its orthogonality error. Since the backward error of this adaptive mixed-precision variant is significantly greater than that of the standard SVQR, we study its effects on the solution convergence of several subspace projection methods for solving a linear system of equations and for computing singular values or eigenvalues of a sparse matrix.
Our experimental results indicate that in some cases, the convergence rate of the solver may not be affected by the larger backward errors, while the time to solution is reduced.}, author = {Ichitaro Yamazaki and Stanimire Tomov and Jack Dongarra} } @article {1343, title = {A Standard for Batched BLAS Routines}, year = {2016}, month = {2016-04}, publisher = {17th SIAM Conference on Parallel Processing for Scientific Computing (SIAM PP16)}, address = {Paris, France}, author = {Pedro Valero-Lara and Jack Dongarra and Azzam Haidar and Samuel D. Relton and Stanimire Tomov and Mawussi Zounon} } @article {, title = {Sunway TaihuLight Supercomputer Makes Its Appearance}, journal = {National Science Review}, volume = {3}, year = {2016}, month = {2016-09}, pages = {256-266}, doi = {10.1093/nsr/nww044}, author = {Jack Dongarra} } @conference {977, title = {Towards Achieving Performance Portability Using Directives for Accelerators}, booktitle = {The International Conference for High Performance Computing, Networking, Storage and Analysis (SC{\textquoteright}16), Third Workshop on Accelerator Programming Using Directives (WACCPD)}, year = {2016}, month = {2016-11}, publisher = {Innovative Computing Laboratory, University of Tennessee}, organization = {Innovative Computing Laboratory, University of Tennessee}, address = {Salt Lake City, Utah}, abstract = {In this paper we explore the performance portability of directives provided by OpenMP 4 and OpenACC to program various types of node architectures with attached accelerators, both self-hosted multicore and offload multicore/GPU. Our goal is to examine how successful OpenACC and the newer offload features of OpenMP 4.5 are for moving codes between architectures, how much tuning might be required, and what lessons we can learn from this experience. To do this, we use examples of algorithms with varying computational intensities for our evaluation, as both compute and data access efficiency are important considerations for overall application performance. We implement these kernels using various methods provided by newer OpenACC and OpenMP implementations, and we evaluate their performance on various platforms including both x86_64 with attached NVIDIA GPUs, self-hosted Intel Xeon Phi KNL, as well as an x86_64 host system with Intel Xeon Phi coprocessors. In this paper, we explain what factors affected the performance portability, such as how to pick the right programming model, its programming style, its availability on different platforms, and how well compilers can optimize and target multiple platforms.}, author = {M. Graham Lopez and Larrea, V and Joubert, W and Hernandez, O and Azzam Haidar and Stanimire Tomov and Jack Dongarra} } @article {995, title = {Updating Incomplete Factorization Preconditioners for Model Order Reduction}, journal = {Numerical Algorithms}, volume = {73}, number = {3}, year = {2016}, month = {2016-02}, pages = {611{\textendash}630}, abstract = {When solving a sequence of related linear systems by iterative methods, it is common to reuse the preconditioner for several systems, and then to recompute the preconditioner when the matrix has changed significantly. Rather than recomputing the preconditioner from scratch, it is potentially more efficient to update the previous preconditioner. Unfortunately, it is not always known how to update a preconditioner, for example, when the preconditioner is an incomplete factorization.
A recently proposed iterative algorithm for computing incomplete factorizations, however, is able to exploit an initial guess, unlike existing algorithms for incomplete factorizations. By treating a previous factorization as an initial guess to this algorithm, an incomplete factorization may thus be updated. We use a sequence of problems from model order reduction. Experimental results using an optimized GPU implementation show that updating a previous factorization can be inexpensive and effective, making solving sequences of linear systems a potential niche problem for the iterative incomplete factorization algorithm.}, keywords = {key publication}, doi = {10.1007/s11075-016-0110-2}, author = {Hartwig Anzt and Edmond Chow and Jens Saak and Jack Dongarra} } @conference {880, title = {Accelerating Collaborative Filtering for Implicit Feedback Datasets using GPUs}, booktitle = {2015 IEEE International Conference on Big Data (IEEE BigData 2015)}, year = {2015}, month = {2015-11}, publisher = {IEEE}, organization = {IEEE}, address = {Santa Clara, CA}, abstract = {In this paper we accelerate the Alternating Least Squares (ALS) algorithm used for generating product recommendations on the basis of implicit feedback datasets. We approach the algorithm with concepts proven to be successful in High Performance Computing. This includes the formulation of the algorithm as a mix of cache-optimized algorithm-specific kernels and standard BLAS routines, acceleration via graphics processing units (GPUs), use of parallel batched kernels, and autotuning to identify performance winners. For benchmark datasets, the multi-threaded CPU implementation we propose achieves more than a 10 times speedup over the implementations available in the GraphLab and Spark MLlib software packages. For the GPU implementation, the parameters of an algorithm-specific kernel were optimized using a comprehensive autotuning sweep. This results in an additional 2 times speedup over our CPU implementation.}, author = {Mark Gates and Hartwig Anzt and Jakub Kurzak and Jack Dongarra} } @conference {921, title = {Accelerating NWChem Coupled Cluster through dataflow-based Execution}, booktitle = {11th International Conference on Parallel Processing and Applied Mathematics (PPAM 2015)}, year = {2015}, month = {2015-09}, publisher = {Springer International Publishing}, organization = {Springer International Publishing}, address = {Krakow, Poland}, abstract = {Numerical techniques used for describing many-body systems, such as the Coupled Cluster methods (CC) of the quantum chemistry package NWChem, are of extreme interest to the computational chemistry community in fields such as catalytic reactions, solar energy, and bio-mass conversion. In spite of their importance, many of these computationally intensive algorithms have traditionally been thought of in a fairly linear fashion, or are parallelised in coarse chunks. In this paper, we present our effort of converting NWChem{\textquoteright}s CC code into a dataflow-based form that is capable of utilizing the task scheduling system PaRSEC (Parallel Runtime Scheduling and Execution Controller) {\textendash} a software package designed to enable high performance computing at scale. We discuss the modularity of our approach and explain how the PaRSEC-enabled dataflow version of the subroutines seamlessly integrates into the NWChem codebase.
Furthermore, we show how the CC algorithms can be easily decomposed into finer-grained tasks (compared to the original version of NWChem), and how data distribution and load balancing are decoupled and can be tuned independently. We demonstrate performance acceleration by more than a factor of two in the execution of the entire CC component of NWChem, concluding that the utilization of dataflow-based execution for CC methods enables more efficient and scalable computation.}, keywords = {CCSD, dag, dataflow, NWChem, parsec, ptg, tasks}, author = {Heike Jagode and Anthony Danalis and George Bosilca and Jack Dongarra} } @conference {914, title = {Accelerating the LOBPCG method on GPUs using a blocked Sparse Matrix Vector Product}, booktitle = {Spring Simulation Multi-Conference 2015 (SpringSim{\textquoteright}15)}, year = {2015}, month = {2015-04}, publisher = {SCS}, organization = {SCS}, address = {Alexandria, VA}, abstract = {This paper presents a heterogeneous CPU-GPU implementation for a sparse iterative eigensolver, the Locally Optimal Block Preconditioned Conjugate Gradient (LOBPCG). For the key routine generating the Krylov search spaces via the product of a sparse matrix and a block of vectors, we propose a GPU kernel based on a modified sliced ELLPACK format. Blocking a set of vectors and processing them simultaneously accelerates the computation of a set of consecutive SpMVs significantly. Comparing the performance against similar routines from Intel{\textquoteright}s MKL and NVIDIA{\textquoteright}s cuSPARSE library, we identify appealing performance improvements. We integrate it into the highly optimized LOBPCG implementation. Compared to the BLOBEX CPU implementation running on two eight-core Intel Xeon E5-2690s, we accelerate the computation of a small set of eigenvectors using NVIDIA{\textquoteright}s K40 GPU by typically more than an order of magnitude.}, author = {Hartwig Anzt and Stanimire Tomov and Jack Dongarra} } @article {866, title = {Acceleration of GPU-based Krylov solvers via Data Transfer Reduction}, journal = {International Journal of High Performance Computing Applications}, year = {2015}, author = {Hartwig Anzt and William Sawyer and Stanimire Tomov and Piotr Luszczek and Jack Dongarra} } @conference {912, title = {Adaptive Precision Solvers for Sparse Linear Systems}, booktitle = {3rd International Workshop on Energy Efficient Supercomputing (E2SC {\textquoteright}15)}, year = {2015}, month = {2015-11}, publisher = {ACM}, organization = {ACM}, address = {Austin, TX}, author = {Hartwig Anzt and Jack Dongarra and Enrique S. Quintana-Orti} } @article {820, title = {Algorithm-based Fault Tolerance for Dense Matrix Factorizations, Multiple Failures, and Accuracy}, journal = {ACM Transactions on Parallel Computing}, volume = {1}, number = {10}, year = {2015}, month = {2015-01}, pages = {10:1-10:28}, abstract = {Dense matrix factorizations, such as LU, Cholesky and QR, are widely used for scientific applications that require solving systems of linear equations, eigenvalues and linear least squares problems. Such computations are normally carried out on supercomputers, whose ever-growing scale induces a fast decline of the Mean Time To Failure (MTTF). This paper proposes a new hybrid approach, based on Algorithm-Based Fault Tolerance (ABFT), to help matrix factorization algorithms survive fail-stop failures. We consider extreme conditions, such as the absence of any reliable node and the possibility of losing both data and checksum from a single failure.
We will present a generic solution for protecting the right factor, where the updates are applied, of all the above-mentioned factorizations. For the left factor, where the panel has been applied, we propose a scalable checkpointing algorithm. This algorithm features a high degree of checkpointing parallelism and cooperatively utilizes the checksum storage left over from the right-factor protection. The fault-tolerant algorithms derived from this hybrid solution are applicable to a wide range of dense matrix factorizations, with minor modifications. Theoretical analysis shows that the fault tolerance overhead decreases inversely to the scaling in the number of computing units and the problem size. Experimental results of LU and QR factorization on the Kraken (Cray XT5) supercomputer validate the theoretical evaluation and confirm negligible overhead, both with and without errors. Applicability to multiple failures and accuracy after multiple recoveries are also considered.}, keywords = {ABFT, algorithms, fault-tolerance, High Performance Computing, linear algebra}, doi = {10.1145/2686892}, author = {Aurelien Bouteiller and Thomas Herault and George Bosilca and Peng Du and Jack Dongarra}, editor = {Phillip B. Gibbons} } @conference {865, title = {Asynchronous Iterative Algorithm for Computing Incomplete Factorizations on GPUs}, booktitle = {International Supercomputing Conference (ISC 2015)}, year = {2015}, month = {2015-07}, address = {Frankfurt, Germany}, author = {Edmond Chow and Hartwig Anzt and Jack Dongarra} } @conference {843, title = {Batched Matrix Computations on Hardware Accelerators}, booktitle = {EuroMPI/Asia 2015 Workshop}, year = {2015}, month = {2015-09}, address = {Bordeaux, France}, abstract = {Scientific applications require solvers that work on many small size problems that are independent from each other. At the same time, the high-end hardware evolves rapidly and becomes ever more throughput-oriented, and thus there is an increasing need for an effective approach to develop energy-efficient, high-performance codes for these small matrix problems that we call batched factorizations. The many applications that need this functionality could especially benefit from the use of GPUs, which currently are four to five times more energy efficient than multicore CPUs on important scientific workloads. This paper, consequently, describes the development of the most common, one-sided factorizations, Cholesky, LU, and QR, for a set of small dense matrices. The algorithms we present together with their implementations are, by design, inherently parallel. In particular, our approach is based on representing the process as a sequence of batched BLAS routines that are executed entirely on a GPU. Importantly, this is unlike the LAPACK and the hybrid MAGMA factorization algorithms that work under drastically different assumptions of hardware design and efficiency of execution of the various computational kernels involved in the implementation. Thus, our approach is more efficient than what works for a combination of multicore CPUs and GPUs for the problem sizes of interest in the application use cases. The paradigm whereby a single chip (a GPU or a CPU) factorizes a single problem at a time is not at all efficient in our applications{\textquoteright} context. We illustrate all these claims through a detailed performance analysis.
With the help of profiling and tracing tools, we guide our development of batched factorizations to achieve up to a two-fold speedup and a three-fold improvement in energy efficiency compared against our highly optimized batched CPU implementations based on the MKL library. The tested system featured two sockets of Intel Sandy Bridge CPUs. Compared to a batched LU factorization featured in the CUBLAS library for GPUs, we achieve as high as a 2.5x speedup on the NVIDIA K40 GPU.}, author = {Azzam Haidar and Piotr Luszczek and Stanimire Tomov and Jack Dongarra} } @conference {895, title = {Batched Matrix Computations on Hardware Accelerators Based on GPUs}, booktitle = {2015 SIAM Conference on Applied Linear Algebra (SIAM LA)}, year = {2015}, month = {2015-10}, publisher = {SIAM}, organization = {SIAM}, address = {Atlanta, GA}, abstract = {We will present techniques for small matrix computations on GPUs and their use for energy-efficient, high-performance solvers. Work on small problems delivers high performance through improved data reuse. Many numerical libraries and applications need this functionality further developed. We describe the main factorizations, LU, QR, and Cholesky, for a set of small dense matrices in parallel. We achieve significant acceleration and reduced energy consumption compared to other solutions. Our techniques are of interest to GPU application developers in general.}, author = {Azzam Haidar and Ahmad Abdelfattah and Stanimire Tomov and Jack Dongarra} } @article {858, title = {Batched matrix computations on hardware accelerators based on GPUs}, journal = {International Journal of High Performance Computing Applications}, year = {2015}, month = {2015-02}, abstract = {Scientific applications require solvers that work on many small size problems that are independent from each other. At the same time, the high-end hardware evolves rapidly and becomes ever more throughput-oriented, and thus there is an increasing need for an effective approach to develop energy-efficient, high-performance codes for these small matrix problems that we call batched factorizations. The many applications that need this functionality could especially benefit from the use of GPUs, which currently are four to five times more energy efficient than multicore CPUs on important scientific workloads. This paper, consequently, describes the development of the most common, one-sided factorizations, Cholesky, LU, and QR, for a set of small dense matrices. The algorithms we present together with their implementations are, by design, inherently parallel. In particular, our approach is based on representing the process as a sequence of batched BLAS routines that are executed entirely on a GPU. Importantly, this is unlike the LAPACK and the hybrid MAGMA factorization algorithms that work under drastically different assumptions of hardware design and efficiency of execution of the various computational kernels involved in the implementation. Thus, our approach is more efficient than what works for a combination of multicore CPUs and GPUs for the problem sizes of interest in the application use cases. The paradigm whereby a single chip (a GPU or a CPU) factorizes a single problem at a time is not at all efficient in our applications{\textquoteright} context. We illustrate all of these claims through a detailed performance analysis.
With the help of profiling and tracing tools, we guide our development of batched factorizations to achieve up to a two-fold speedup and a three-fold improvement in energy efficiency compared against our highly optimized batched CPU implementations based on the MKL library. The tested system featured two sockets of Intel Sandy Bridge CPUs. Compared with a batched LU factorization featured in the CUBLAS library for GPUs, we achieve as high as a 2.5{\texttimes} speedup on the NVIDIA K40 GPU.}, keywords = {batched factorization, hardware accelerators, numerical linear algebra, numerical software libraries, one-sided factorization algorithms}, doi = {10.1177/1094342014567546}, author = {Azzam Haidar and Tingxing Dong and Piotr Luszczek and Stanimire Tomov and Jack Dongarra} } @conference {928, title = {Cholesky Across Accelerators}, booktitle = {17th IEEE International Conference on High Performance Computing and Communications (HPCC 2015)}, year = {2015}, month = {2015-08}, publisher = {IEEE}, organization = {IEEE}, address = {Elizabeth, NJ}, author = {Asim YarKhan and Azzam Haidar and Chongxiao Cao and Piotr Luszczek and Stanimire Tomov and Jack Dongarra} } @article {852, title = {Composing Resilience Techniques: ABFT, Periodic, and Incremental Checkpointing}, journal = {International Journal of Networking and Computing}, volume = {5}, number = {1}, year = {2015}, month = {2015-01}, pages = {2-15}, abstract = {Algorithm Based Fault Tolerant (ABFT) approaches promise unparalleled scalability and performance in failure-prone environments. Thanks to recent advances in the understanding of the involved mechanisms, a growing number of important algorithms (including all widely used factorizations) have been proven ABFT-capable. In the context of larger applications, these algorithms provide a temporal section of the execution, where the data is protected by its own intrinsic properties, and can therefore be algorithmically recomputed without the need of checkpoints. However, while typical scientific applications spend a significant fraction of their execution time in library calls that can be ABFT-protected, they interleave sections that are difficult or even impossible to protect with ABFT. As a consequence, the only practical fault-tolerance approach for these applications is checkpoint/restart. In this paper we propose a model to investigate the efficiency of a composite protocol that alternates between ABFT and checkpoint/restart for the effective protection of an iterative application composed of ABFT-aware and ABFT-unaware sections. We also consider an incremental checkpointing composite approach in which the algorithmic knowledge is leveraged by a novel optimal dynamic programming algorithm to compute checkpoint dates. We validate these models using a simulator. The model and simulator show that the composite approach drastically increases the performance delivered by an execution platform, especially at scale, by providing the means to increase the interval between checkpoints while simultaneously decreasing the volume of each checkpoint.
}, keywords = {ABFT, checkpoint, fault-tolerance, High-performance computing, model, performance evaluation, resilience}, issn = {2185-2839}, author = {George Bosilca and Aurelien Bouteiller and Thomas Herault and Yves Robert and Jack Dongarra} } @article {851, title = {Computing Low-rank Approximation of a Dense Matrix on Multicore CPUs with a GPU and its Application to Solving a Hierarchically Semiseparable Linear System of Equations}, journal = {Scientific Programming}, year = {2015}, abstract = {Low-rank matrices arise in many scientific and engineering computations. Both computational and storage costs of manipulating such matrices may be reduced by taking advantage of their low-rank properties. To compute a low-rank approximation of a dense matrix, in this paper, we study the performance of QR factorization with column pivoting or with restricted pivoting on multicore CPUs with a GPU. We first propose several techniques to reduce the postprocessing time, which is required for restricted pivoting, on a modern CPU. We then examine the potential of using a GPU to accelerate the factorization process with both column and restricted pivoting. Our performance results on two eight-core Intel Sandy Bridge CPUs with one NVIDIA Kepler GPU demonstrate that using the GPU, the factorization time can be reduced by a factor of more than two. In addition, to study the performance of our implementations in practice, we integrate them into the recently developed software StruMF, which algebraically exploits such low-rank structures for solving a general sparse linear system of equations. Our performance results for solving Poisson{\textquoteright}s equations demonstrate that the proposed techniques can significantly reduce the preconditioner construction time of StruMF on the CPUs, and the construction time can be further reduced by 10\%-50\% using the GPU.}, author = {Ichitaro Yamazaki and Stanimire Tomov and Jack Dongarra} } @conference {862, title = {On the Design, Development, and Analysis of Optimized Matrix-Vector Multiplication Routines for Coprocessors}, booktitle = {ISC High Performance 2015}, year = {2015}, month = {2015-07}, address = {Frankfurt, Germany}, abstract = {The dramatic change in computer architecture due to the manycore paradigm shift has made the development of optimal numerical routines extremely challenging. In this work, we target the development of numerical algorithms and implementations for Xeon Phi coprocessor architecture designs. In particular, we examine and optimize the general and symmetric matrix-vector multiplication routines (gemv/symv), which are some of the most heavily used linear algebra kernels in many important engineering and physics applications. We describe a successful approach to addressing the challenges of this problem, from our algorithm design, performance analysis, and programming model to kernel optimization. Our goal, by targeting low-level, easy-to-understand fundamental kernels, is to develop new optimization strategies that can be effective elsewhere for use on manycore coprocessors, and to show significant performance improvements compared to the existing state-of-the-art implementations.
Therefore, in addition to the new optimization strategies, analysis, and optimal performance results, we finally present the significance of using these routines/strategies to accelerate higher-level numerical algorithms for the eigenvalue problem (EVP) and the singular value decomposition (SVD), which are themselves foundational for many important applications.}, author = {Khairul Kabir and Azzam Haidar and Stanimire Tomov and Jack Dongarra} } @conference {840, title = {Design for a Soft Error Resilient Dynamic Task-based Runtime}, booktitle = {29th IEEE International Parallel \& Distributed Processing Symposium (IPDPS)}, year = {2015}, month = {2015-05}, publisher = {IEEE}, organization = {IEEE}, address = {Hyderabad, India}, abstract = {As the scale of modern computing systems grows, failures will happen more frequently. On the way to exascale, a generic, low-overhead, resilient extension becomes a desired capability of any programming paradigm. In this paper we explore three additions to a dynamic task-based runtime to build a generic framework providing soft error resilience to task-based programming paradigms. The first recovers the application by re-executing the minimum required sub-DAG; the second takes critical checkpoints of the data flowing between tasks to minimize the necessary re-execution; and the last takes advantage of algorithmic properties to recover the data without re-execution. These mechanisms have been implemented in the PaRSEC task-based runtime framework. Experimental results validate our approach and quantify the overhead introduced by such mechanisms.}, author = {Chongxiao Cao and George Bosilca and Thomas Herault and Jack Dongarra} } @conference {894, title = {Efficient Eigensolver Algorithms on Accelerator Based Architectures}, booktitle = {2015 SIAM Conference on Applied Linear Algebra (SIAM LA)}, year = {2015}, month = {2015-10}, publisher = {SIAM}, organization = {SIAM}, address = {Atlanta, GA}, abstract = {The enormous gap between the high-performance capabilities of GPUs and the slow interconnect between them has made the development of numerical software that is scalable across multiple GPUs extremely challenging. We describe a successful methodology for addressing these challenges, from our algorithm design, kernel optimization, and tuning to our programming model, in the development of a scalable, high-performance symmetric eigenvalue and singular value solver.}, author = {Azzam Haidar and Piotr Luszczek and Stanimire Tomov and Jack Dongarra} } @conference {886, title = {Efficient Implementation Of Quantum Materials Simulations On Distributed CPU-GPU Systems}, booktitle = {The International Conference for High Performance Computing, Networking, Storage and Analysis (SC15)}, year = {2015}, month = {2015-11}, publisher = {ACM}, organization = {ACM}, address = {Austin, TX}, abstract = {We present a scalable implementation of the Linearized Augmented Plane Wave method for distributed memory systems, which relies on an efficient distributed, block-cyclic setup of the Hamiltonian and overlap matrices and allows us to turn around highly accurate 1000+ atom all-electron quantum materials simulations on clusters with a few hundred nodes. The implementation runs efficiently on standard multicore CPU nodes, as well as hybrid CPU-GPU nodes. The key for the latter is a novel algorithm to solve the generalized eigenvalue problem for dense, complex Hermitian matrices on distributed hybrid CPU-GPU systems.
Performance tests for Li-intercalated CoO2 supercells containing 1501 atoms demonstrate that high-accuracy, transferable quantum simulations can now be used in throughput materials search problems. While our application can achieve scalable performance through CPU-only libraries like ScaLAPACK or ELPA2, our new hybrid solver enables the efficient use of GPUs and shows that a hybrid CPU-GPU architecture scales to a desired performance using substantially fewer cluster nodes and, notably, is considerably more energy efficient than traditional multicore CPU-only systems for such complex applications.}, author = {Raffaele Solc{\`a} and Anton Kozhevnikov and Azzam Haidar and Stanimire Tomov and Thomas C. Schulthess and Jack Dongarra} } @conference {857, title = {Energy Efficiency and Performance Frontiers for Sparse Computations on GPU Supercomputers}, booktitle = {Sixth International Workshop on Programming Models and Applications for Multicores and Manycores (PMAM {\textquoteright}15)}, year = {2015}, month = {2015-02}, publisher = {ACM}, organization = {ACM}, address = {San Francisco, CA}, abstract = {In this paper we unveil some energy efficiency and performance frontiers for sparse computations on GPU-based supercomputers. To do this, we consider state-of-the-art implementations of the sparse matrix-vector (SpMV) product in libraries like cuSPARSE, MKL, and MAGMA, and their use in the LOBPCG eigen-solver. LOBPCG is chosen as a benchmark for this study as it combines an interesting mix of sparse and dense linear algebra operations with potential for hardware-aware optimizations. Most notably, LOBPCG includes a blocking technique that is a common performance optimization for many applications. In particular, multiple memory-bound SpMV operations are blocked into a SpM-matrix product (SpMM), which achieves significantly higher performance than a sequence of SpMVs. We provide details about the GPU kernels we use for the SpMV, SpMM, and the LOBPCG implementation design, and study performance and energy consumption compared to CPU solutions. While a typical sparse computation like the SpMV reaches only a fraction of the peak of current GPUs, we show that the SpMM achieves up to a 6x performance improvement over the GPU{\textquoteright}s SpMV, and the GPU-accelerated LOBPCG based on this kernel is 3 to 5x faster than multicore CPUs with the same power draw, e.g., a K40 GPU vs. two Sandy Bridge CPUs (16 cores). In practice though, we show that currently available CPU implementations are much slower due to missed optimization opportunities.
These performance results translate to similar improvements in energy consumption, and are indicative of today{\textquoteright}s frontiers in energy efficiency and performance for sparse computations on supercomputers.}, isbn = {978-1-4503-3404-4}, doi = {10.1145/2712386.2712387}, author = {Hartwig Anzt and Stanimire Tomov and Jack Dongarra} } @booklet {918, title = {Exascale Computing and Big Data}, journal = {Communications of the ACM}, volume = {58}, number = {7}, year = {2015}, month = {2015-07}, pages = {56-68}, publisher = {ACM}, type = {Magazine Article}, abstract = {Scientific discovery and engineering innovation require unifying traditionally separated high-performance computing and big data analytics.}, doi = {10.1145/2699414}, author = {Dan Reed and Jack Dongarra} } @article {982, title = {Experiences in Autotuning Matrix Multiplication for Energy Minimization on GPUs}, journal = {Concurrency and Computation: Practice and Experience}, volume = {27}, year = {2015}, month = {2015-12}, pages = {5096 - 5113}, abstract = {In this paper, we report extensive results and analysis of autotuning the computationally intensive graphics processing unit (GPU) kernel for dense matrix{\textendash}matrix multiplication in double precision. In contrast to traditional autotuning and/or optimization for runtime performance only, we also take the energy efficiency into account. For kernels achieving equal performance, we show significant differences in their energy balance. We also identify the memory throughput as the most influential metric that trades off performance and energy efficiency. As a result, the performance optimal case ends up not being the most efficient kernel in overall resource use.}, keywords = {Autotuning, energy efficiency, hardware accelerators, matrix multiplication, power}, doi = {10.1002/cpe.3516}, url = {http://doi.wiley.com/10.1002/cpe.3516}, author = {Hartwig Anzt and Blake Haugen and Jakub Kurzak and Piotr Luszczek and Jack Dongarra} } @techreport {919, title = {Fault Tolerance Techniques for High-performance Computing}, journal = {University of Tennessee Computer Science Technical Report (also LAWN 289)}, number = {UT-EECS-15-734}, year = {2015}, month = {2015-05}, publisher = {University of Tennessee}, abstract = {This report provides an introduction to resilience methods. The emphasis is on checkpointing, the de-facto standard technique for resilience in High Performance Computing. We present the main two protocols, namely coordinated checkpointing and hierarchical checkpointing. Then we introduce performance models and use them to assess the performance of these protocols. We cover the Young/Daly formula for the optimal period and much more! Next we explain how the efficiency of checkpointing can be improved via fault prediction or replication. Then we move to application-specific methods, such as ABFT.
We conclude the report by discussing techniques to cope with silent errors (or silent data corruption).}, url = {http://www.netlib.org/lapack/lawnspdf/lawn289.pdf}, author = {Jack Dongarra and Thomas Herault and Yves Robert} } @conference {887, title = {Flexible Linear Algebra Development and Scheduling with Cholesky Factorization}, booktitle = {17th IEEE International Conference on High Performance Computing and Communications}, year = {2015}, month = {2015-08}, address = {Newark, NJ}, abstract = {Modern high performance computing environments are composed of networks of compute nodes that often contain a variety of heterogeneous compute resources, such as multicore CPUs, GPUs, and coprocessors. One challenge faced by domain scientists is how to efficiently use all these distributed, heterogeneous resources. In order to use the GPUs effectively, the workload parallelism needs to be much greater than the parallelism for a multicore CPU. On the other hand, a Xeon Phi coprocessor will work most effectively with a degree of parallelism between that of GPUs and multicore CPUs. Additionally, effectively using distributed memory nodes brings out another level of complexity, where the workload must be carefully partitioned over the nodes. In this work we use a lightweight runtime environment to handle many of the complexities in such distributed, heterogeneous systems. The runtime environment uses task-superscalar concepts to enable the developer to write serial code while providing parallel execution. The task-programming model allows the developer to write resource-specialization code, so that each resource gets an appropriately sized workload grain. Our task programming abstraction enables the developer to write a single algorithm that will execute efficiently across the distributed heterogeneous machine. We demonstrate the effectiveness of our approach with performance results for dense linear algebra applications, specifically the Cholesky factorization.}, author = {Azzam Haidar and Asim YarKhan and Chongxiao Cao and Piotr Luszczek and Stanimire Tomov and Jack Dongarra} } @conference {864, title = {Framework for Batched and GPU-resident Factorization Algorithms to Block Householder Transformations}, booktitle = {ISC High Performance}, year = {2015}, month = {2015-07}, publisher = {Springer}, organization = {Springer}, address = {Frankfurt, Germany}, author = {Azzam Haidar and Tingxing Dong and Stanimire Tomov and Piotr Luszczek and Jack Dongarra} } @conference {913, title = {GPU-accelerated Co-design of Induced Dimension Reduction: Algorithmic Fusion and Kernel Overlap}, booktitle = {2nd International Workshop on Hardware-Software Co-Design for High Performance Computing}, year = {2015}, month = {2015-11}, publisher = {ACM}, organization = {ACM}, address = {Austin, TX}, abstract = {In this paper we present an optimized GPU co-design of the Induced Dimension Reduction (IDR) algorithm for solving linear systems. Starting from a baseline implementation based on the generic BLAS routines from the MAGMA software library, we apply optimizations that are based on kernel fusion and kernel overlap. Runtime experiments are used to investigate the benefit of the distinct optimization techniques for different variants of the IDR algorithm. A comparison to the reference implementation reveals that the interplay between them can succeed in cutting the overall runtime by up to about one third.}, author = {Hartwig Anzt and Eduardo Ponce and Gregory D. Peterson and Jack Dongarra} }
@conference {841, title = {Hierarchical DAG scheduling for Hybrid Distributed Systems}, booktitle = {29th IEEE International Parallel \& Distributed Processing Symposium (IPDPS)}, year = {2015}, month = {2015-05}, publisher = {IEEE}, organization = {IEEE}, address = {Hyderabad, India}, abstract = {Accelerator-enhanced computing platforms have drawn a lot of attention due to their massive peak computational capacity. Despite significant advances in the programming interfaces to such hybrid architectures, traditional programming paradigms struggle to map the resulting multi-dimensional heterogeneity and the expression of algorithm parallelism, resulting in sub-optimal effective performance. Task-based programming paradigms have the capability to alleviate some of the programming challenges on distributed hybrid many-core architectures. In this paper we take this concept a step further by showing that the potential of task-based programming paradigms can be greatly increased with minimal modification of the underlying runtime combined with the right algorithmic changes. We propose two novel recursive algorithmic variants for one-sided factorizations and describe the changes to the PaRSEC task-scheduling runtime to build a framework where the task granularity is dynamically adjusted to adapt the degree of available parallelism and kernel efficiency according to runtime conditions. Based on an extensive set of results we show that, with one-sided factorizations, i.e. Cholesky and QR, a carefully written algorithm, supported by an adaptive task-based runtime, is capable of reaching a degree of performance and scalability never achieved before in distributed hybrid environments.}, keywords = {dense linear algebra, gpu, heterogeneous architecture, PaRSEC runtime}, author = {Wei Wu and Aurelien Bouteiller and George Bosilca and Mathieu Faverge and Jack Dongarra} } @inbook {927, title = {High-Performance Computing}, booktitle = {The Princeton Companion to Applied Mathematics}, year = {2015}, pages = {839-842}, publisher = {Princeton University Press}, organization = {Princeton University Press}, address = {Princeton, New Jersey}, isbn = {9781400874477}, author = {Jack Dongarra and Nicholas J. Higham and Mark R. Dennis and Paul Glendinning and Paul A. Martin and Fadil Santosa and Jared Tanner} } @article {923, title = {High-Performance Conjugate-Gradient Benchmark: A New Metric for Ranking High-Performance Computing Systems}, journal = {The International Journal of High Performance Computing Applications}, year = {2015}, abstract = {We describe a new high-performance conjugate-gradient (HPCG) benchmark. HPCG is composed of computations and data-access patterns commonly found in scientific applications. HPCG strives for a better correlation to existing codes from the computational science domain and to be representative of their performance. HPCG is meant to help drive the computer system design and implementation in directions that will better impact future performance improvement.}, keywords = {Additive Schwarz, HPC Benchmarking, Multigrid smoothing, Preconditioned Conjugate Gradient, Validation and Verification}, doi = {10.1177/1094342015593158}, author = {Jack Dongarra and Michael A. Heroux and Piotr Luszczek} }
@article {829, title = {HPC Programming on Intel Many-Integrated-Core Hardware with MAGMA Port to Xeon Phi}, journal = {Scientific Programming}, volume = {23}, year = {2015}, month = {2015-01}, abstract = {This paper presents the design and implementation of several fundamental dense linear algebra (DLA) algorithms for multicore with Intel Xeon Phi coprocessors. In particular, we consider algorithms for solving linear systems. Further, we give an overview of the MAGMA MIC library, an open source, high performance library that incorporates the developments presented and, in general, provides the DLA functionality of the popular LAPACK library to heterogeneous architectures of multicore with coprocessors. The LAPACK compliance simplifies the use of the MAGMA MIC library in applications, while providing them with portably performant DLA. High performance is obtained through use of the high-performance BLAS, hardware-specific tuning, and a hybridization methodology where we split the algorithm into computational tasks of various granularities. Execution of those tasks is properly scheduled over the heterogeneous hardware components by minimizing data movements and mapping algorithmic requirements to the architectural strengths of the various heterogeneous hardware components. Our methodology and programming techniques are incorporated into the MAGMA MIC API, which abstracts the application developer from the specifics of the Xeon Phi architecture and is therefore applicable to algorithms beyond the scope of DLA.}, keywords = {communication and computation overlap, dynamic runtime scheduling using dataflow dependences, hardware accelerators and coprocessors, Intel Xeon Phi processor, Many Integrated Cores, numerical linear algebra}, issn = {1058-9244}, doi = {10.3233/SPR-140404}, author = {Azzam Haidar and Jack Dongarra and Khairul Kabir and Mark Gates and Piotr Luszczek and Stanimire Tomov and Yulu Jia} } @techreport {920, title = {HPCG Benchmark: a New Metric for Ranking High Performance Computing Systems}, journal = {University of Tennessee Computer Science Technical Report}, number = {ut-eecs-15-736}, year = {2015}, month = {2015-01}, publisher = {University of Tennessee}, abstract = {We describe a new high performance conjugate gradient (HPCG) benchmark. HPCG is composed of computations and data access patterns commonly found in scientific applications. HPCG strives for a better correlation to existing codes from the computational science domain and to be representative of their performance. HPCG is meant to help drive the computer system design and implementation in directions that will better impact future performance improvement.}, keywords = {Additive Schwarz, HPC Benchmarking, Multigrid smoothing, Preconditioned Conjugate Gradient, Validation and Verification}, url = {http://www.eecs.utk.edu/resources/library/file/1047/ut-eecs-15-736.pdf}, author = {Jack Dongarra and Michael A. Heroux and Piotr Luszczek} }
@article {881, title = {Implementation and Tuning of Batched Cholesky Factorization and Solve for NVIDIA GPUs}, journal = {IEEE Transactions on Parallel and Distributed Systems}, number = {1045-9219}, year = {2015}, month = {2015-11}, author = {Jakub Kurzak and Hartwig Anzt and Mark Gates and Jack Dongarra} } @conference {878, title = {Iterative Sparse Triangular Solves for Preconditioning}, booktitle = {EuroPar 2015}, year = {2015}, month = {2015-08}, publisher = {Springer Berlin}, organization = {Springer Berlin}, address = {Vienna, Austria}, abstract = {Sparse triangular solvers are typically parallelized using level scheduling techniques, but parallel efficiency is poor on high-throughput architectures like GPUs. We propose using an iterative approach for solving sparse triangular systems when an approximation is suitable. This approach will not work for all problems, but can be successful for sparse triangular matrices arising from incomplete factorizations, where an approximate solution is acceptable. We demonstrate the performance gains that this approach can have on GPUs in the context of solving sparse linear systems with a preconditioned Krylov subspace method. We also illustrate the effect of using asynchronous iterations.}, doi = {10.1007/978-3-662-48096-0_50}, url = {http://dx.doi.org/10.1007/978-3-662-48096-0_50}, author = {Hartwig Anzt and Edmond Chow and Jack Dongarra} } @conference {888, title = {MAGMA Embedded: Towards a Dense Linear Algebra Library for Energy Efficient Extreme Computing}, booktitle = {2015 IEEE High Performance Extreme Computing Conference (HPEC {\textquoteright}15), (Best Paper Award)}, year = {2015}, month = {2015-09}, publisher = {IEEE}, organization = {IEEE}, address = {Waltham, MA}, abstract = {Embedded computing, not only in large systems like drones and hybrid vehicles, but also in small portable devices like smart phones and watches, is getting more extreme to meet ever-increasing demands for extended and improved functionality. This, combined with the typical constraints of low power consumption and small size, makes the design of numerical libraries for embedded systems challenging. In this paper, we present the design and implementation of embedded-system-aware algorithms that target these challenges in the area of dense linear algebra. We consider the fundamental problems of solving linear systems of equations and least squares problems, using the LU, QR, and Cholesky factorizations, and illustrate our results, both in terms of performance and energy efficiency, on the Jetson TK1 development kit. We developed performance optimizations for both small and large problems. In contrast to the corresponding LAPACK algorithms, the new designs target the use of many-cores, readily available now even in mobile devices like the Jetson TK1, e.g., featuring 192 CUDA cores. The implementations presented will form the core of a MAGMA Embedded library, to be released as part of the MAGMA libraries.
}, author = {Azzam Haidar and Stanimire Tomov and Piotr Luszczek and Jack Dongarra} } @article {1347, title = {MAGMA MIC: Optimizing Linear Algebra for Intel Xeon Phi}, year = {2015}, month = {2015-06}, publisher = {ISC High Performance (ISC15), Intel Booth Presentation}, address = {Frankfurt, Germany}, author = {Hartwig Anzt and Jack Dongarra and Mark Gates and Azzam Haidar and Khairul Kabir and Piotr Luszczek and Stanimire Tomov and Ichitaro Yamazaki} } @conference {891, title = {Mixed-precision Block Gram Schmidt Orthogonalization}, booktitle = {6th Workshop on Latest Advances in Scalable Algorithms for Large-Scale Systems}, year = {2015}, month = {2015-11}, publisher = {ACM}, organization = {ACM}, address = {Austin, TX}, abstract = {The mixed-precision Cholesky QR (CholQR) can orthogonalize the columns of a dense matrix with the minimum communication cost. Moreover, its orthogonality error depends only linearly on the condition number of the input matrix. However, when the desired higher precision is not supported by the hardware, software-emulated arithmetic is needed, which can significantly increase the computational cost. When there are a large number of columns to be orthogonalized, this computational overhead can have a significant impact on the orthogonalization time, and the mixed-precision CholQR can be much slower than the standard CholQR. In this paper, we examine several block variants of the algorithm, which reduce the computational overhead associated with the software-emulated arithmetic while maintaining the same orthogonality error bound as the mixed-precision CholQR. Our numerical and performance results on multicore CPUs with a GPU, as well as a hybrid CPU/GPU cluster, demonstrate that compared to the mixed-precision CholQR, such a block variant can obtain speedups of up to 7:1 while maintaining numerical errors of about the same order.}, author = {Ichitaro Yamazaki and Stanimire Tomov and Jakub Kurzak and Jack Dongarra and Jesse Barlow} } @article {856, title = {Mixed-Precision Cholesky QR Factorization and its Case Studies on Multicore CPU with Multiple GPUs}, journal = {SIAM Journal on Scientific Computing}, volume = {37}, number = {3}, year = {2015}, month = {2015-05}, pages = {C203-C330}, abstract = {To orthonormalize the columns of a dense matrix, the Cholesky QR (CholQR) requires only one global reduction between the parallel processing units and performs most of its computation using BLAS-3 kernels. As a result, compared to other orthogonalization algorithms, CholQR obtains superior performance on many of the current computer architectures, where the communication is becoming increasingly expensive compared to the arithmetic operations. This is especially true when the input matrix is tall-skinny. Unfortunately, the orthogonality error of CholQR depends quadratically on the condition number of the input matrix, and it is numerically unstable when the matrix is ill-conditioned. To enhance the stability of CholQR, we recently used mixed-precision arithmetic; the input and output matrices are in the working precision, but some of its intermediate results are accumulated in the doubled precision. In this paper, we analyze the numerical properties of this mixed-precision CholQR. Our analysis shows that by selectively using the doubled precision, the orthogonality error of the mixed-precision CholQR only depends linearly on the condition number of the input matrix.
We provide numerical results to demonstrate the improved numerical stability of the mixed-precision CholQR in practice. We then study its performance. When the target hardware does not support the desired higher precision, software emulation is needed. For example, using software-emulated double-double precision for the working 64-bit double precision, the mixed-precision CholQR requires about 8.5x more floating-point instructions than the standard CholQR. On the other hand, the increase in the communication cost using the double-double precision is less significant, and our performance results on a multicore CPU with different graphics processing units (GPUs) demonstrate that the overhead of using the double-double arithmetic is decreasing on newer architectures, where the computation is becoming less expensive compared to the communication. As a result, with the latest NVIDIA GPU, the mixed-precision CholQR was only 1.4x slower than the standard CholQR. Finally, we present case studies of using the mixed-precision CholQR within communication-avoiding variants of Krylov subspace projection methods for solving a nonsymmetric linear system of equations and for solving a symmetric eigenvalue problem, on a multicore CPU with multiple GPUs. These case studies demonstrate that by using the higher precision for this small but critical segment of the Krylov methods, we can improve not only the overall numerical stability of the solvers but also, in some cases, their performance.}, doi = {10.1137/14M0973773}, author = {Ichitaro Yamazaki and Stanimire Tomov and Jack Dongarra} } @conference {897, title = {Mixed-precision Orthogonalization Process Performance on Multicore CPUs with GPUs}, booktitle = {2015 SIAM Conference on Applied Linear Algebra}, year = {2015}, month = {2015-10}, publisher = {SIAM}, organization = {SIAM}, address = {Atlanta, GA}, abstract = {Orthogonalizing a set of dense vectors is an important computational kernel in subspace projection methods for solving large-scale problems. In this talk, we discuss our efforts to improve the performance of the kernel, while maintaining its numerical accuracy. Our experimental results demonstrate the effectiveness of our approaches.}, author = {Ichitaro Yamazaki and Jesse Barlow and Stanimire Tomov and Jakub Kurzak and Jack Dongarra} } @article {917, title = {Mixing LU-QR Factorization Algorithms to Design High-Performance Dense Linear Algebra Solvers}, journal = {Journal of Parallel and Distributed Computing}, volume = {85}, year = {2015}, month = {2015-11}, pages = {32-46}, abstract = {This paper introduces hybrid LU{\textendash}QR algorithms for solving dense linear systems of the form Ax=b. Throughout a matrix factorization, these algorithms dynamically alternate LU with local pivoting and QR elimination steps based upon some robustness criterion. LU elimination steps can be very efficiently parallelized, and are twice as cheap as QR steps in terms of floating-point operations. However, LU steps are not necessarily stable, while QR steps are always stable. The hybrid algorithms execute a QR step when a robustness criterion detects some risk for instability, and they execute an LU step otherwise. The choice between LU and QR steps must have a small computational overhead and must provide a satisfactory level of stability with as few QR steps as possible.
In this paper, we introduce several robustness criteria and we establish upper bounds on the growth factor of the norm of the updated matrix incurred by each of these criteria. In addition, we describe the implementation of the hybrid algorithms through an extension of the PaRSEC software to allow for dynamic choices during execution. Finally, we analyze both stability and performance results compared to state-of-the-art linear solvers on parallel distributed multicore platforms. A comprehensive set of experiments shows that hybrid LU{\textendash}QR algorithms provide a continuous range of trade-offs between stability and performance.}, keywords = {lu factorization, numerical algorithms, QR factorization, stability, performance}, doi = {10.1016/j.jpdc.2015.06.007}, author = {Mathieu Faverge and Julien Herrmann and Julien Langou and Bradley Lowery and Yves Robert and Jack Dongarra} } @conference {916, title = {Optimization for Performance and Energy for Batched Matrix Computations on GPUs}, booktitle = {8th Workshop on General Purpose Processing Using GPUs (GPGPU 8)}, year = {2015}, month = {2015-02}, publisher = {ACM}, organization = {ACM}, address = {San Francisco, CA}, abstract = {As modern hardware keeps evolving, an increasingly effective approach to develop energy efficient and high-performance solvers is to design them to work on many small size independent problems. Many applications already need this functionality, especially for GPUs, which are known to be currently about four to five times more energy efficient than multicore CPUs. We describe the development of the main one-sided factorizations that work for a set of small dense matrices in parallel, and we illustrate our techniques on the LU and Cholesky factorizations. We refer to this mode of operation as a batched factorization. Our approach is based on representing the algorithms as a sequence of batched BLAS routines for GPU-only execution. The goal of avoiding multicore CPU use, e.g., as in the hybrid CPU-GPU algorithms, is to exclusively benefit from the GPU{\textquoteright}s significantly higher energy efficiency, as well as from the removal of the costly CPU-to-GPU communications. Furthermore, we do not use a single symmetric multiprocessor (on the GPU) to factorize a single problem at a time. We illustrate how our performance analysis and the use of profiling and tracing tools guided the development and optimization of batched factorizations to achieve up to a 2-fold speedup and a 3-fold better energy efficiency compared to our highly optimized batched CPU implementations based on the MKL library (when using two sockets of Intel Sandy Bridge CPUs). Compared to a batched LU factorization featured in the CUBLAS library for GPUs, we achieved up to a 2.5x speedup on the K40 GPU.}, keywords = {batched factorization, hardware accelerators, numerical linear algebra, numerical software libraries, one-sided factorization algorithms}, doi = {10.1145/2716282.2716288}, author = {Azzam Haidar and Tingxing Dong and Piotr Luszczek and Stanimire Tomov and Jack Dongarra} } @article {936, title = {Parallel Programming Models for Dense Linear Algebra on Heterogeneous Systems}, journal = {Supercomputing Frontiers and Innovations}, volume = {2}, number = {4}, year = {2015}, month = {2015-10}, abstract = {We present a review of the current best practices in parallel programming models for dense linear algebra (DLA) on heterogeneous architectures. We consider multicore CPUs, stand-alone manycore coprocessors, GPUs, and combinations of these.
Of interest is the evolution of the programming models for DLA libraries {\textendash} in particular, the evolution from the popular LAPACK and ScaLAPACK libraries to their modernized counterparts PLASMA (for multicore CPUs) and MAGMA (for heterogeneous architectures), as well as other programming models and libraries. Besides providing insights into the programming techniques of the libraries considered, we outline our view of the current strengths and weaknesses of their programming models {\textendash} especially in regard to hardware trends and the ease of programming high-performance numerical software that current applications need {\textendash} in order to motivate work and future directions for the next generation of parallel programming models for high-performance linear algebra libraries on heterogeneous systems.}, keywords = {dense linear algebra, gpu, HPC, Multicore, plasma, Programming models, runtime}, doi = {10.14529/jsfi1504}, author = {Maksims Abalenkovs and Ahmad Abdelfattah and Jack Dongarra and Mark Gates and Azzam Haidar and Jakub Kurzak and Piotr Luszczek and Stanimire Tomov and Ichitaro Yamazaki and Asim YarKhan} } @conference {915, title = {PaRSEC in Practice: Optimizing a Legacy Chemistry Application through Distributed Task-Based Execution}, booktitle = {2015 IEEE International Conference on Cluster Computing}, year = {2015}, month = {2015-09}, publisher = {IEEE}, organization = {IEEE}, address = {Chicago, IL}, abstract = {Task-based execution has been growing in popularity as a means to deliver a good balance between performance and portability in the post-petascale era. The Parallel Runtime Scheduling and Execution Control (PaRSEC) framework is a task-based runtime system that we designed to achieve high performance computing at scale. PaRSEC offers a programming paradigm that is different from what has traditionally been used to develop large-scale parallel scientific applications. In this paper, we discuss the use of PaRSEC to convert a part of the Coupled Cluster (CC) component of the Quantum Chemistry package NWCHEM into a task-based form. We explain how we organized the computation of the CC methods in individual tasks with explicitly defined data dependencies between them and re-integrated the modified code into NWCHEM. We present a thorough performance evaluation and demonstrate that the modified code outperforms the original by more than a factor of two. We also compare the performance of different variants of the modified code and explain the different behaviors that lead to the differences in performance.}, keywords = {dag, parsec, ptg, tasks}, author = {Anthony Danalis and Heike Jagode and George Bosilca and Jack Dongarra} } @conference {860, title = {Performance Analysis and Design of a Hessenberg Reduction using Stabilized Blocked Elementary Transformations for New Architectures}, booktitle = {The Spring Simulation Multi-Conference 2015 (SpringSim{\textquoteright}15), Best Paper Award}, year = {2015}, month = {2015-04}, address = {Alexandria, VA}, abstract = {The solution of nonsymmetric eigenvalue problems, Ax = λx, can be accelerated substantially by first reducing A to an upper Hessenberg matrix H that has the same eigenvalues as A. This can be done using Householder orthogonal transformations, which are a well-established standard, or stabilized elementary transformations.
The latter approach, although having half the flops of the former, has been used less in practice, e.g., on computer architectures with well-developed hierarchical memories, because of its memory-bound operations and the complexity of stabilizing it. In this paper we revisit the stabilized elementary transformations approach in the context of new architectures {\textendash} both multicore CPUs and Xeon Phi coprocessors. We derive, for the first time, a blocked version of the algorithm. The blocked version reduces the memory-bound operations, and we analyze its performance. A performance model is developed that shows the limitations of both approaches. The competitiveness of using stabilized elementary transformations has been quantified, highlighting that it can be 20 to 30\% faster on current high-end multicore CPUs and Xeon Phi coprocessors.}, keywords = {Eigenvalues problem, Hessenberg reduction, Multi/Many-core, Stabilized Elementary Transformations}, author = {Khairul Kabir and Azzam Haidar and Stanimire Tomov and Jack Dongarra} } @conference {861, title = {Performance Analysis and Optimization of Two-Sided Factorization Algorithms for Heterogeneous Platform}, booktitle = {International Conference on Computational Science (ICCS 2015)}, year = {2015}, month = {2015-06}, address = {Reykjav{\'\i}k, Iceland}, author = {Khairul Kabir and Azzam Haidar and Stanimire Tomov and Jack Dongarra} } @conference {884, title = {Performance of Random Sampling for Computing Low-rank Approximations of a Dense Matrix on GPUs}, booktitle = {The International Conference for High Performance Computing, Networking, Storage and Analysis (SC15)}, year = {2015}, month = {2015-11}, publisher = {ACM}, organization = {ACM}, address = {Austin, TX}, author = {Theo Mary and Ichitaro Yamazaki and Jakub Kurzak and Piotr Luszczek and Stanimire Tomov and Jack Dongarra} } @conference {890, title = {Plan B: Interruption of Ongoing MPI Operations to Support Failure Recovery}, booktitle = {22nd European MPI Users{\textquoteright} Group Meeting}, year = {2015}, month = {2015-09}, publisher = {ACM}, organization = {ACM}, address = {Bordeaux, France}, abstract = {Advanced failure recovery strategies in HPC systems benefit tremendously from in-place failure recovery, in which the MPI infrastructure can survive process crashes and resume communication services. In this paper we present the rationale behind the specification and an effective implementation of the Revoke MPI operation. The purpose of the Revoke operation is the propagation of failure knowledge, and the interruption of ongoing, pending communication, under the control of the user. We explain that the Revoke operation can be implemented with a reliable broadcast over the scalable and failure-resilient Binomial Graph (BMG) overlay network.
Evaluation at scale, on a Cray XC30 supercomputer, demonstrates that the Revoke operation has a small latency and does not introduce system noise outside of failure recovery periods.}, doi = {10.1145/2802658.2802668}, author = {Aurelien Bouteiller and George Bosilca and Jack Dongarra} } @techreport {872, title = {Practical Scalable Consensus for Pseudo-Synchronous Distributed Systems: Formal Proof}, journal = {Innovative Computing Laboratory Technical Report}, number = {ICL-UT-15-01}, year = {2015}, month = {2015-04}, author = {Thomas Herault and Aurelien Bouteiller and George Bosilca and Marc Gamell and Keita Teranishi and Manish Parashar and Jack Dongarra} } @conference {889, title = {Practical Scalable Consensus for Pseudo-Synchronous Distributed Systems}, booktitle = {The International Conference for High Performance Computing, Networking, Storage and Analysis (SC15)}, year = {2015}, month = {2015-11}, publisher = {ACM}, organization = {ACM}, address = {Austin, TX}, abstract = {The ability to consistently handle faults in a distributed environment requires, among a small set of basic routines, an agreement algorithm allowing surviving entities to reach a consensual decision between a bounded set of volatile resources. This paper presents an algorithm that implements an Early Returning Agreement (ERA) in pseudo-synchronous systems, which optimistically allows a process to resume its activity while guaranteeing strong progress. We prove the correctness of our ERA algorithm and expose its logarithmic behavior, which is an extremely desirable property for any algorithm that targets future exascale platforms. We detail a practical implementation of this consensus algorithm in the context of an MPI library, and evaluate both its efficiency and scalability through a set of benchmarks and two fault tolerant scientific applications.}, author = {Thomas Herault and Aurelien Bouteiller and George Bosilca and Marc Gamell and Keita Teranishi and Manish Parashar and Jack Dongarra} } @conference {885, title = {Randomized Algorithms to Update Partial Singular Value Decomposition on a Hybrid CPU/GPU Cluster}, booktitle = {The International Conference for High Performance Computing, Networking, Storage and Analysis (SC15)}, year = {2015}, month = {2015-11}, publisher = {ACM}, organization = {ACM}, address = {Austin, TX}, author = {Ichitaro Yamazaki and Jakub Kurzak and Piotr Luszczek and Jack Dongarra} } @conference {893, title = {Random-Order Alternating Schwarz for Sparse Triangular Solves}, booktitle = {2015 SIAM Conference on Applied Linear Algebra (SIAM LA)}, year = {2015}, month = {2015-10}, publisher = {SIAM}, organization = {SIAM}, address = {Atlanta, GA}, abstract = {Block-asynchronous Jacobi is an iteration method where a locally synchronous iteration is embedded in an asynchronous global iteration. The unknowns are partitioned into small subsets, and while the components within the same subset are iterated in Jacobi fashion, no update order between the subsets is enforced. The values of the non-local entries remain constant during the local iterations, which can result in slow inter-subset information propagation and slow convergence. Interpreting the subsets as subdomains allows us to transfer the concept of domain overlap, which typically enhances information propagation, to block-asynchronous solvers.
In this talk we explore the impact of overlapping domains on the convergence and performance of block-asynchronous Jacobi iterations, and present results obtained by running this solver class on state-of-the-art HPC systems.}, author = {Hartwig Anzt and Edmond Chow and Daniel Szyld and Jack Dongarra} } @article {925, title = {A Scalable Approach to Solving Dense Linear Algebra Problems on Hybrid CPU-GPU Systems}, journal = {Concurrency and Computation: Practice and Experience}, volume = {27}, year = {2015}, month = {2015-09}, pages = {3702-3723}, abstract = {Aiming to fully exploit the computing power of all CPUs and all graphics processing units (GPUs) on hybrid CPU-GPU systems to solve dense linear algebra problems, we design a class of heterogeneous tile algorithms to maximize the degree of parallelism, to minimize the communication volume, and to accommodate the heterogeneity between CPUs and GPUs. The new heterogeneous tile algorithms are executed upon our decentralized dynamic scheduling runtime system, which schedules a task graph dynamically and transfers data between compute nodes automatically. The runtime system uses a new distributed task assignment protocol to solve data dependencies between tasks without any coordination between processing units. By overlapping computation and communication through dynamic scheduling, we are able to attain scalable performance for the double-precision Cholesky factorization and QR factorization. Our approach demonstrates a performance comparable to Intel MKL on shared-memory multicore systems and better performance than both vendor (e.g., Intel MKL) and open source libraries (e.g., StarPU) in the following three environments: heterogeneous clusters with GPUs, conventional clusters without GPUs, and shared-memory systems with multiple GPUs.}, keywords = {dense linear algebra, distributed dataflow scheduling, heterogeneous HPC systems, runtime systems}, doi = {10.1002/cpe.3403}, author = {Fengguang Song and Jack Dongarra} } @article {699, title = {A Survey of Recent Developments in Parallel Implementations of Gaussian Elimination}, journal = {Concurrency and Computation: Practice and Experience}, volume = {27}, year = {2015}, month = {2015-04}, pages = {1292-1309}, abstract = {Gaussian elimination is a canonical linear algebra procedure for solving linear systems of equations. In the last few years, the algorithm has received a lot of attention in an attempt to improve its parallel performance. This article surveys recent developments in parallel implementations of Gaussian elimination for shared-memory architectures. Five different flavors are investigated. Three of them are based on different strategies for pivoting: partial pivoting, incremental pivoting, and tournament pivoting. The fourth one replaces pivoting with the Partial Random Butterfly Transformation, and finally, an implementation without pivoting is used as a performance baseline. The technique of iterative refinement is applied to recover numerical accuracy when necessary. All parallel implementations are produced using dynamic, superscalar, runtime scheduling and tile matrix layout. Results on two multisocket multicore systems are presented.
Performance and numerical accuracy are analyzed.}, keywords = {Gaussian elimination, lu factorization, Multicore, parallel, plasma, shared memory}, doi = {10.1002/cpe.3306}, author = {Simplice Donfack and Jack Dongarra and Mathieu Faverge and Mark Gates and Jakub Kurzak and Piotr Luszczek and Ichitaro Yamazaki} } @article {926, title = {The TOP500 List and Progress in High-Performance Computing}, journal = {IEEE Computer}, volume = {48}, year = {2015}, month = {2015-11}, pages = {42-49}, abstract = {For more than two decades, the TOP500 list has enjoyed incredible success as a metric for supercomputing performance and as a source of data for identifying technological trends. The project{\textquoteright}s editors reflect on its usefulness and limitations for guiding large-scale scientific computing into the exascale era.}, keywords = {application performance, Benchmark testing, benchmarks, Computer architecture, High Performance Computing, Linpack, Market research, Parallel computing, Program processors, Scientific computing, Supercomputers, top500}, doi = {10.1109/MC.2015.338}, author = {Erich Strohmaier and Hans Meuer and Jack Dongarra and Horst D. Simon} } @article {1346, title = {Towards a High-Performance Tensor Algebra Package for Accelerators}, year = {2015}, month = {2015-09}, publisher = {Smoky Mountains Computational Sciences and Engineering Conference (SMC15)}, address = {Gatlinburg, TN}, author = {Marc Baboulin and Veselin Dobrev and Jack Dongarra and Christopher Earl and Jo{\"e}l Falcou and Azzam Haidar and Ian Karlin and Tzanio Kolev and Ian Masliah and Stanimire Tomov} } @conference {844, title = {Towards Batched Linear Solvers on Accelerated Hardware Platforms}, booktitle = {8th Workshop on General Purpose Processing Using GPUs (GPGPU 8) co-located with PPOPP 2015}, year = {2015}, month = {2015-02}, publisher = {ACM}, organization = {ACM}, address = {San Francisco, CA}, abstract = {As hardware evolves, an increasingly effective approach to developing energy-efficient, high-performance solvers is to design them to work on many small and independent problems. Indeed, many applications already need this functionality, especially for GPUs, which are known to be currently about four to five times more energy efficient than multicore CPUs for every floating-point operation. In this paper, we describe the development of the main one-sided factorizations (LU, QR, and Cholesky) that are needed for a set of small dense matrices to work in parallel. We refer to such algorithms as batched factorizations. Our approach is based on representing the algorithms as a sequence of batched BLAS routines for GPU-contained execution. Note that this is similar in functionality to the LAPACK and the hybrid MAGMA algorithms for large-matrix factorizations. But it is different from a straightforward approach, whereby each of the GPU{\textquoteright}s symmetric multiprocessors factorizes a single problem at a time. We illustrate how our performance analysis together with the profiling and tracing tools guided the development of batched factorizations to achieve up to 2-fold speedup and 3-fold better energy efficiency compared to our highly optimized batched CPU implementations based on the MKL library on a two-socket Intel Sandy Bridge server.
Compared to the batched LU factorization featured in NVIDIA{\textquoteright}s CUBLAS library for GPUs, we achieve up to 2.5-fold speedup on the K40 GPU.}, keywords = {batched factorization, hardware accelerators, numerical linear algebra, numerical software libraries, one-sided factorization algorithms}, author = {Azzam Haidar and Piotr Luszczek and Stanimire Tomov and Jack Dongarra} } @conference {911, title = {Tuning Stationary Iterative Solvers for Fault Resilience}, booktitle = {6th Workshop on Latest Advances in Scalable Algorithms for Large-Scale Systems (ScalA15)}, year = {2015}, month = {2015-11}, publisher = {ACM}, organization = {ACM}, address = {Austin, TX}, abstract = {As the transistor{\textquoteright}s feature size decreases following Moore{\textquoteright}s Law, hardware will become more prone to permanent, intermittent, and transient errors, increasing the number of failures experienced by applications and diminishing the confidence of users. As a result, resilience is considered the most difficult, under-addressed issue faced by the High Performance Computing community. In this paper, we address the design of error-resilient iterative solvers for sparse linear systems. Contrary to most previous approaches, which are based on Krylov subspace methods, for this purpose we analyze stationary component-wise relaxation. Concretely, starting from a plain implementation of the Jacobi iteration, we design a low-cost component-wise technique that elegantly handles bit-flips, turning the initial synchronized solver into an asynchronous iteration. Our experimental study employs sparse incomplete factorizations from several practical applications to expose the convergence delay incurred by the fault-tolerant implementation.}, author = {Hartwig Anzt and Jack Dongarra and Enrique S. Quintana-Orti} } @conference {892, title = {Visualizing Execution Traces with Task Dependencies}, booktitle = {2nd Workshop on Visual Performance Analysis (VPA {\textquoteright}15)}, year = {2015}, month = {2015-11}, publisher = {ACM}, organization = {ACM}, address = {Austin, TX}, abstract = {Task-based scheduling has emerged as one method to reduce the complexity of parallel computing. When using task-based schedulers, developers must frame their computation as a series of tasks with various data dependencies. The scheduler can take these tasks, along with their input and output dependencies, and schedule them in parallel across a node or cluster. While these schedulers simplify the process of parallel software development, they can obfuscate the performance characteristics of the execution of an algorithm. The execution trace has been used for many years to give developers a visual representation of how their computations are performed. These methods can be employed to visualize when and where each of the tasks in a task-based algorithm is scheduled. In addition, the task dependencies can be used to create a directed acyclic graph (DAG) that can also be visualized to demonstrate the dependencies of the various tasks that make up a workload. The work presented here aims to combine these two data sets and extend execution trace visualization to better suit task-based workloads. This paper presents a brief description of task-based schedulers and the performance data they produce. It will then describe an interactive extension to the current trace visualization methods that combines the trace and DAG data sets. This new tool allows users to gain a greater understanding of how their tasks are scheduled.
It also provides a simplified way for developers to evaluate and debug the performance of their scheduler.}, author = {Blake Haugen and Stephen Richmond and Jakub Kurzak and Chad A. Steed and Jack Dongarra} } @inproceedings {959, title = {Weighted Dynamic Scheduling with Many Parallelism Grains for Offloading of Numerical Workloads to Multiple Varied Accelerators}, journal = {Proceedings of the 6th Workshop on Latest Advances in Scalable Algorithms for Large-Scale Systems (ScalA{\textquoteright}15)}, volume = {No. 5}, year = {2015}, month = {2015-11}, publisher = {ACM}, address = {Austin, TX}, abstract = {A wide variety of heterogeneous compute resources are available to modern computers, including multiple sockets containing multicore CPUs, one or more GPUs of varying power, and coprocessors such as the Intel Xeon Phi. The challenge faced by domain scientists is how to efficiently and productively use these varied resources. For example, in order to use GPUs effectively, the workload must have a greater degree of parallelism than a workload designed for a multicore CPU. The domain scientist would have to design and schedule an application in multiple degrees of parallelism and task grain sizes in order to obtain efficient performance from the resources. We propose a productive programming model starting from serial code, which achieves parallelism and scalability by using a task-superscalar runtime environment to adapt the computation to the available resources. The adaptation is done at multiple points, including multi-level data partitioning, adaptive task grain sizes, and dynamic task scheduling. The effectiveness of this approach for utilizing multi-way heterogeneous hardware resources is demonstrated by implementing dense linear algebra applications.}, keywords = {dataflow scheduling, hardware accelerators, multi-grain parallelism}, author = {Azzam Haidar and Yulu Jia and Piotr Luszczek and Stanimire Tomov and Asim YarKhan and Jack Dongarra} } @conference {766, title = {Accelerating Eigenvector Computation in the Nonsymmetric Eigenvalue Problem}, booktitle = {VECPAR 2014}, year = {2014}, month = {2014-06}, address = {Eugene, OR}, abstract = {In the nonsymmetric eigenvalue problem, work has focused on the Hessenberg reduction and QR iteration, using efficient algorithms and fast, Level 3 BLAS routines. Comparatively, computation of eigenvectors performs poorly, limited to slow, Level 2 BLAS performance with little speedup on multi-core systems. It has thus become a dominant cost in the eigenvalue problem.
To address this, we present improvements for the eigenvector computation to use Level 3 BLAS where applicable and parallelize the remaining triangular solves, achieving good parallel scaling and accelerating the overall eigenvalue problem more than three-fold.}, author = {Mark Gates and Azzam Haidar and Jack Dongarra} } @inbook {780, title = {Accelerating Numerical Dense Linear Algebra Calculations with GPUs}, booktitle = {Numerical Computations with GPUs}, year = {2014}, pages = {3-28}, publisher = {Springer International Publishing}, organization = {Springer International Publishing}, chapter = {1}, isbn = {978-3-319-06547-2}, doi = {10.1007/978-3-319-06548-9_1}, author = {Jack Dongarra and Mark Gates and Azzam Haidar and Jakub Kurzak and Piotr Luszczek and Stanimire Tomov and Ichitaro Yamazaki} } @techreport {837, title = {Accelerating the LOBPCG method on GPUs using a blocked Sparse Matrix Vector Product}, journal = {University of Tennessee Computer Science Technical Report}, number = {UT-EECS-14-731}, year = {2014}, month = {2014-10}, publisher = {University of Tennessee}, abstract = {This paper presents a heterogeneous CPU-GPU algorithm design and optimized implementation for an entire sparse iterative eigensolver {\textendash} the Locally Optimal Block Preconditioned Conjugate Gradient (LOBPCG) {\textendash} starting from low-level GPU data structures and kernels to the higher-level algorithmic choices and overall heterogeneous design. Most notably, the eigensolver leverages the high performance of a new GPU kernel developed for the simultaneous multiplication of a sparse matrix and a set of vectors (SpMM). This is a building block that serves as a backbone not only for block-Krylov methods, but also for other methods relying on blocking for acceleration in general. The heterogeneous LOBPCG developed here reveals the potential of this type of eigensolver by highly optimizing all of its components, and can be viewed as a benchmark for other SpMM-dependent applications. Compared to non-blocked algorithms, we show that the performance speedup factor of SpMM vs. SpMV-based algorithms is up to six on GPUs like NVIDIA{\textquoteright}s K40. In particular, a typical SpMV performance range in double precision is 20 to 25 GFlop/s, while the SpMM is in the range of 100 to 120 GFlop/s. Compared to highly optimized CPU implementations, e.g., the SpMM from MKL on two eight-core Intel Xeon E5-2690s, our kernel is 3 to 5x faster on a K40 GPU. For comparison to other computational loads, the same GPU-to-CPU performance acceleration is observed for the SpMV product, as well as dense linear algebra, e.g., matrix-matrix multiplication and factorizations like LU, QR, and Cholesky. Thus, the modeled GPU (vs. CPU) acceleration for the entire solver is also 3 to 5x.
In practice, though, currently available CPU implementations are much slower due to missed optimization opportunities, as we show.}, author = {Hartwig Anzt and Stanimire Tomov and Jack Dongarra} } @conference {818, title = {Access-averse Framework for Computing Low-rank Matrix Approximations}, booktitle = {First International Workshop on High Performance Big Graph Data Management, Analysis, and Mining}, year = {2014}, month = {2014-10}, address = {Washington, DC}, author = {Ichitaro Yamazaki and Theo Mary and Jakub Kurzak and Stanimire Tomov and Jack Dongarra} } @article {698, title = {Achieving numerical accuracy and high performance using recursive tile LU factorization with partial pivoting}, journal = {Concurrency and Computation: Practice and Experience}, volume = {26}, year = {2014}, month = {2014-05}, pages = {1408-1431}, chapter = {1408}, abstract = {The LU factorization is an important numerical algorithm for solving systems of linear equations in science and engineering and is a characteristic of many dense linear algebra computations. For example, it has become the de facto numerical algorithm implemented within the LINPACK benchmark to rank the most powerful supercomputers in the world, collected by the TOP500 website. Multicore processors continue to present challenges to the development of fast and robust numerical software due to the increasing levels of hardware parallelism and the widening gap between core and memory speeds. In this context, the difficulty in developing new algorithms for the scientific community resides in the combination of two goals: achieving high performance while maintaining the accuracy of the numerical algorithm. This paper proposes a new approach for computing the LU factorization in parallel on multicore architectures, which not only improves the overall performance but also sustains the numerical quality of the standard LU factorization algorithm with partial pivoting. While the update of the trailing submatrix is computationally intensive and highly parallel, the inherently problematic portion of the LU factorization is the panel factorization due to its memory-bound characteristic as well as the atomicity of selecting the appropriate pivots. Our approach uses a parallel fine-grained recursive formulation of the panel factorization step and implements the update of the trailing submatrix with the tile algorithm. Based on conflict-free partitioning of the data and lockless synchronization mechanisms, our implementation lets the overall computation flow naturally without contention. The dynamic runtime system called QUARK is then able to schedule tasks with heterogeneous granularities and to transparently introduce algorithmic lookahead. The performance results of our implementation are competitive compared to the currently available software packages and libraries.
For example, it is up to 40\% faster when compared to the equivalent Intel MKL routine and up to threefold faster than LAPACK with multithreaded Intel MKL BLAS.}, keywords = {factorization, parallel linear algebra, plasma, recursion, shared memory synchronization, threaded parallelism}, doi = {10.1002/cpe.3110}, url = {http://doi.wiley.com/10.1002/cpe.3110}, author = {Jack Dongarra and Mathieu Faverge and Hatem Ltaief and Piotr Luszczek} } @conference {708, title = {Assessing the Impact of ABFT and Checkpoint Composite Strategies}, booktitle = {16th Workshop on Advances in Parallel and Distributed Computational Models, IPDPS 2014}, year = {2014}, month = {2014-05}, publisher = {IEEE}, organization = {IEEE}, address = {Phoenix, AZ}, abstract = {Algorithm-specific fault-tolerant approaches promise unparalleled scalability and performance in failure-prone environments. With the advances in the theoretical and practical understanding of algorithmic traits enabling such approaches, a growing number of frequently used algorithms (including all widely used factorization kernels) have been proven capable of such properties. These algorithms provide a temporal section of the execution during which the data is protected by its own intrinsic properties, and can be algorithmically recomputed without the need for checkpoints. However, while typical scientific applications spend a significant fraction of their execution time in library calls that can be ABFT-protected, they interleave sections that are difficult or even impossible to protect with ABFT. As a consequence, the only fault-tolerance approach that is currently used for these applications is checkpoint/restart. In this paper we propose a model and a simulator to investigate the behavior of a composite protocol that alternates between ABFT and checkpoint/restart protection for effective protection of each phase of an iterative application composed of ABFT-aware and ABFT-unaware sections. We highlight that this approach drastically increases the performance delivered by the system, especially at scale, by providing means to rarefy the checkpoints while simultaneously decreasing the volume of data needed to be checkpointed.}, keywords = {ABFT, checkpoint, fault-tolerance, High-performance computing, resilience}, author = {George Bosilca and Aurelien Bouteiller and Thomas Herault and Yves Robert and Jack Dongarra} } @conference {836, title = {clMAGMA: High Performance Dense Linear Algebra with OpenCL}, booktitle = {International Workshop on OpenCL}, year = {2014}, month = {2014-05}, address = {Bristol University, England}, abstract = {This paper presents the design and implementation of several fundamental dense linear algebra (DLA) algorithms in OpenCL. In particular, these are linear system solvers and eigenvalue problem solvers. Further, we give an overview of the clMAGMA library, an open source, high performance OpenCL library that incorporates the developments presented, and in general provides to heterogeneous architectures the DLA functionality of the popular LAPACK library. The LAPACK-compliance and use of OpenCL simplify the use of clMAGMA in applications, while providing them with portably performant DLA. High performance is obtained through use of the high-performance OpenCL BLAS, hardware and OpenCL-specific tuning, and a hybridization methodology where we split the algorithm into computational tasks of various granularities.
Execution of those tasks is properly scheduled over the heterogeneous hardware components by minimizing data movements and mapping algorithmic requirements to the architectural strengths of the various heterogeneous hardware components.}, author = {Chongxiao Cao and Jack Dongarra and Peng Du and Mark Gates and Piotr Luszczek and Stanimire Tomov} } @article {815, title = {Communication-Avoiding Symmetric-Indefinite Factorization}, journal = {SIAM Journal on Matrix Analysis and Application}, volume = {35}, year = {2014}, month = {2014-07}, pages = {1364-1406}, abstract = {We describe and analyze a novel symmetric triangular factorization algorithm. The algorithm is essentially a block version of Aasen{\textquoteright}s triangular tridiagonalization. It factors a dense symmetric matrix A as the product A = P L T L^T P^T, where P is a permutation matrix, L is lower triangular, and T is block tridiagonal and banded. The algorithm is the first symmetric-indefinite communication-avoiding factorization: it performs an asymptotically optimal amount of communication in a two-level memory hierarchy for almost any cache-line size. Adaptations of the algorithm to parallel computers are likely to be communication efficient as well; one such adaptation has been recently published. The current paper describes the algorithm, proves that it is numerically stable, and proves that it is communication optimal.}, keywords = {plasma}, author = {Grey Ballard and Dulceneia Becker and James Demmel and Jack Dongarra and Alex Druinsky and I Peled and Oded Schwartz and Sivan Toledo and Ichitaro Yamazaki} } @conference {832, title = {Computing Least Squares Condition Numbers on Hybrid Multicore/GPU Systems}, booktitle = {International Interdisciplinary Conference on Applied Mathematics, Modeling and Computational Science (AMMCS)}, year = {2014}, month = {2014-08}, address = {Waterloo, Ontario, CA}, abstract = {This paper presents an efficient computation for least squares conditioning or estimates of it. We propose performance results using new routines on top of the multicore-GPU library MAGMA. This set of routines is based on an efficient computation of the variance-covariance matrix for which, to our knowledge, there is no implementation in the current public domain libraries LAPACK and ScaLAPACK.}, author = {Marc Baboulin and Jack Dongarra and Remi Lacroix} } @conference {819, title = {Deflation Strategies to Improve the Convergence of Communication-Avoiding GMRES}, booktitle = {5th Workshop on Latest Advances in Scalable Algorithms for Large-Scale Systems}, year = {2014}, month = {2014-11}, address = {New Orleans, LA}, author = {Ichitaro Yamazaki and Stanimire Tomov and Jack Dongarra} } @conference {710, title = {Design and Implementation of a Large Scale Tree-Based QR Decomposition Using a 3D Virtual Systolic Array and a Lightweight Runtime}, booktitle = {Workshop on Large-Scale Parallel Processing, IPDPS 2014}, year = {2014}, month = {2014-05}, publisher = {IEEE}, organization = {IEEE}, address = {Phoenix, AZ}, abstract = {A systolic array provides an alternative computing paradigm to the von Neumann architecture. Though its hardware implementation has failed as a paradigm to design integrated circuits in the past, we are now discovering that the systolic array as a software virtualization layer can lead to an extremely scalable execution paradigm. To demonstrate this scalability, in this paper, we design and implement a 3D virtual systolic array to compute a tile QR decomposition of a tall-and-skinny dense matrix.
Our implementation is based on a state-of-the-art algorithm that factorizes a panel based on a tree reduction. Using a runtime developed as a part of the Parallel Ultra Light Systolic Array Runtime (PULSAR) project, we demonstrate on a Cray XT5 machine how our virtual systolic array can be mapped to a large-scale machine and obtain excellent parallel performance. This is an important contribution since such a QR decomposition is used, for example, to compute a least squares solution of an overdetermined system, which arises in many scientific and engineering problems.}, keywords = {dataflow, message-passing, multithreading, QR decomposition, runtime, systolic array}, author = {Ichitaro Yamazaki and Jakub Kurzak and Piotr Luszczek and Jack Dongarra} } @techreport {822, title = {Design for a Soft Error Resilient Dynamic Task-based Runtime}, journal = {ICL Technical Report}, number = {ICL-UT-14-04}, year = {2014}, month = {2014-11}, publisher = {University of Tennessee}, abstract = {As the scale of modern computing systems grows, failures will happen more frequently. On the way to Exascale, a generic, low-overhead, resilient extension becomes a desired aptitude of any programming paradigm. In this paper we explore three additions to a dynamic task-based runtime to build a generic framework providing soft error resilience to task-based programming paradigms. The first recovers the application by re-executing the minimum required sub-DAG, the second takes critical checkpoints of the data flowing between tasks to minimize the necessary re-execution, while the last one takes advantage of algorithmic properties to recover the data without re-execution. These mechanisms have been implemented in the PaRSEC task-based runtime framework. Experimental results validate our approach and quantify the overhead introduced by such mechanisms.}, author = {Chongxiao Cao and Thomas Herault and George Bosilca and Jack Dongarra} } @conference {813, title = {Designing LU-QR Hybrid Solvers for Performance and Stability}, booktitle = {IPDPS 2014}, year = {2014}, month = {2014-05}, publisher = {IEEE}, organization = {IEEE}, address = {Phoenix, AZ}, abstract = {This paper introduces hybrid LU-QR algorithms for solving dense linear systems of the form Ax = b. Throughout a matrix factorization, these algorithms dynamically alternate LU with local pivoting and QR elimination steps based upon some robustness criterion. LU elimination steps can be very efficiently parallelized and are twice as cheap, in terms of operations, as QR steps. However, LU steps are not necessarily stable, while QR steps are always stable. The hybrid algorithms execute a QR step when a robustness criterion detects some risk for instability, and they execute an LU step otherwise. Ideally, the choice between LU and QR steps must have a small computational overhead and must provide a satisfactory level of stability with as few QR steps as possible. In this paper, we introduce several robustness criteria and we establish upper bounds on the growth factor of the norm of the updated matrix incurred by each of these criteria. In addition, we describe the implementation of the hybrid algorithms through an extension of the PaRSEC software to allow for dynamic choices during execution.
Finally, we analyze both stability and performance results compared to state-of-the-art linear solvers on parallel distributed multicore platforms.}, keywords = {plasma}, isbn = {978-1-4799-3800-1}, doi = {10.1109/IPDPS.2014.108}, author = {Mathieu Faverge and Julien Herrmann and Julien Langou and Bradley Lowery and Yves Robert and Jack Dongarra} } @conference {834, title = {Dynamically balanced synchronization-avoiding LU factorization with multicore and GPUs}, booktitle = {Fourth International Workshop on Accelerators and Hybrid Exascale Systems (AsHES), IPDPS 2014}, year = {2014}, month = {2014-05}, abstract = {Graphics processing units (GPUs) have brought huge performance improvements in the scientific and numerical fields. We present an efficient hybrid CPU/GPU approach that is portable, dynamically and efficiently balances the workload between the CPUs and the GPUs, and avoids data transfer bottlenecks that are frequently present in numerical algorithms. Our approach determines the amount of initial work to assign to the CPUs before the execution, and then dynamically balances workloads during the execution. We then present a theoretical model to guide the choice of the initial amount of work for the CPUs. The validation of our model allows our approach to self-adapt on any architecture using the manufacturer{\textquoteright}s characteristics of the underlying machine. We illustrate our method for the LU factorization. For this case, we show that the use of our approach combined with a communication-avoiding LU algorithm is efficient. For example, our experiments on a 24-core AMD Opteron 6172 show that by adding one GPU (Tesla S2050) we accelerate LU up to 2.4x compared to the corresponding routine in MKL using 24 cores. The comparisons with MAGMA also show significant improvements.}, author = {Simplice Donfack and Stanimire Tomov and Jack Dongarra} } @article {821, title = {An Efficient Distributed Randomized Algorithm for Solving Large Dense Symmetric Indefinite Linear Systems}, journal = {Parallel Computing}, volume = {40}, year = {2014}, month = {2014-07}, pages = {213-223}, abstract = {Randomized algorithms are gaining ground in high-performance computing applications as they have the potential to outperform deterministic methods, while still providing accurate results. We propose a randomized solver for distributed multicore architectures to efficiently solve large dense symmetric indefinite linear systems that are encountered, for instance, in parameter estimation problems or electromagnetism simulations. The contribution of this paper is to propose efficient kernels for applying random butterfly transformations and a new distributed implementation combined with a runtime (PaRSEC) that automatically adjusts data structures, data mappings, and the scheduling as systems scale up. Both the parallel distributed solver and the supporting runtime environment are innovative. To our knowledge, the randomization approach associated with this solver has never been used in public domain software for symmetric indefinite systems. The underlying runtime framework allows seamless data mapping and task scheduling, mapping its capabilities to the underlying hardware features of heterogeneous distributed architectures.
The performance of our software is similar to that obtained for symmetric positive definite systems, but requires only half the execution time and half the amount of data storage of a general dense solver.}, keywords = {Distributed linear algebra solvers, LDLT factorization, PaRSEC runtime, plasma, Randomized algorithms, Symmetric indefinite systems}, doi = {10.1016/j.parco.2013.12.003}, author = {Marc Baboulin and Dulceneia Becker and George Bosilca and Anthony Danalis and Jack Dongarra} } @conference {715, title = {A Fast Batched Cholesky Factorization on a GPU}, booktitle = {International Conference on Parallel Processing (ICPP-2014)}, year = {2014}, month = {2014-09}, address = {Minneapolis, MN}, abstract = {Currently, state-of-the-art libraries, like MAGMA, focus on very large linear algebra problems, while solving many small independent problems, which is usually referred to as batched problems, is not given adequate attention. In this paper, we propose a batched Cholesky factorization on a GPU. Three algorithms {\textendash} non-blocked, blocked, and recursive blocked {\textendash} were examined. The left-looking version of the Cholesky factorization is used to factorize the panel, and the right-looking Cholesky version is used to update the trailing matrix in the recursive blocked algorithm. Our batched Cholesky achieves up to 1.8x speedup compared to the optimized parallel implementation in the MKL library on two sockets of Intel Sandy Bridge CPUs. Further, we use the new routines to develop a single Cholesky factorization solver which targets large matrix sizes. Our approach differs from MAGMA by having an entirely GPU implementation where both the panel factorization and the trailing matrix updates are on the GPU. Such an implementation does not depend on the speed of the CPU. Compared to the MAGMA library, our full GPU solution achieves 85\% of the hybrid MAGMA performance, which uses 16 Sandy Bridge cores in addition to a K40 Nvidia GPU. Moreover, we achieve 80\% of the practical dgemm peak of the machine, while MAGMA achieves only 75\%, and finally, in terms of energy consumption, we outperform MAGMA by a factor of 1.5 in performance-per-watt for large matrices.}, author = {Tingxing Dong and Azzam Haidar and Stanimire Tomov and Jack Dongarra} } @conference {765, title = {Heterogeneous Acceleration for Linear Algebra in Multi-Coprocessor Environments}, booktitle = {VECPAR 2014}, year = {2014}, month = {2014-06}, address = {Eugene, OR}, abstract = {We present an efficient and scalable programming model for the development of linear algebra in heterogeneous multi-coprocessor environments. The model incorporates some of the current best design and implementation practices for the heterogeneous acceleration of dense linear algebra (DLA). Examples are given as the basis for solving linear systems{\textquoteright} algorithms {\textendash} the LU, QR, and Cholesky factorizations. To generate the extreme level of parallelism needed for the efficient use of coprocessors, algorithms of interest are redesigned and then split into well-chosen computational tasks. The tasks{\textquoteright} execution is scheduled over the computational components of a hybrid system of multi-core CPUs and coprocessors using a light-weight runtime system. The use of light-weight runtime systems keeps scheduling overhead low, while enabling the expression of parallelism through otherwise sequential code.
This simplifies the development efforts and allows the exploration of the unique strengths of the various hardware components.}, keywords = {Computer science, factorization, Heterogeneous systems, Intel Xeon Phi, linear algebra}, author = {Azzam Haidar and Piotr Luszczek and Stanimire Tomov and Jack Dongarra} } @conference {812, title = {Hybrid Multi-Elimination ILU Preconditioners on GPUs}, booktitle = {International Heterogeneity in Computing Workshop (HCW), IPDPS 2014}, year = {2014}, month = {2014-05}, publisher = {IEEE}, organization = {IEEE}, address = {Phoenix, AZ}, abstract = {Iterative solvers for sparse linear systems often benefit from using preconditioners. While there are implementations for many iterative methods that leverage the computing power of accelerators, porting the latest developments in preconditioners to accelerators has been challenging. In this paper we develop a self-adaptive multi-elimination preconditioner for graphics processing units (GPUs). The preconditioner is based on a multi-level incomplete LU factorization and uses a direct dense solver for the bottom-level system. For test matrices from the University of Florida matrix collection, we investigate the influence of handling the triangular solvers in the distinct iteration steps in either single or double precision arithmetic. Integrated into a Conjugate Gradient method, we show that our multi-elimination algorithm is highly competitive against popular preconditioners, including multi-colored symmetric Gauss-Seidel relaxation preconditioners, and (multi-colored symmetric) ILU for numerous problems.}, author = {Dimitar Lukarski and Hartwig Anzt and Stanimire Tomov and Jack Dongarra} } @techreport {838, title = {Implementing a Sparse Matrix Vector Product for the SELL-C/SELL-C-σ formats on NVIDIA GPUs}, journal = {University of Tennessee Computer Science Technical Report}, number = {UT-EECS-14-727}, year = {2014}, month = {2014-04}, publisher = {University of Tennessee}, abstract = {Numerical methods in sparse linear algebra typically rely on a fast and efficient matrix vector product, as this usually is the backbone of iterative algorithms for solving eigenvalue problems or linear systems. Against the background of a large diversity in the characteristics of high performance computer architectures, it is a challenge to derive a cross-platform efficient storage format along with fast matrix vector kernels. Recently, attention focused on the SELL-C-σ format, a sliced ELLPACK format enhanced by row-sorting to reduce the fill-in when padding rows with zeros. In this paper we propose an additional modification resulting in the padded sliced ELLPACK (SELL-P) format, for which we develop a sparse matrix vector CUDA kernel that is able to efficiently exploit the computing power of NVIDIA GPUs. We show that the kernel we developed outperforms straightforward implementations for the widespread CSR and ELLPACK formats, and is highly competitive with the implementations in the highly optimized CUSPARSE library.}, author = {Hartwig Anzt and Stanimire Tomov and Jack Dongarra} } @conference {807, title = {Improving the performance of CA-GMRES on multicores with multiple GPUs}, booktitle = {IPDPS 2014}, year = {2014}, month = {2014-05}, publisher = {IEEE}, organization = {IEEE}, address = {Phoenix, AZ}, abstract = {The Generalized Minimum Residual (GMRES) method is one of the most widely used iterative methods for solving nonsymmetric linear systems of equations.
In recent years, techniques to avoid communication in GMRES have gained attention because, in comparison to floating-point operations, communication is becoming increasingly expensive on modern computers. Since graphics processing units (GPUs) are now becoming a crucial component in computing, we investigate the effectiveness of these techniques on multicore CPUs with multiple GPUs. While we present the detailed performance studies of a matrix powers kernel on multiple GPUs, we particularly focus on orthogonalization strategies that have a great impact on both the numerical stability and performance of GMRES, especially as the matrix becomes sparser or ill-conditioned. We present the experimental results on two eight-core Intel Sandy Bridge CPUs with three NVIDIA Fermi GPUs and demonstrate that significant speedups can be obtained by avoiding communication, either on a GPU or between the GPUs. As part of our study, we investigate several optimization techniques for the GPU kernels that can also be used in other iterative solvers besides GMRES. Hence, our studies not only emphasize the importance of avoiding communication on GPUs, but they also provide insight about the effects of these optimization techniques on the performance of the sparse solvers, and may have greater impact beyond GMRES.}, author = {Ichitaro Yamazaki and Hartwig Anzt and Stanimire Tomov and Mark Hoemmen and Jack Dongarra} } @article {760, title = {Looking Back at Dense Linear Algebra Software}, journal = {Journal of Parallel and Distributed Computing}, volume = {74}, year = {2014}, month = {2014-07}, pages = {2548{\textendash}2560}, chapter = {2548}, abstract = {Over the years, computational physics and chemistry served as an ongoing source of problems that demanded ever-increasing performance from hardware as well as the software that ran on top of it. Most of these problems could be translated into solutions for systems of linear equations: the very topic of numerical linear algebra. Seemingly then, a set of efficient linear solvers could be solving important scientific problems for years to come. We argue that dramatic changes in hardware designs precipitated by the shifting nature of the marketplace of computer hardware had a continuous effect on the software for numerical linear algebra. The extraction of high percentages of peak performance continues to require adaptation of software. If the past history of this adaptive nature of linear algebra software is any guide, then the future theme will feature changes as well {\textendash} changes aimed at harnessing the incredible advances of the evolving hardware infrastructure.}, keywords = {decompositional approach, dense linear algebra, parallel algorithms}, doi = {10.1016/j.jpdc.2013.10.005}, author = {Piotr Luszczek and Jakub Kurzak and Jack Dongarra} } @conference {859, title = {LU Factorization of Small Matrices: Accelerating Batched DGETRF on the GPU}, booktitle = {16th IEEE International Conference on High Performance Computing and Communications (HPCC)}, year = {2014}, month = {2014-08}, publisher = {IEEE}, organization = {IEEE}, address = {Paris, France}, abstract = {Gaussian Elimination is commonly used to solve dense linear systems in scientific models. In a large number of applications, a need arises to solve many small-size problems, instead of a few large linear systems.
The size of each of these small linear systems depends, for example, on the number of ordinary differential equations (ODEs) used in the model, and can be on the order of hundreds of unknowns. To efficiently exploit the computing power of modern accelerator hardware, these linear systems are processed in batches. To improve the numerical stability of Gaussian Elimination, at least partial pivoting is required, most often accomplished with row pivoting. However, row pivoting can result in a severe performance penalty on GPUs because it brings in thread divergence and non-coalesced memory accesses. The state-of-the-art libraries for linear algebra that target GPUs, such as MAGMA, focus on large matrix sizes. They change the data layout by transposing the matrix to avoid these divergence and non-coalescing penalties. However, the data movement associated with transposition is very expensive for small matrices. In this paper, we propose a batched LU factorization for GPUs by using a multi-level blocked right-looking algorithm that preserves the data layout but minimizes the penalty of partial pivoting. Our batched LU achieves up to 2.5-fold speedup when compared to the alternative CUBLAS solutions on a K40c GPU and 3.6-fold speedup over MKL on a node of the Titan supercomputer at ORNL in a nuclear reaction network simulation.}, author = {Tingxing Dong and Azzam Haidar and Piotr Luszczek and James Harris and Stanimire Tomov and Jack Dongarra} } @conference {811, title = {MIAMI: A Framework for Application Performance Diagnosis}, booktitle = {ISPASS-2014}, year = {2014}, month = {2014-03}, publisher = {IEEE}, organization = {IEEE}, address = {Monterey, CA}, abstract = {A typical application tuning cycle repeats the following three steps in a loop: performance measurement, analysis of results, and code refactoring. While performance measurement is well covered by existing tools, analysis of results to understand the main sources of inefficiency and to identify opportunities for optimization is generally left to the user. Today{\textquoteright}s state-of-the-art performance analysis tools use instrumentation or hardware counter sampling to measure the performance of interactions between code and the target architecture during execution. Such measurements are useful to identify hotspots in applications, places where execution time is spent or where cache misses are incurred. However, explanatory understanding of tuning opportunities requires a more detailed, mechanistic modeling approach. This paper presents MIAMI (Machine Independent Application Models for performance Insight), a set of tools for automatic performance diagnosis. MIAMI uses application characterization and models of target architectures to reason about an application{\textquoteright}s performance.
MIAMI uses a modeling approach based on first-order principles to identify performance bottlenecks, pinpoint optimization opportunities, and compute bounds on the potential for improvement.}, isbn = {978-1-4799-3604-5}, doi = {10.1109/ISPASS.2014.6844480}, author = {Gabriel Marin and Jack Dongarra and Dan Terpstra} } @conference {713, title = {Mixed-precision orthogonalization scheme and adaptive step size for CA-GMRES on GPUs}, booktitle = {VECPAR 2014 (Best Paper)}, year = {2014}, month = {2014-06}, address = {Eugene, OR}, abstract = {We propose a mixed-precision orthogonalization scheme that takes the input matrix in a standard 32- or 64-bit floating-point precision, but uses higher-precision arithmetic to accumulate its intermediate results. For the 64-bit precision, our scheme uses software emulation for the higher-precision arithmetic, and requires about 20x more computation but about the same amount of communication as the standard orthogonalization scheme. Since the computation is becoming less expensive compared to the communication on new and emerging architectures, the relative cost of our mixed-precision scheme is decreasing. Our case studies with CA-GMRES on a GPU demonstrate that using mixed precision for this small but critical segment of CA-GMRES can improve not only its overall numerical stability but also, in some cases, its performance.}, author = {Ichitaro Yamazaki and Stanimire Tomov and Tingxing Dong and Jack Dongarra} } @article {831, title = {Model-Driven One-Sided Factorizations on Multicore, Accelerated Systems}, journal = {Supercomputing Frontiers and Innovations}, volume = {1}, year = {2014}, abstract = {Hardware heterogeneity of the HPC platforms is no longer considered unusual but instead has become the most viable way forward towards Exascale. In fact, the many heterogeneous resources available to modern computers are designed for different workloads, and their efficient use is closely aligned with the specialized role envisaged by their design. Commonly, in order to efficiently use such GPU resources, the workload in question must have a much greater degree of parallelism than workloads often associated with multicore processors (CPUs). Available GPU variants differ in their internal architecture and, as a result, are capable of handling workloads of varying degrees of complexity and a range of computational patterns. This vast array of applicable workloads will likely lead to an ever-accelerating mixing of multicore CPUs and GPUs in multi-user environments, with the ultimate goal of offering adequate computing facilities for a wide range of scientific and technical workloads. In the following paper, we present a research prototype that uses a lightweight runtime environment to manage the resource-specific workloads, and to control the dataflow and parallel execution in hybrid systems. Our lightweight runtime environment uses task superscalar concepts to enable the developer to write serial code while providing parallel execution. This concept is reminiscent of dataflow and systolic architectures in its conceptualization of a workload as a set of side-effect-free tasks that pass data items whenever the associated work assignment has been completed. Additionally, our task abstractions and their parametrization enable uniformity in the algorithmic development across all the heterogeneous resources without sacrificing precious compute cycles.
We include performance results for dense linear algebra functions, which demonstrate the practicality and effectiveness of our approach, which is capable of fully utilizing a wide range of accelerator hardware.}, keywords = {dense linear algebra, hardware accelerators, task superscalar scheduling}, doi = {10.14529/jsfi1401}, author = {Jack Dongarra and Azzam Haidar and Jakub Kurzak and Piotr Luszczek and Stanimire Tomov and Asim YarKhan} } @conference {839, title = {New Algorithm for Computing Eigenvectors of the Symmetric Eigenvalue Problem}, booktitle = {Workshop on Parallel and Distributed Scientific and Engineering Computing, IPDPS 2014 (Best Paper)}, year = {2014}, month = {2014-05}, publisher = {IEEE}, organization = {IEEE}, address = {Phoenix, AZ}, abstract = {We describe a design and implementation of a multi-stage algorithm for computing eigenvectors of a dense symmetric matrix. We show that reformulating the existing algorithms is beneficial in terms of performance even if that doubles the computational complexity. Through detailed analysis, we show that the effect of the increase in the asymptotic operation count may be compensated by a much improved performance rate. Our performance results indicate that using our approach achieves very good speedup and scalability even when directly compared with the existing state-of-the-art software.}, doi = {10.1109/IPDPSW.2014.130}, author = {Azzam Haidar and Piotr Luszczek and Jack Dongarra} } @article {758, title = {A Novel Hybrid CPU-GPU Generalized Eigensolver for Electronic Structure Calculations Based on Fine Grained Memory Aware Tasks}, journal = {International Journal of High Performance Computing Applications}, volume = {28}, year = {2014}, month = {2014-05}, pages = {196-209}, chapter = {196}, abstract = {The adoption of hybrid CPU{\textendash}GPU nodes in traditional supercomputing platforms such as the Cray-XK6 opens acceleration opportunities for electronic structure calculations in materials science and chemistry applications, where medium-sized generalized eigenvalue problems must be solved many times. These eigenvalue problems are too small to effectively solve on distributed systems, but can benefit from the massive computing power concentrated on a single-node, hybrid CPU{\textendash}GPU system. However, hybrid systems call for the development of new algorithms that efficiently exploit heterogeneity and massive parallelism of not just GPUs, but of multicore/manycore CPUs as well. Addressing these demands, we developed a generalized eigensolver featuring novel algorithms of increased computational intensity (compared with the standard algorithms), decomposition of the computation into fine-grained memory-aware tasks, and their hybrid execution. The resulting eigensolvers are state-of-the-art in high-performance computing, significantly outperforming existing libraries. We describe the algorithm and analyze its performance impact on applications of interest when different fractions of eigenvectors are needed by the host electronic structure code.}, keywords = {Eigensolver, electronic structure calculations, generalized eigensolver, gpu, high performance, hybrid, Multicore, two-stage}, doi = {10.1177/1094342013502097}, author = {Azzam Haidar and Raffaele Solc{\`a} and Mark Gates and Stanimire Tomov and Thomas C. Schulthess and Jack Dongarra} } @conference {833, title = {Optimizing Krylov Subspace Solvers on Graphics Processing Units}, booktitle = {Fourth International Workshop on Accelerators and Hybrid Exascale Systems (AsHES), IPDPS 2014}, year = {2014}, month = {2014-05}, publisher = {IEEE}, organization = {IEEE}, address = {Phoenix, AZ}, abstract = {Krylov subspace solvers are often the method of choice when solving sparse linear systems iteratively. At the same time, hardware accelerators such as graphics processing units (GPUs) continue to offer significant floating point performance gains for matrix and vector computations through easy-to-use libraries of computational kernels. However, as these libraries are usually composed of a well optimized but limited set of linear algebra operations, applications that use them often fail to leverage the full potential of the accelerator. In this paper we target the acceleration of the BiCGSTAB solver for GPUs, showing that significant improvement can be achieved by reformulating the method and developing application-specific kernels instead of using the generic CUBLAS library provided by NVIDIA. We propose an implementation that benefits from a significantly reduced number of kernel launches and GPU-host communication events, by means of increased data locality and a simultaneous reduction of multiple scalar products. Using experimental data, we show that, depending on the dominance of the untouched sparse matrix vector products, significant performance improvements can be achieved compared to a reference implementation based on the CUBLAS library. We feel that such optimizations are crucial for the subsequent development of high-level sparse linear algebra libraries.}, author = {Stanimire Tomov and Piotr Luszczek and Ichitaro Yamazaki and Jack Dongarra and Hartwig Anzt and William Sawyer} } @conference {828, title = {Performance and Portability with OpenCL for Throughput-Oriented HPC Workloads Across Accelerators, Coprocessors, and Multicore Processors}, booktitle = {5th Workshop on Latest Advances in Scalable Algorithms for Large-Scale Systems (ScalA {\textquoteright}14)}, year = {2014}, month = {2014-11}, publisher = {IEEE}, organization = {IEEE}, address = {New Orleans, LA}, abstract = {Ever since accelerators and coprocessors became the mainstream hardware for throughput-oriented HPC workloads, various programming techniques have been proposed to increase productivity in terms of both performance and ease of use. We evaluate these aspects of OpenCL on a number of hardware platforms for an important subset of dense linear algebra operations that are relevant to a wide range of scientific applications. Our findings indicate that OpenCL portability has improved since our previous publication and many new and surprising usage scenarios are possible that rival those available after decades of software development on the CPUs.
The combined performance-portability metric, even though not promised by the OpenCL standard, reflects the need for tuning performance-critical operations during the porting process, and we show how a large portion of the available efficiency is lost if the tuning is not done correctly.}, doi = {10.1109/ScalA.2014.8}, author = {Azzam Haidar and Chongxiao Cao and Ichitaro Yamazaki and Jack Dongarra and Mark Gates and Piotr Luszczek and Stanimire Tomov} } @article {814, title = {Performance and Reliability Trade-offs for the Double Checkpointing Algorithm}, journal = {International Journal of Networking and Computing}, volume = {4}, number = {1}, year = {2014}, month = {2014}, pages = {32-41}, chapter = {32}, abstract = {Fast checkpointing algorithms require distributed access to stable storage. This paper revisits the approach based upon double checkpointing, and compares the blocking algorithm of Zheng, Shi and Kal{\'e} [23] with the non-blocking algorithm of Ni, Meneses and Kal{\'e} [15] in terms of both performance and risk. We also extend the model proposed in [23, 15] to assess the impact of the overhead associated with non-blocking communications. In addition, we deal with arbitrary failure distributions (as opposed to uniform distributions in [23]). We then provide a new peer-to-peer checkpointing algorithm, called the triple checkpointing algorithm, that can work without additional memory, and achieves both higher efficiency and better risk handling than the double checkpointing algorithm. We provide performance and risk models for all the evaluated protocols, and compare them through comprehensive simulations.}, keywords = {communication contention, in-memory checkpoint, performance, resilience, risk}, issn = {2185-2847}, author = {Jack Dongarra and Thomas Herault and Yves Robert} } @techreport {830, title = {Performance of Various Computers Using Standard Linear Equations Software (Linpack Benchmark Report)}, journal = {University of Tennessee Computer Science Technical Report}, number = {CS-89-85}, year = {2014}, month = {2014-06}, publisher = {University of Tennessee}, abstract = {This report compares the performance of different computer systems in solving dense systems of linear equations. The comparison involves approximately a hundred computers, ranging from the Earth Simulator to personal computers.}, author = {Jack Dongarra} } @conference {764, title = {Power Monitoring with PAPI for Extreme Scale Architectures and Dataflow-based Programming Models}, booktitle = {2014 IEEE International Conference on Cluster Computing}, number = {ICL-UT-14-04}, year = {2014}, month = {2014-09}, publisher = {IEEE}, organization = {IEEE}, address = {Madrid, Spain}, abstract = {For more than a decade, the PAPI performance-monitoring library has provided a clear, portable interface to the hardware performance counters available on all modern CPUs and other components of interest (e.g., GPUs, network, and I/O systems). Most major end-user tools that application developers use to analyze the performance of their applications rely on PAPI to gain access to these performance counters. Energy efficiency constraints have been widely identified as one of the critical roadblocks on the way to larger, more complex high-performance systems. With modern extreme scale machines having hundreds of thousands of cores, the ability to reduce power consumption for each CPU at the software level becomes critically important, both for economic and environmental reasons.
In order for PAPI to continue playing its well-established role in HPC, it is pressing to provide valuable performance data that not only originates from within the processing cores but also delivers insight into the power consumption of the system as a whole. An extensive effort has been made to extend the Performance API to support power monitoring capabilities for various platforms. This paper provides detailed information about three components that allow power monitoring on the Intel Xeon Phi and Blue Gene/Q. Furthermore, we discuss the integration of PAPI in PaRSEC {\textendash} a task-based dataflow-driven execution engine {\textendash} enabling hardware performance counter and power monitoring at true task granularity.}, doi = {10.1109/CLUSTER.2014.6968672}, author = {Heike McCraw and James Ralph and Anthony Danalis and Jack Dongarra} } @conference {871, title = {PTG: An Abstraction for Unhindered Parallelism}, booktitle = {International Workshop on Domain-Specific Languages and High-Level Frameworks for High Performance Computing (WOLFHPC)}, year = {2014}, month = {2014-11}, publisher = {IEEE Press}, organization = {IEEE Press}, address = {New Orleans, LA}, abstract = {
Increased parallelism and use of heterogeneous computing resources are now an established trend in High Performance Computing (HPC), a trend that, looking forward to Exascale, seems bound to intensify. Despite the evolution of hardware over the past decade, the programming paradigm of choice has invariably been derived from Coarse Grain Parallelism with explicit data movements. We argue that message passing has remained the de facto standard in HPC because, until now, the ever-increasing challenges that application developers had to address to create efficient portable applications remained manageable for expert programmers.

Dataflow-based programming is an alternative approach with significant potential. In this paper, we discuss the Parameterized Task Graph (PTG) abstraction and present the specialized input language that we use to specify PTGs in our dataflow task-based runtime system, PaRSEC. This language and the corresponding execution model stand in contrast with the execution model of explicit message passing as well as the model of alternative task-based runtime systems. The Parameterized Task Graph language decouples the expression of the parallelism in the algorithm from the control-flow ordering, load balance, and data distribution. Thus, programs are more adaptable and map more efficiently onto challenging hardware, while maintaining portability across diverse architectures. To support these claims, we discuss the different challenges of HPC programming and how PaRSEC can address them, and we demonstrate that on today{\textquoteright}s large-scale supercomputers, PaRSEC can significantly outperform state-of-the-art MPI applications and libraries, a trend that will increase with future architectural evolution.
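To make the decoupling concrete, the toy C sketch below (an illustration of the idea, not PaRSEC's actual JDF input language) shows the defining property of a PTG: tasks and the data flowing between them are closed-form functions of the task indices, so any task's predecessors can be enumerated without unrolling the whole DAG; the two task classes and the tile count NT are hypothetical:

    #include <stdio.h>
    #define NT 4  /* hypothetical number of tiles */

    /* UPDATE(k,m) consumes the tile produced by FACT(k): the dependence is an
       algebraic function of the indices, not a pointer discovered at run time */
    static int producer_of_update(int k, int m) { (void)m; return k; }

    int main(void) {
        for (int k = 0; k < NT; k++) {
            printf("FACT(%d)\n", k);
            for (int m = k + 1; m < NT; m++)
                printf("  UPDATE(%d,%d) <- FACT(%d)\n", k, m, producer_of_update(k, m));
        }
        return 0;
    }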

}, keywords = {dte, parsec, plasma}, author = {Anthony Danalis and George Bosilca and Aurelien Bouteiller and Thomas Herault and Jack Dongarra} } @techreport {827, title = {PULSAR Users{\textquoteright} Guide, Parallel Ultra-Light Systolic Array Runtime}, journal = {University of Tennessee EECS Technical Report}, number = {UT-EECS-14-733}, year = {2014}, month = {2014-11}, publisher = {University of Tennessee}, abstract = {PULSAR version 2.0, released in November 2014, is a complete programming platform for large-scale distributed memory systems with multicore processors and hardware accelerators. PULSAR provides a simple abstraction layer over multithreading, message passing, and multi-GPU, multi-stream programming. PULSAR offers a general-purpose programming model, suitable for a wide range of scientific and engineering applications. PULSAR was inspired by systolic arrays, popularized by Hsiang-Tsung Kung and Charles E. Leiserson.}, author = {Jack Dongarra and Jakub Kurzak and Piotr Luszczek and Ichitaro Yamazaki} } @inproceedings {835, title = {Scaling Up Matrix Computations on Shared-Memory Manycore Systems with 1000 CPU Cores}, journal = {International Conference on Supercomputing}, year = {2014}, month = {2014-06}, pages = {333-342}, publisher = {ACM}, address = {Munich, Germany}, abstract = {While the growing number of cores per chip allows researchers to solve larger scientific and engineering problems, the parallel efficiency of the deployed parallel software starts to decrease. This unscalability problem happens to both vendor-provided and open-source software and wastes CPU cycles and energy. Expecting CPUs with hundreds of cores to be imminent, we have designed a new framework to perform matrix computations for massively many cores. Our performance analysis on manycore systems shows that the unscalability bottleneck is related to Non-Uniform Memory Access (NUMA): memory bus contention and remote memory access latency. To overcome the bottleneck, we have designed NUMA-aware tile algorithms with the help of a dynamic scheduling runtime system to minimize NUMA memory accesses. The main idea is to identify the data that is either read a number of times or written once by a thread resident on a remote NUMA node, and then to utilize the runtime system to conduct data caching and movement between different NUMA nodes. Based on the experiments with QR factorizations, we demonstrate that our framework is able to achieve great scalability on a 48-core AMD Opteron system (e.g., parallel efficiency drops only 3\% from one core to 48 cores). We also deploy our framework to an extreme-scale shared-memory SGI machine which has 1024 CPU cores and runs a single Linux operating system image. Our framework continues to scale well, and can outperform the vendor-optimized Intel MKL library by up to 750\%.}, isbn = {978-1-4503-2642-1}, doi = {10.1145/2597652.2597670}, author = {Fengguang Song and Jack Dongarra} } @conference {714, title = {Self-Adaptive Multiprecision Preconditioners on Multicore and Manycore Architectures}, booktitle = {VECPAR 2014}, year = {2014}, month = {2014-06}, address = {Eugene, OR}, abstract = {Based on the premise that preconditioners needed for scientific computing are not only required to be robust in the numerical sense, but also scalable for up to thousands of light-weight cores, we argue that this two-fold goal is achieved for the recently developed self-adaptive multi-elimination preconditioner.
For this purpose, we revise the underlying idea and analyze the performance of implementations realized in the PARALUTION and MAGMA open-source software libraries on GPU architectures (using either CUDA or OpenCL), Intel{\textquoteright}s Many Integrated Core Architecture, and Intel{\textquoteright}s Sandy Bridge processor. The comparison with other well-established preconditioners like multi-colored Gauss-Seidel, ILU(0) and multi-colored ILU(0) shows that the twofold goal of a numerically stable, cross-platform performant algorithm is achieved.}, author = {Hartwig Anzt and Dimitar Lukarski and Stanimire Tomov and Jack Dongarra} } @conference {767, title = {A Step towards Energy Efficient Computing: Redesigning A Hydrodynamic Application on CPU-GPU}, booktitle = {IPDPS 2014}, year = {2014}, month = {2014-05}, publisher = {IEEE}, organization = {IEEE}, address = {Phoenix, AZ}, abstract = {Power and energy consumption are becoming an increasing concern in high performance computing. Compared to multi-core CPUs, GPUs have a much better performance per watt. In this paper we discuss efforts to redesign the most computation intensive parts of BLAST, an application that solves the equations for compressible hydrodynamics with high order finite elements, using GPUs [10, 1]. In order to exploit the hardware parallelism of GPUs and achieve high performance, we implemented custom linear algebra kernels. We intensively optimized our CUDA kernels by exploiting the memory hierarchy; these kernels substantially exceed the vendor{\textquoteright}s library routines in performance. We proposed an autotuning technique to adapt our CUDA kernels to the orders of the finite element method. Compared to a previous base implementation, our redesign and optimization lowered the energy consumption of the GPU in two aspects: 60\% less time to solution and 10\% less power required. Compared to the CPU-only solution, our GPU-accelerated BLAST obtained a 2.5x overall speedup and 1.42x energy efficiency (greenup) using 4th order (Q4) finite elements, and a 1.9x speedup and 1.27x greenup using 2nd order (Q2) finite elements.}, keywords = {Computer science, CUDA, FEM, Finite element method, linear algebra, nVidia, Tesla K20}, author = {Tingxing Dong and Veselin Dobrev and Tzanio Kolev and Robert Rieben and Stanimire Tomov and Jack Dongarra} } @conference {809, title = {Unified Development for Mixed Multi-GPU and Multi-Coprocessor Environments using a Lightweight Runtime Environment}, booktitle = {IPDPS 2014}, year = {2014}, month = {2014-05}, publisher = {IEEE}, organization = {IEEE}, address = {Phoenix, AZ}, abstract = {Many of the heterogeneous resources available to modern computers are designed for different workloads. In order to efficiently use GPU resources, the workload must have a greater degree of parallelism than a workload designed for multicore CPUs. Conceptually, the Intel Xeon Phi coprocessors are capable of handling workloads somewhere in between the two. This multitude of applicable workloads will likely lead to mixing multicore CPUs, GPUs, and Intel coprocessors in multi-user environments that must offer adequate computing facilities for a wide range of workloads. In this work, we are using a lightweight runtime environment to manage the resource-specific workload, and to control the dataflow and parallel execution in two-way hybrid systems. The lightweight runtime environment uses task superscalar concepts to enable the developer to write serial code while providing parallel execution.
In addition, our task abstractions enable unified algorithmic development across all the heterogeneous resources. We provide performance results for dense linear algebra applications, demonstrating the effectiveness of our approach and full utilization of a wide variety of accelerator hardware.}, keywords = {algorithms, Computer science, CUDA, Heterogeneous systems, Intel Xeon Phi, linear algebra, nVidia, Tesla K20, Tesla M2090}, author = {Azzam Haidar and Chongxiao Cao and Jack Dongarra and Piotr Luszczek and Stanimire Tomov} } @conference {768, title = {Utilizing Dataflow-based Execution for Coupled Cluster Methods}, booktitle = {2014 IEEE International Conference on Cluster Computing}, number = {ICL-UT-14-02}, year = {2014}, month = {2014-09}, publisher = {IEEE}, organization = {IEEE}, address = {Madrid, Spain}, abstract = {Computational chemistry is one of the driving forces of High Performance Computing. Many-body methods, such as the Coupled Cluster (CC) methods of the quantum chemistry package NWCHEM, are of particular interest for the applied chemistry community. Harnessing large fractions of the processing power of modern large-scale computing platforms has become increasingly difficult. With the increase in scale, complexity, and heterogeneity of modern platforms, traditional programming models fail to deliver the expected performance scalability. On our way to Exascale and with these extremely hybrid platforms, dataflow-based programming models may be the only viable way for achieving and maintaining computation at scale. In this paper, we discuss a dataflow-based programming model and its applicability to NWCHEM{\textquoteright}s CC methods. Our dataflow version of the CC kernels breaks down the algorithm into fine-grained tasks with explicitly defined data dependencies. As a result, many of the traditional synchronization points can be eliminated, allowing for a dynamic reshaping of the execution based on the ongoing availability of computational resources. We build this experiment using PARSEC {\textendash} a task-based dataflow-driven execution engine {\textendash} that enables efficient task scheduling on distributed systems, providing a desirable portability layer for application developers.}, author = {Heike McCraw and Anthony Danalis and George Bosilca and Jack Dongarra and Karol Kowalski and Theresa Windus} } @article {icl:721, title = {Accelerating Linear System Solutions Using Randomization Techniques}, journal = {ACM Transactions on Mathematical Software (also LAWN 246)}, volume = {39}, year = {2013}, month = {2013-02}, abstract = {We illustrate how linear algebra calculations can be enhanced by statistical techniques in the case of a square linear system Ax = b. We study a random transformation of A that enables us to avoid pivoting and then to reduce the amount of communication. Numerical experiments show that this randomization can be performed at a very affordable computational price while providing us with a satisfying accuracy when compared to partial pivoting. This random transformation, called the Partial Random Butterfly Transformation (PRBT), is optimized in terms of data storage and flops count.
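For intuition, a depth-1 random butterfly is B = (1/sqrt(2)) [[R, S], [R, -S]] with R and S random diagonal matrices, and a recursive butterfly stacks such layers; the C sketch below applies a single depth-1 butterfly to a vector in O(n) flops (an illustration of the definition, not the authors' optimized kernel; a two-sided transform of a matrix amounts to running this kernel over its rows and columns):

    #include <math.h>

    /* y = B x for B = (1/sqrt(2)) [[R, S], [R, -S]], R = diag(r), S = diag(s);
       n is assumed even */
    void butterfly_apply(int n, const double *r, const double *s,
                         const double *x, double *y) {
        int h = n / 2;
        double c = 1.0 / sqrt(2.0);
        for (int i = 0; i < h; i++) {
            y[i]     = c * (r[i] * x[i] + s[i] * x[i + h]);
            y[i + h] = c * (r[i] * x[i] - s[i] * x[i + h]);
        }
    }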
We propose a solver where PRBT and the LU factorization with no pivoting take advantage of current hybrid multicore/GPU machines, and we compare its Gflop/s performance with a solver implemented in a current parallel library.}, keywords = {algorithms, dense linear algebra, experimentation, graphics processing units, linear systems, lu factorization, multiplicative preconditioning, numerical linear algebra, performance, plasma, randomization}, doi = {10.1145/2427023.2427025}, url = {http://dl.acm.org/citation.cfm?id=2427025}, author = {Marc Baboulin and Jack Dongarra and Julien Herrmann and Stanimire Tomov} } @techreport {694, title = {Assessing the impact of ABFT and Checkpoint composite strategies}, journal = {University of Tennessee Computer Science Technical Report}, number = {ICL-UT-13-03}, year = {2013}, abstract = {Algorithm-specific fault-tolerant approaches promise unparalleled scalability and performance in failure-prone environments. With the advances in the theoretical and practical understanding of algorithmic traits enabling such approaches, a growing number of frequently used algorithms (including all widely used factorization kernels) have been proven capable of such properties. These algorithms provide a temporal section of the execution during which the data is protected by its own intrinsic properties, and can be algorithmically recomputed without the need of checkpoints. However, while typical scientific applications spend a significant fraction of their execution time in library calls that can be ABFT-protected, they interleave sections that are difficult or even impossible to protect with ABFT. As a consequence, the only fault-tolerance approach that is currently used for these applications is checkpoint/restart. In this paper we propose a model and a simulator to investigate the behavior of a composite protocol that alternates between ABFT and checkpoint/restart protection for effective protection of each phase of an iterative application composed of ABFT-aware and ABFT-unaware sections. We highlight that this approach drastically increases the performance delivered by the system, especially at scale, by providing means to rarefy the checkpoints while simultaneously decreasing the volume of data needed to be checkpointed.}, keywords = {ABFT, checkpoint, fault-tolerance, High-performance computing, resilience}, author = {George Bosilca and Aurelien Bouteiller and Thomas Herault and Yves Robert and Jack Dongarra} } @conference {1383, title = {Beyond the CPU: Hardware Performance Counter Monitoring on Blue Gene/Q}, booktitle = {International Supercomputing Conference 2013 (ISC{\textquoteright}13)}, year = {2013}, month = {2013-06}, publisher = {Springer}, organization = {Springer}, address = {Leipzig, Germany}, author = {Heike McCraw and Dan Terpstra and Jack Dongarra and Kris Davis and Roy Musselman} } @article {icl:702, title = {BlackjackBench: Portable Hardware Characterization with Automated Results Analysis}, journal = {The Computer Journal}, year = {2013}, month = {2013-03}, abstract = {DARPA{\textquoteright}s AACE project aimed to develop Architecture Aware Compiler Environments. Such a compiler automatically characterizes the targeted hardware and optimizes the application codes accordingly. We present the BlackjackBench suite, a collection of portable micro-benchmarks that automate system characterization, plus statistical analysis techniques for interpreting the results.
The BlackjackBench benchmarks discover the effective sizes and speeds of the hardware environment rather than the often unattainable peak values. We aim at hardware characteristics that can be observed by running executables generated by existing compilers from standard C codes. We characterize the memory hierarchy, including cache sharing and non-uniform memory access characteristics of the system, properties of the processing cores affecting the instruction execution speed, and the length of the operating system scheduler time slot. We show how these features of modern multicores can be discovered programmatically. We also show how the features could potentially interfere with each other, resulting in incorrect interpretation of the results, and how established classification and statistical analysis techniques can reduce experimental noise and aid automatic interpretation of results. We show how effective hardware metrics from our probes allow guided tuning of computational kernels that outperform an autotuning library further tuned by the hardware vendor.}, keywords = {hardware characterization, micro-benchmarks, statistical analysis}, doi = {10.1093/comjnl/bxt057}, author = {Anthony Danalis and Piotr Luszczek and Gabriel Marin and Jeffrey Vetter and Jack Dongarra} } @article {icl:719, title = {A Block-Asynchronous Relaxation Method for Graphics Processing Units}, journal = {Journal of Parallel and Distributed Computing}, volume = {73}, year = {2013}, month = {2013-12}, pages = {1613{\textendash}1626}, abstract = {In this paper, we analyze the potential of asynchronous relaxation methods on Graphics Processing Units (GPUs). We develop asynchronous iteration algorithms in CUDA and compare them with parallel implementations of synchronous relaxation methods on CPU- or GPU-based systems. For a set of test matrices from UFMC we investigate convergence behavior, performance and tolerance to hardware failure. We observe that even for our most basic asynchronous relaxation scheme, the method can efficiently leverage the GPU{\textquoteright}s computing power and is, despite its lower convergence rate compared to the Gauss{\textendash}Seidel relaxation, still able to provide solution approximations of certain accuracy in considerably shorter time than CPU-based Gauss{\textendash}Seidel or GPU-based Jacobi. Hence, it overcompensates for the slower convergence by exploiting the scalability and the good fit of the asynchronous schemes for the highly parallel GPU architectures. Further, enhancing the most basic asynchronous approach with hybrid schemes {\textendash} using multiple iterations within the {\textquoteleft}{\textquoteleft}subdomain{\textquoteright}{\textquoteright} handled by a GPU thread block {\textendash} we manage to not only recover the loss of global convergence but often accelerate convergence by up to a factor of two, while keeping the execution time of a global iteration practically the same.
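The core of the scheme fits in a few lines of C: a synchronous Jacobi sweep reads only the previous iterate, whereas the asynchronous variant updates the iterate in place so each component uses whatever values happen to be current; the dense, row-major storage below is an assumption made for brevity:

    /* one in-place relaxation sweep over a dense n x n system A x = b;
       when several threads run such sweeps concurrently without barriers,
       each update mixes old and new entries of x -- the asynchronous model */
    void async_sweep(int n, const double *A, const double *b, double *x) {
        for (int i = 0; i < n; i++) {
            double s = b[i];
            for (int j = 0; j < n; j++)
                if (j != i) s -= A[i * n + j] * x[j];  /* x[j] may be old or new */
            x[i] = s / A[i * n + i];
        }
    }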
Combined with the advantageous properties of asynchronous iteration methods with respect to hardware failure, this identifies the high potential of the asynchronous methods for Exascale computing.}, doi = {http://dx.doi.org/10.1016/j.jpdc.2013.05.008}, author = {Hartwig Anzt and Stanimire Tomov and Jack Dongarra and Vincent Heuveline} } @techreport {681, title = {clMAGMA: High Performance Dense Linear Algebra with OpenCL}, journal = {University of Tennessee Technical Report (Lawn 275)}, number = {UT-CS-13-706}, year = {2013}, month = {2013-03}, publisher = {University of Tennessee}, abstract = {This paper presents the design and implementation of several fundamental dense linear algebra (DLA) algorithms in OpenCL. In particular, these are linear system solvers and eigenvalue problem solvers. Further, we give an overview of the clMAGMA library, an open source, high performance OpenCL library that incorporates the developments presented, and in general provides to heterogeneous architectures the DLA functionality of the popular LAPACK library. The LAPACK-compliance and use of OpenCL simplify the use of clMAGMA in applications, while providing them with portably performant DLA. High performance is obtained through use of the high-performance OpenCL BLAS, hardware- and OpenCL-specific tuning, and a hybridization methodology where we split the algorithm into computational tasks of various granularities. Execution of those tasks is properly scheduled over the heterogeneous hardware components by minimizing data movements and mapping algorithmic requirements to the architectural strengths of the various heterogeneous hardware components.}, author = {Chongxiao Cao and Jack Dongarra and Peng Du and Mark Gates and Piotr Luszczek and Stanimire Tomov} } @article {750, title = {Correlated Set Coordination in Fault Tolerant Message Logging Protocols}, journal = {Concurrency and Computation: Practice and Experience}, volume = {25}, year = {2013}, month = {2013-03}, pages = {572-585}, abstract = {With our current expectation for the exascale systems, composed of hundreds of thousands of many-core nodes, the mean time between failures will become small, even under the most optimistic assumptions. One of the most scalable checkpoint restart techniques, the message logging approach, is the most challenged when the number of cores per node increases because of the high overhead of saving the message payload. Fortunately, for two processes on the same node, the failure probability is correlated, meaning that coordinated recovery is free. In this paper, we propose an intermediate approach that uses coordination between correlated processes but retains the scalability advantage of message logging between independent ones. The algorithm still belongs to the family of event logging protocols but eliminates the need for costly payload logging between coordinated processes.}, doi = {10.1002/cpe.2859}, author = {Aurelien Bouteiller and Thomas Herault and George Bosilca and Jack Dongarra} } @inproceedings {716, title = {CPU-GPU Hybrid Bidiagonal Reduction With Soft Error Resilience}, journal = {ScalA {\textquoteright}13 Proceedings of the Workshop on Latest Advances in Scalable Algorithms for Large-Scale Systems}, year = {2013}, month = {2013-11}, address = {Montpellier, France}, abstract = {Soft errors pose a real challenge to applications running on modern hardware as the feature size becomes smaller and the integration density increases for both the modern processors and the memory chips.
Soft errors manifest themselves as bit-flips that alter the user value, and numerical software is a category of software that is sensitive to such data changes. In this paper, we present a design of a bidiagonal reduction algorithm that is resilient to soft errors, and we also describe its implementation on hybrid CPU-GPU architectures. Our fault-tolerant algorithm employs Algorithm Based Fault Tolerance, combined with reverse computation, to detect, locate, and correct soft errors. The tests were performed on a Sandy Bridge CPU coupled with an NVIDIA Kepler GPU. The included experiments show that our resilient bidiagonal reduction algorithm adds very little overhead compared to the error-prone code. At matrix size 10110 x 10110, our algorithm only has a performance overhead of 1.085\% when one error occurs, and 0.354\% when no errors occur.}, author = {Yulu Jia and Piotr Luszczek and George Bosilca and Jack Dongarra} } @article {icl:698, title = {Dense Linear Algebra on Distributed Heterogeneous Hardware with a Symbolic DAG Approach}, journal = {Scalable Computing and Communications: Theory and Practice}, year = {2013}, month = {2013-03}, pages = {699-735}, publisher = {John Wiley \& Sons}, author = {George Bosilca and Aurelien Bouteiller and Anthony Danalis and Thomas Herault and Piotr Luszczek and Jack Dongarra}, editor = {Samee Khan and Lin-Wang Wang and Albert Zomaya} } @techreport {703, title = {Designing LU-QR hybrid solvers for performance and stability}, journal = {University of Tennessee Computer Science Technical Report (also LAWN 282)}, number = {ut-eecs-13-719}, year = {2013}, month = {2013-10}, publisher = {University of Tennessee}, author = {Mathieu Faverge and Julien Herrmann and Julien Langou and Bradley Lowery and Yves Robert and Jack Dongarra} } @techreport {689, title = {Dynamically balanced synchronization-avoiding LU factorization with multicore and GPUs}, journal = {University of Tennessee Computer Science Technical Report}, number = {ut-cs-13-713}, year = {2013}, month = {2013-07}, abstract = {Graphics processing units (GPUs) have brought huge performance improvements in the scientific and numerical fields. We present an efficient hybrid CPU/GPU computing approach that is portable, dynamically and efficiently balances the workload between the CPUs and the GPUs, and avoids data transfer bottlenecks that are frequently present in numerical algorithms. Our approach determines the amount of initial work to assign to the CPUs before the execution, and then dynamically balances workloads during the execution. We then present a theoretical model to guide the choice of the initial amount of work for the CPUs. The validation of our model allows our approach to self-adapt on any architecture using the manufacturer{\textquoteright}s characteristics of the underlying machine. We illustrate our method for the LU factorization. For this case, we show that the use of our approach combined with a communication-avoiding LU algorithm is efficient. For example, our experiments on high-end hybrid CPU/GPU systems show that our dynamically balanced synchronization-avoiding LU is both multicore and GPU scalable. Comparisons with state-of-the-art libraries like MKL (for multicore) and MAGMA (for hybrid systems) are provided, demonstrating significant performance improvements. The approach is applicable to other linear algebra algorithms.
The scheduling mechanisms and tuning models can be incorporated into dynamic runtime systems/schedulers and autotuning frameworks, respectively, for hybrid CPU/MIC/GPU architectures.}, author = {Simplice Donfack and Stanimire Tomov and Jack Dongarra} } @conference {700, title = {Efficient Parallelization of Batch Pattern Training Algorithm on Many-core and Cluster Architectures}, booktitle = {7th IEEE International Conference on Intelligent Data Acquisition and Advanced Computing Systems}, year = {2013}, month = {2013-09}, address = {Berlin, Germany}, abstract = {This paper presents experimental research on the parallel batch pattern back propagation training algorithm, using a recirculation neural network on many-core high-performance computing systems as an example. The choice of the recirculation neural network among the multilayer perceptron, recurrent, and radial basis neural networks is justified. The model of a recirculation neural network and the usual sequential batch pattern algorithm for its training are theoretically described. An algorithmic description of the parallel version of the batch pattern training method is presented. The experimental research is carried out using the Open MPI, Mvapich, and Intel MPI message passing libraries. The results obtained on a many-core AMD system and Intel MIC are compared with the results obtained on a cluster system. Our results show that the parallelization efficiency is about 95\% on 12 cores located inside one physical AMD processor for the considered minimum and maximum scenarios. The parallelization efficiency is about 70-75\% on 48 AMD cores for the minimum and maximum scenarios. These results are higher by 15-36\% (depending on the version of MPI library) in comparison with the results obtained on 48 cores of a cluster system. The parallelization efficiency obtained on the Intel MIC architecture is surprisingly low, asking for deeper analysis.}, keywords = {many-core system, parallel batch pattern training, parallelization efficiency, recirculation neural network}, author = {Volodymyr Turchenko and George Bosilca and Aurelien Bouteiller and Jack Dongarra} } @article {icl:538, title = {Enabling Workflows in GridSolve: Request Sequencing and Service Trading}, journal = {Journal of Supercomputing}, volume = {64}, year = {2013}, month = {2013-06}, pages = {1133-1152}, chapter = {1133}, abstract = {GridSolve employs an RPC-based client-agent-server model for solving computational problems. There are two deficiencies associated with GridSolve when a computational problem essentially forms a workflow consisting of a sequence of tasks with data dependencies between them. First, intermediate results are always passed through the client, resulting in unnecessary data transport. Second, since the execution of each individual task is a separate RPC session, it is difficult to enable any potential parallelism among tasks. This paper presents a request sequencing technique that addresses these deficiencies and enables workflow executions. Building on the request sequencing work, one way to generate workflows is by taking higher-level service requests and decomposing them into a sequence of simpler service requests using a technique called service trading. A service trading component is added to GridSolve to take advantage of the new dynamic request sequencing.
The features described here include automatic DAG construction and data dependency analysis, direct interserver data transfer, parallel task execution capabilities, and a service trading component.}, keywords = {grid computing, gridpac, netsolve, service trading, workflow applications}, issn = {1573-0484}, doi = {10.1007/s11227-010-0549-1}, author = {Yinan Li and Asim YarKhan and Jack Dongarra and Keith Seymour and Aurelie Hurault} } @article {869, title = {An evaluation of User-Level Failure Mitigation support in MPI}, journal = {Computing}, volume = {95}, year = {2013}, month = {2013-12}, pages = {1171-1184}, abstract = {As the scale of computing platforms becomes increasingly extreme, the requirements for application fault tolerance are increasing as well. Techniques to address this problem by improving the resilience of algorithms have been developed, but they currently receive no support from the programming model, and without such support, they are bound to fail. This paper discusses the failure-free overhead and recovery impact of the user-level failure mitigation proposal presented in the MPI Forum. Experiments demonstrate that fault-aware MPI has little or no impact on performance for a range of applications, and produces satisfactory recovery times when there are failures.}, keywords = {Fault tolerance, MPI, User-level fault mitigation}, doi = {10.1007/s00607-013-0331-3}, author = {Wesley Bland and Aurelien Bouteiller and Thomas Herault and Joshua Hursey and George Bosilca and Jack Dongarra} } @article {691, title = {Extending the scope of the Checkpoint-on-Failure protocol for forward recovery in standard MPI}, journal = {Concurrency and Computation: Practice and Experience}, year = {2013}, month = {2013-07}, abstract = {Most predictions of exascale machines picture billion-way parallelism, encompassing not only millions of cores but also tens of thousands of nodes. Even considering extremely optimistic advances in hardware reliability, probabilistic amplification entails that failures will be unavoidable. Consequently, software fault tolerance is paramount to maintain future scientific productivity. Two major problems hinder ubiquitous adoption of fault tolerance techniques: (i) traditional checkpoint-based approaches incur a steep overhead on failure-free operations and (ii) the dominant programming paradigm for parallel applications (the message passing interface (MPI) Standard) offers extremely limited support of software-level fault tolerance approaches. In this paper, we present an approach that relies exclusively on the features of a high-quality implementation, as defined by the current MPI Standard, to enable advanced forward recovery techniques, without incurring the overhead of customary periodic checkpointing. With our approach, when failure strikes, applications regain control to make a checkpoint before quitting execution. This checkpoint is in reaction to the failure occurrence rather than periodic. This checkpoint is reloaded in a new MPI application, which restores a sane environment for the forward, application-based recovery technique to repair the failure-damaged dataset. The validity and performance of this approach are evaluated on large-scale systems, using the QR factorization as an example.
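The control flow of Checkpoint-on-Failure can be sketched with standard MPI calls only; in this illustration the write_checkpoint() helper is hypothetical, and the single Allreduce stands in for any communication in the protected section:

    #include <mpi.h>
    #include <stdlib.h>

    void write_checkpoint(void);  /* hypothetical application-level helper */

    void cof_step(MPI_Comm comm, double *buf, int n) {
        /* ask the MPI library to return errors instead of aborting the job */
        MPI_Comm_set_errhandler(comm, MPI_ERRORS_RETURN);
        int rc = MPI_Allreduce(MPI_IN_PLACE, buf, n, MPI_DOUBLE, MPI_SUM, comm);
        if (rc != MPI_SUCCESS) {
            /* failure detected: checkpoint once, in reaction to the failure,
               then quit; a new MPI job reloads the checkpoint and repairs the
               damaged data with a forward-recovery technique such as ABFT */
            write_checkpoint();
            exit(EXIT_FAILURE);
        }
    }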
}, doi = {10.1002/cpe.3100}, url = {http://doi.wiley.com/10.1002/cpe.3100}, author = {Wesley Bland and Peng Du and Aurelien Bouteiller and Thomas Herault and George Bosilca and Jack Dongarra} } @article {752, title = {Hierarchical QR Factorization Algorithms for Multi-core Cluster Systems}, journal = {Parallel Computing}, volume = {39}, year = {2013}, month = {2013-05}, pages = {212-232}, abstract = {This paper describes a new QR factorization algorithm which is especially designed for massively parallel platforms combining parallel distributed nodes, where a node is a multi-core processor. These platforms represent the present and the foreseeable future of high-performance computing. Our new QR factorization algorithm falls in the category of the tile algorithms which naturally enables good data locality for the sequential kernels executed by the cores (high sequential performance), low number of messages in a parallel distributed setting (small latency term), and fine granularity (high parallelism). Each tile algorithm is uniquely characterized by its sequence of reduction trees. In the context of a cluster of nodes, in order to minimize the number of inter-processor communications (aka, {\textquoteleft}{\textquoteleft}communication-avoiding{\textquoteright}{\textquoteright}), it is natural to consider hierarchical trees composed of an {\textquoteleft}{\textquoteleft}inter-node{\textquoteright}{\textquoteright} tree which acts on top of {\textquoteleft}{\textquoteleft}intra-node{\textquoteright}{\textquoteright} trees. At the intra-node level, we propose a hierarchical tree made of three levels: (0) {\textquoteleft}{\textquoteleft}TS level{\textquoteright}{\textquoteright} for cache-friendliness, (1) {\textquoteleft}{\textquoteleft}low-level{\textquoteright}{\textquoteright} for decoupled highly parallel inter-node reductions, (2) {\textquoteleft}{\textquoteleft}domino level{\textquoteright}{\textquoteright} to efficiently resolve interactions between local reductions and global reductions. Our hierarchical algorithm and its implementation are flexible and modular, and can accommodate several kernel types, different distribution layouts, and a variety of reduction trees at all levels, both inter-node and intra-node. Numerical experiments on a cluster of multi-core nodes (i) confirm that each of the four levels of our hierarchical tree contributes to building up performance and (ii) provide insights into how these levels influence performance and interact with each other. Our implementation of the new algorithm with the DAGUE scheduling tool significantly outperforms currently available QR factorization software for all matrix shapes, thereby bringing a new advance in numerical linear algebra for petascale and exascale platforms.}, keywords = {Cluster, Distributed memory, Hierarchical architecture, multi-core, numerical linear algebra, QR factorization}, author = {Jack Dongarra and Mathieu Faverge and Thomas Herault and Mathias Jacquelin and Julien Langou and Yves Robert} } @article {759, title = {High Performance Bidiagonal Reduction using Tile Algorithms on Homogeneous Multicore Architectures}, journal = {ACM Transactions on Mathematical Software (TOMS)}, volume = {39}, number = {16}, year = {2013}, abstract = {This article presents a new high-performance bidiagonal reduction (BRD) for homogeneous multicore architectures.
This article is an extension of the high-performance tridiagonal reduction implemented by the same authors [Luszczek et al., IPDPS 2011] to the BRD case. The BRD is the first step toward computing the singular value decomposition of a matrix, which is one of the most important algorithms in numerical linear algebra due to its broad impact in computational science. The high performance of the BRD described in this article comes from the combination of four important features: (1) tile algorithms with tile data layout, which provide an efficient data representation in main memory; (2) a two-stage reduction approach that casts most of the computation during the first stage (reduction to band form) into calls to Level 3 BLAS and reduces the memory traffic during the second stage (reduction from band to bidiagonal form) by using high-performance kernels optimized for cache reuse; (3) a data dependence translation layer that maps the general algorithm with column-major data layout into the tile data layout; and (4) a dynamic runtime system that efficiently schedules the newly implemented kernels across the processing units and ensures that the data dependencies are not violated. A detailed analysis is provided to understand the critical impact of the tile size on the total execution time, which also corresponds to the matrix bandwidth size after the reduction of the first stage. The performance results show a significant improvement over currently established alternatives. The new high-performance BRD achieves up to a 30-fold speedup on a 16-core Intel Xeon machine with a 12000{\texttimes}12000 matrix size against the state-of-the-art open source and commercial numerical software packages, namely LAPACK, compiled with optimized and multithreaded BLAS from MKL, as well as Intel MKL version 10.2.}, keywords = {algorithms, bidiagonal reduction, bulge chasing, data translation layer, dynamic scheduling, high performance kernels, performance, tile algorithms, two-stage approach}, doi = {10.1145/2450153.2450154}, author = {Hatem Ltaief and Piotr Luszczek and Jack Dongarra} } @inbook {754, title = {HPC Challenge: Design, History, and Implementation Highlights}, booktitle = {Contemporary High Performance Computing: From Petascale Toward Exascale}, year = {2013}, publisher = {Taylor and Francis}, organization = {Taylor and Francis}, chapter = {2}, address = {Boca Raton, FL}, keywords = {exascale, hpc challenge, hpcc}, isbn = {978-1-4665-6834-1}, author = {Jack Dongarra and Piotr Luszczek} } @techreport {690, title = {Hydrodynamic Computation with Hybrid Programming on CPU-GPU Clusters}, journal = {University of Tennessee Computer Science Technical Report}, number = {ut-cs-13-714}, year = {2013}, month = {2013-07}, abstract = {The explosion of parallelism and heterogeneity in today{\textquoteright}s computer architectures has created opportunities as well as challenges for redesigning legacy numerical software to harness the power of new hardware. In this paper we address the main challenges in redesigning BLAST {\textendash} a numerical library that solves the equations of compressible hydrodynamics using high-order finite element methods (FEM) in a moving Lagrangian frame {\textendash} to support CPU-GPU clusters. We use a hybrid MPI + OpenMP + CUDA programming model that includes two layers: domain-decomposed MPI parallelization and OpenMP + CUDA acceleration in a given domain.
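A minimal skeleton of that two-layer model, with the CUDA kernels reduced to a comment and the subdomain size NLOC chosen arbitrarily, might look as follows:

    #include <stdio.h>
    #include <mpi.h>

    #define NLOC 1000  /* hypothetical subdomain size */

    int main(int argc, char **argv) {
        int provided, rank;
        /* layer 1: one MPI rank per subdomain of the decomposed mesh */
        MPI_Init_thread(&argc, &argv, MPI_THREAD_FUNNELED, &provided);
        MPI_Comm_rank(MPI_COMM_WORLD, &rank);

        double local[NLOC];
        /* layer 2: OpenMP threads (or a CUDA kernel launch) work inside
           the subdomain owned by this rank */
        #pragma omp parallel for
        for (int i = 0; i < NLOC; i++)
            local[i] = rank + 1e-3 * i;  /* stand-in for the FEM kernels */

        /* halo exchanges between neighboring subdomains would go here */
        printf("rank %d: local[0] = %f\n", rank, local[0]);
        MPI_Finalize();
        return 0;
    }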
To optimize the code, we implemented custom linear algebra kernels and introduced an auto-tuning technique to deal with heterogeneity and load balancing at runtime. Our tests show that 12 Intel Xeon cores and two M2050 GPUs deliver a 24x speedup compared to a single core, and a 2.5x speedup compared to 12 MPI tasks in one node. Further, we achieve perfect weak scaling, demonstrated on a cluster with up to 64 GPUs in 32 nodes. Our choice of programming model and proposed solutions, as related to parallelism and load balancing, specifically targets high-order FEM discretizations, and can be used equally successfully for applications beyond hydrodynamics. A major accomplishment is that we further establish the appeal of high-order FEMs, which, despite their better approximation properties, are often avoided due to their high computational cost. GPUs, as we show, have the potential to make them the method of choice, as the increased computational cost is also localized, e.g., cast as Level 3 BLAS, and thus can be done very efficiently (close to {\textquotedblleft}free{\textquotedblright} relative to the usual overheads inherent in sparse computations).}, author = {Tingxing Dong and Veselin Dobrev and Tzanio Kolev and Robert Rieben and Stanimire Tomov and Jack Dongarra} } @article {icl:691, title = {Implementing a Blocked Aasen{\textquoteright}s Algorithm with a Dynamic Scheduler on Multicore Architectures}, journal = {IPDPS 2013 (submitted)}, year = {2013}, month = {2013-00}, address = {Boston, MA}, abstract = {Factorization of a dense symmetric indefinite matrix is a key computational kernel in many scientific and engineering simulations. However, there is no scalable factorization algorithm that takes advantage of the symmetry and guarantees numerical stability through pivoting at the same time. This is because such an algorithm exhibits many of the fundamental challenges in parallel programming like irregular data accesses and irregular task dependencies. In this paper, we address these challenges in a tiled implementation of a blocked Aasen{\textquoteright}s algorithm using a dynamic scheduler. To fully exploit the limited parallelism in this left-looking algorithm, we study several performance enhancing techniques; e.g., parallel reduction to update a panel, tall-skinny LU factorization algorithms to factorize the panel, and a parallel implementation of symmetric pivoting. Our performance results on up to 48 AMD Opteron processors demonstrate that our implementation obtains speedups of up to 2.8 over MKL, while losing only one or two digits in the computed residual norms.}, author = {Ichitaro Yamazaki and Dulceneia Becker and Jack Dongarra and Alex Druinsky and I. Peled and Sivan Toledo and Grey Ballard and James Demmel and Oded Schwartz} } @techreport {682, title = {Implementing a systolic algorithm for QR factorization on multicore clusters with PaRSEC}, journal = {Lawn 277}, number = {UT-CS-13-709}, year = {2013}, month = {2013-05}, abstract = {This article introduces a new systolic algorithm for QR factorization, and its implementation on a supercomputing cluster of multicore nodes. The algorithm targets a virtual 3D-array and requires only local communications. The implementation of the algorithm uses threads at the node level, and MPI for inter-node communications. The complexity of the implementation is addressed with the PaRSEC software, which takes as input a parametrized dependence graph, which is derived from the algorithm, and only requires the user to decide, at the high level, the allocation of tasks to nodes.
We show that the new algorithm exhibits competitive performance with state-of-the-art QR routines on a supercomputer called Kraken, demonstrating that high-level programming environments, such as PaRSEC, provide a viable alternative for enhancing the production of quality software on complex and hierarchical architectures.}, author = {Guillaume Aupy and Mathieu Faverge and Yves Robert and Jakub Kurzak and Piotr Luszczek and Jack Dongarra} } @techreport {704, title = {An Improved Parallel Singular Value Algorithm and Its Implementation for Multicore Hardware}, journal = {University of Tennessee Computer Science Technical Report (also LAWN 283)}, number = {ut-eecs-13-720}, year = {2013}, month = {2013-10}, publisher = {University of Tennessee}, keywords = {lapack, plasma, scalapack}, author = {Azzam Haidar and Piotr Luszczek and Jakub Kurzak and Jack Dongarra} } @conference {696, title = {An Improved Parallel Singular Value Algorithm and Its Implementation for Multicore Hardware}, booktitle = {Supercomputing 2013}, year = {2013}, month = {2013-11}, address = {Denver, CO}, author = {Azzam Haidar and Piotr Luszczek and Jakub Kurzak and Jack Dongarra} } @inbook {762, title = {Keeneland: Computational Science Using Heterogeneous GPU Computing}, booktitle = {Contemporary High Performance Computing: From Petascale Toward Exascale}, series = {CRC Computational Science Series}, year = {2013}, publisher = {Taylor and Francis}, organization = {Taylor and Francis}, chapter = {7}, address = {Boca Raton, FL}, abstract = {The Keeneland Project is a five-year Track 2D grant awarded by the National Science Foundation (NSF) under solicitation NSF 08-573 in August 2009 for the development and deployment of an innovative high performance computing system. The Keeneland project is led by the Georgia Institute of Technology (Georgia Tech) in collaboration with the University of Tennessee at Knoxville, National Institute of Computational Sciences, and Oak Ridge National Laboratory.}, author = {Jeffrey Vetter and Richard Glassbrook and Karsten Schwan and Sudha Yalamanchili and Mitch Horton and Ada Gavrilovska and Magda Slawinska and Jack Dongarra and Jeremy Meredith and Philip Roth and Kyle Spafford and Stanimire Tomov and John Wynkoop} } @article {icl:734, title = {Kernel-assisted and topology-aware MPI collective communications on multi-core/many-core platforms}, journal = {Journal of Parallel and Distributed Computing}, volume = {73}, year = {2013}, month = {2013-07}, pages = {1000-1010}, abstract = {Multicore Clusters, which have become the most prominent form of High Performance Computing (HPC) systems, challenge the performance of MPI applications with non-uniform memory accesses and shared cache hierarchies. Recent advances in MPI collective communications have alleviated the performance issue exposed by deep memory hierarchies by carefully considering the mapping between the collective topology and the hardware topologies, as well as the use of single-copy kernel-assisted mechanisms. However, in distributed environments, a single-level approach cannot encompass the extreme variations not only in bandwidth and latency capabilities, but also in the capability to support duplex communications or operate multiple concurrent copies. This calls for a collaborative approach between multiple layers of collective algorithms, dedicated to extracting the maximum degree of parallelism from the collective algorithm by consolidating the intra- and inter-node communications.
In this work, we present HierKNEM, a kernel-assisted topology-aware collective framework, and the mechanisms deployed by this framework to orchestrate the collaboration between multiple layers of collective algorithms. The resulting scheme maximizes the overlap of intra- and inter-node communications. We demonstrate experimentally, by considering three of the most used collective operations (Broadcast, Allgather and Reduction), that (1) this approach is immune to modifications of the underlying process-core binding; (2) it outperforms state-of-the-art MPI libraries (Open MPI, MPICH2 and MVAPICH2), demonstrating up to a 30x speedup for synthetic benchmarks, and up to a 3x acceleration for a parallel graph application (ASP); (3) it furthermore demonstrates a linear speedup with the increase of the number of cores per compute node, a paramount requirement for scalability on future many-core hardware.}, keywords = {Cluster, Collective communication, Hierarchical, HPC, MPI, Multicore}, doi = {10.1016/j.jpdc.2013.01.015}, url = {http://www.sciencedirect.com/science/article/pii/S0743731513000166}, author = {Teng Ma and George Bosilca and Aurelien Bouteiller and Jack Dongarra} } @inbook {747, title = {LAPACK}, booktitle = {Handbook of Linear Algebra}, year = {2013}, publisher = {CRC Press}, organization = {CRC Press}, edition = {Second}, address = {Boca Raton, FL}, abstract = {With a substantial amount of new material, the Handbook of Linear Algebra, Second Edition provides comprehensive coverage of linear algebra concepts, applications, and computational software packages in an easy-to-use format. It guides you from the very elementary aspects of the subject to the frontiers of current research. Along with revisions and updates throughout, the second edition of this bestseller includes 20 new chapters.}, isbn = {9781466507289}, author = {Zhaojun Bai and James Demmel and Jack Dongarra and Julien Langou and Jenny Wang} } @inproceedings {757, title = {Leading Edge Hybrid Multi-GPU Algorithms for Generalized Eigenproblems in Electronic Structure Calculations}, journal = {International Supercomputing Conference (ISC)}, volume = {7905}, year = {2013}, month = {2013-06}, pages = {67-80}, publisher = {Springer Berlin Heidelberg}, edition = {Lecture Notes in Computer Science}, address = {Leipzig, Germany}, abstract = {Today{\textquoteright}s high computational demands from engineering fields and complex hardware development make it necessary to develop and optimize new algorithms toward achieving high performance and good scalability on the next generation of computers. The enormous gap between the high-performance capabilities of GPUs and the slow interconnect between them has made the development of numerical software that is scalable across multiple GPUs extremely challenging. We describe and analyze a successful methodology to address the challenges{\textemdash}starting from our algorithm design, kernel optimization and tuning, to our programming model{\textemdash}in the development of a scalable high-performance generalized eigenvalue solver in the context of electronic structure calculations in materials science applications. We developed a set of leading-edge dense linear algebra algorithms, as part of a generalized eigensolver, featuring fine-grained, memory-aware kernels, a task-based approach, and hybrid execution/scheduling. The goal of the new design is to increase the computational intensity of the major compute kernels and to reduce synchronization and data transfers between GPUs.
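For reference, the problem being accelerated is the generalized symmetric-definite eigenproblem Ax = lambda Bx; a single-threaded LAPACKE baseline (an illustration only, the paper's contribution is the multi-GPU formulation) is:

    #include <stdio.h>
    #include <lapacke.h>

    int main(void) {
        /* tiny 2 x 2 example: A symmetric, B symmetric positive definite */
        double A[4] = {2.0, 1.0,
                       1.0, 3.0};
        double B[4] = {1.0, 0.0,
                       0.0, 1.0};
        double w[2];
        /* itype = 1 solves A x = lambda B x; 'V' also returns eigenvectors in A */
        lapack_int info = LAPACKE_dsygvd(LAPACK_ROW_MAJOR, 1, 'V', 'U',
                                         2, A, 2, B, 2, w);
        if (info != 0) return 1;
        printf("eigenvalues: %f %f\n", w[0], w[1]);
        return 0;
    }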
We report the performance impact on the generalized eigensolver when different fractions of eigenvectors are needed. The algorithm described provides an enormous performance boost compared to current GPU-based solutions, and performance comparable to state-of-the-art distributed solutions, using a single node with multiple GPUs.}, isbn = {978-3-642-38750-0}, doi = {10.1007/978-3-642-38750-0_6}, author = {Azzam Haidar and Stanimire Tomov and Jack Dongarra and Raffaele Solc{\`a} and Thomas C. Schulthess} } @article {756, title = {Level-3 Cholesky Factorization Routines Improve Performance of Many Cholesky Algorithms}, journal = {ACM Transactions on Mathematical Software (TOMS)}, volume = {39}, year = {2013}, month = {2013-02}, abstract = {Four routines called DPOTF3i, i = a,b,c,d, are presented. DPOTF3i are a novel type of level-3 BLAS for use by BPF (Blocked Packed Format) Cholesky factorization and LAPACK routine DPOTRF. Performance of routines DPOTF3i is still increasing when the performance of the Level-2 routine DPOTF2 of LAPACK starts decreasing. This is our main result and it implies, due to the use of larger block size nb, that DGEMM, DSYRK, and DTRSM performance also increases! The four DPOTF3i routines use simple register blocking. Different platforms have different numbers of registers. Thus, our four routines have different register blocking sizes. BPF is introduced. LAPACK routines for POTRF and PPTRF using BPF instead of full and packed format are shown to be trivial modifications of LAPACK POTRF source codes. We call these codes BPTRF. There are two variants of BPF: lower and upper. Upper BPF is {\textquotedblleft}identical{\textquotedblright} to Square Block Packed Format (SBPF). {\textquotedblleft}LAPACK{\textquotedblright} implementations on multicore processors use SBPF. Lower BPF is less efficient than upper BPF. Vector in-place transposition converts lower BPF to upper BPF very efficiently. Corroborating performance results for DPOTF3i versus DPOTF2 on a variety of common platforms are given for n ≈ nb as well as results for large n comparing DBPTRF versus DPOTRF.}, doi = {10.1145/2427023.2427026}, author = {Fred G. Gustavson and Jerzy Wasniewski and Jack Dongarra and Jos{\'e} Herrero and Julien Langou} } @article {icl:693, title = {LU Factorization with Partial Pivoting for a Multicore System with Accelerators}, journal = {IEEE Transactions on Parallel and Distributed Computing}, volume = {24}, year = {2013}, month = {2013-08}, pages = {1613-1621}, chapter = {1613}, abstract = {LU factorization with partial pivoting is a canonical numerical procedure and the main component of the high performance LINPACK benchmark. This paper presents an implementation of the algorithm for a hybrid, shared memory, system with standard CPU cores and GPU accelerators. The difficulty of implementing the algorithm for such a system lies in the disproportion between the computational power of the CPUs, compared to the GPUs, and in the meager bandwidth of the communication link between their memory systems. An additional challenge comes from the complexity of the memory-bound and synchronization-rich nature of the panel factorization component of the block LU algorithm, imposed by the use of partial pivoting. The challenges are tackled with the use of a data layout geared toward complex memory hierarchies, autotuning of GPU kernels, fine-grain parallelization of memory-bound CPU operations, and dynamic scheduling of tasks to different devices.
Performance in excess of one TeraFLOPS is achieved using four AMD Magny Cours CPUs and four NVIDIA Fermi GPUs.}, keywords = {accelerator, Gaussian elimination, gpu, lu factorization, manycore, Multicore, partial pivoting, plasma}, doi = {http://doi.ieeecomputersociety.org/10.1109/TPDS.2012.242}, author = {Jakub Kurzak and Piotr Luszczek and Jack Dongarra} } @techreport {icl:733, title = {Multi-criteria checkpointing strategies: optimizing response-time versus resource utilization}, journal = {University of Tennessee Computer Science Technical Report}, number = {ICL-UT-13-01}, year = {2013}, month = {2013-02}, abstract = {Failures are increasingly threatening the efficiency of HPC systems, and current projections of Exascale platforms indicate that rollback recovery, the most convenient method for providing fault tolerance to general-purpose applications, reaches its own limits at such scales. One of the reasons explaining this unnerving situation comes from the focus that has been given to per-application completion time, rather than to platform efficiency. In this paper, we discuss the case of uncoordinated rollback recovery where the idle time spent waiting for recovering processors is used to progress a different, independent application from the system batch queue. We then propose an extended model of uncoordinated checkpointing that can discriminate between idle time and wasted computation. We instantiate this model in a simulator to demonstrate that, with this strategy, uncoordinated checkpointing per-application completion time is unchanged, while it delivers near-perfect platform efficiency.}, author = {Aurelien Bouteiller and Franck Cappello and Jack Dongarra and Amina Guermouche and Thomas Herault and Yves Robert} } @conference {868, title = {Multi-criteria Checkpointing Strategies: Response-Time versus Resource Utilization}, booktitle = {Euro-Par 2013}, year = {2013}, month = {2013-08}, publisher = {Springer}, organization = {Springer}, address = {Aachen, Germany}, abstract = {Failures are increasingly threatening the efficiency of HPC systems, and current projections of Exascale platforms indicate that rollback recovery, the most convenient method for providing fault tolerance to general-purpose applications, reaches its own limits at such scales. One of the reasons explaining this unnerving situation comes from the focus that has been given to per-application completion time, rather than to platform efficiency. In this paper, we discuss the case of uncoordinated rollback recovery where the idle time spent waiting for recovering processors is used to progress a different, independent application from the system batch queue. We then propose an extended model of uncoordinated checkpointing that can discriminate between idle time and wasted computation.
We instantiate this model in a simulator to demonstrate that, with this strategy, uncoordinated checkpointing per-application completion time is unchanged, while it delivers near-perfect platform efficiency.}, author = {Aurelien Bouteiller and Franck Cappello and Jack Dongarra and Amina Guermouche and Thomas Herault and Yves Robert} } @article {icl:704, title = {Multithreading in the PLASMA Library}, journal = {Multi and Many-Core Processing: Architecture, Programming, Algorithms, \& Applications}, year = {2013}, month = {2013-00}, publisher = {Taylor \& Francis}, keywords = {plasma}, author = {Jakub Kurzak and Piotr Luszczek and Asim YarKhan and Mathieu Faverge and Julien Langou and Henricus Bouwmeester and Jack Dongarra}, editor = {Mohamed Ahmed and Reda Ammar and Sanguthevar Rajasekaran} } @techreport {702, title = {Optimal Checkpointing Period: Time vs. Energy}, journal = {University of Tennessee Computer Science Technical Report (also LAWN 281)}, number = {ut-eecs-13-718}, year = {2013}, month = {2013-10}, publisher = {University of Tennessee}, author = {Guillaume Aupy and Anne Benoit and Thomas Herault and Yves Robert and Jack Dongarra} } @conference {705, title = {Parallel Reduction to Hessenberg Form with Algorithm-Based Fault Tolerance}, booktitle = {International Conference for High Performance Computing, Networking, Storage and Analysis, IEEE-SC 2013}, year = {2013}, month = {2013-11}, address = {Denver, CO}, abstract = {This paper studies the resilience of a two-sided factorization and presents a generic algorithm-based approach capable of making two-sided factorizations resilient. We establish the theoretical proof of the correctness and the numerical stability of the approach in the context of a Hessenberg Reduction (HR) and present the scalability and performance results of a practical implementation. Our method is a hybrid algorithm combining an Algorithm Based Fault Tolerance (ABFT) technique with diskless checkpointing to fully protect the data. We protect the trailing and the initial part of the matrix with checksums, and protect finished panels in the panel scope with diskless checkpoints. Compared with the original HR (the ScaLAPACK PDGEHRD routine), our fault-tolerant algorithm introduces very little overhead, and maintains the same level of scalability. We prove that the overhead shows a decreasing trend as the size of the matrix or the size of the process grid increases.}, author = {Yulu Jia and George Bosilca and Piotr Luszczek and Jack Dongarra} } @article {749, title = {PaRSEC: Exploiting Heterogeneity to Enhance Scalability}, journal = {IEEE Computing in Science and Engineering}, volume = {15}, year = {2013}, month = {2013-11}, pages = {36-45}, abstract = {New high-performance computing system designs with steeply escalating processor and core counts, burgeoning heterogeneity and accelerators, and increasingly unpredictable memory access times call for dramatically new programming paradigms.
These new approaches must react and adapt quickly to unexpected contentions and delays, and they must provide the execution environment with sufficient intelligence and flexibility to rearrange the execution to improve resource utilization.}, doi = {10.1109/MCSE.2013.98}, author = {George Bosilca and Aurelien Bouteiller and Anthony Danalis and Mathieu Faverge and Thomas Herault and Jack Dongarra} } @techreport {751, title = {Performance of Various Computers Using Standard Linear Equations Software}, journal = {University of Tennessee Computer Science Technical Report}, number = {cs-89-85}, year = {2013}, month = {2013-02}, abstract = {This report compares the performance of different computer systems in solving dense systems of linear equations. The comparison involves approximately a hundred computers, ranging from the Earth Simulator to personal computers.}, author = {Jack Dongarra} } @conference {753, title = {Portable HPC Programming on Intel Many-Integrated-Core Hardware with MAGMA Port to Xeon Phi}, booktitle = {PPAM 2013}, year = {2013}, month = {2013-09}, address = {Warsaw, Poland}, abstract = {This paper presents the design and implementation of several fundamental dense linear algebra (DLA) algorithms for multicore with Intel Xeon Phi Coprocessors. In particular, we consider algorithms for solving linear systems. Further, we give an overview of the MAGMA MIC library, an open source, high performance library that incorporates the developments presented and, in general, provides the DLA functionality of the popular LAPACK library to heterogeneous architectures of multicore with coprocessors. The LAPACK-compliance simplifies the use of the MAGMA MIC library in applications, while providing them with portably performant DLA. High performance is obtained through use of the high-performance BLAS, hardware-specific tuning, and a hybridization methodology where we split the algorithm into computational tasks of various granularities. Execution of those tasks is properly scheduled over the heterogeneous hardware components by minimizing data movements and mapping algorithmic requirements to the architectural strengths of the various heterogeneous hardware components. Our methodology and programming techniques are incorporated into the MAGMA MIC API, which abstracts the application developer from the specifics of the Xeon Phi architecture and is therefore applicable to algorithms beyond the scope of DLA.}, keywords = {magma, mic, xeon phi}, author = {Jack Dongarra and Mark Gates and Azzam Haidar and Yulu Jia and Khairul Kabir and Piotr Luszczek and Stanimire Tomov} } @article {693, title = {Post-failure recovery of MPI communication capability: Design and rationale}, journal = {International Journal of High Performance Computing Applications}, volume = {27}, year = {2013}, month = {2013-01}, pages = {244 - 254}, abstract = {As supercomputers are entering an era of massive parallelism where the frequency of faults is increasing, the MPI Standard remains distressingly vague on the consequence of failures on MPI communications. Advanced fault-tolerance techniques have the potential to prevent full-scale application restart and therefore lower the cost incurred for each failure, but they demand from MPI the capability to detect failures and resume communications afterward.
The motivation behind the design choices is weighed against alternatives, a task that requires simultaneously considering MPI from the viewpoint of both the user and the implementor. The usability of the interfaces for expressing advanced recovery techniques is then discussed, including the difficult issue of enabling separate software layers to coordinate their recovery.}, issn = {1094-3420}, doi = {10.1177/1094342013488238}, url = {http://hpc.sagepub.com/cgi/doi/10.1177/1094342013488238}, author = {Wesley Bland and Aurelien Bouteiller and Thomas Herault and George Bosilca and Jack Dongarra} } @techreport {icl:735, title = {Revisiting the Double Checkpointing Algorithm}, journal = {University of Tennessee Computer Science Technical Report (LAWN 274)}, number = {ut-cs-13-705}, year = {2013}, month = {2013-01}, abstract = {Fast checkpointing algorithms require distributed access to stable storage. This paper revisits the approach based upon double checkpointing, and compares the blocking algorithm of Zheng, Shi and Kal{\'e}, with the non-blocking algorithm of Ni, Meneses and Kal{\'e} in terms of both performance and risk. We also extend the model that they have proposed to assess the impact of the overhead associated with non-blocking communications. We then provide a new peer-to-peer checkpointing algorithm, called the triple checkpointing algorithm, that can work at constant memory, and achieves both higher efficiency and better risk handling than the double checkpointing algorithm. We provide performance and risk models for all the evaluated protocols, and compare them through comprehensive simulations.}, keywords = {checkpoint algorithm, communication overlap, fault-tolerance, performance model, resilience}, author = {Jack Dongarra and Thomas Herault and Yves Robert} } @conference {717, title = {Revisiting the Double Checkpointing Algorithm}, booktitle = {15th Workshop on Advances in Parallel and Distributed Computational Models, at the IEEE International Parallel \& Distributed Processing Symposium}, year = {2013}, month = {2013-05}, address = {Boston, MA}, abstract = {Fast checkpointing algorithms require distributed access to stable storage. This paper revisits the approach based upon double checkpointing, and compares the blocking algorithm of Zheng, Shi and Kale [1], with the non-blocking algorithm of Ni, Meneses and Kale [2] in terms of both performance and risk. We also extend the model proposed in [1], [2] to assess the impact of the overhead associated with non-blocking communications. We then provide a new peer-to-peer checkpointing algorithm, called the triple checkpointing algorithm, that can work at constant memory, and achieves both higher efficiency and better risk handling than the double checkpointing algorithm. We provide performance and risk models for all the evaluated protocols, and compare them through comprehensive simulations.}, author = {Jack Dongarra and Thomas Herault and Yves Robert} } @inbook {695, title = {Scalable Dense Linear Algebra on Heterogeneous Hardware}, booktitle = {HPC: Transition Towards Exascale Processing, in the series Advances in Parallel Computing}, year = {2013}, abstract = {Design of systems exceeding 1 Pflop/s and the push toward 1 Eflop/s forced a dramatic shift in hardware design. Various physical and engineering constraints resulted in the introduction of massive parallelism and functional hybridization with the use of accelerator units.
This paradigm change brings about a serious challenge for application developers, as the management of multicore proliferation and heterogeneity rests on software. And it is reasonable to expect that this situation will not change in the foreseeable future. This chapter presents a methodology of dealing with this issue in three common scenarios. In the context of shared-memory multicore installations, we show how high performance and scalability go hand in hand, when the well-known linear algebra algorithms are recast in terms of Directed Acyclic Graphs (DAGs), which are then transparently scheduled at runtime inside the Parallel Linear Algebra Software for Multicore Architectures (PLASMA) project. Similarly, Matrix Algebra on GPU and Multicore Architectures (MAGMA) schedules DAG-driven computations on multicore processors and accelerators. Finally, Distributed PLASMA (DPLASMA) takes the approach to distributed-memory machines with the use of automatic dependence analysis and the Direct Acyclic Graph Engine (DAGuE) to deliver high performance at the scale of many thousands of cores.}, author = {George Bosilca and Aurelien Bouteiller and Anthony Danalis and Thomas Herault and Jakub Kurzak and Piotr Luszczek and Stanimire Tomov and Jack Dongarra} } @article {755, title = {Soft Error Resilient QR Factorization for Hybrid System with GPGPU}, journal = {Journal of Computational Science}, volume = {4}, year = {2013}, month = {2013-11}, pages = {457{\textendash}464}, abstract = {The general purpose graphics processing units (GPGPUs) are increasingly deployed for scientific computing due to their performance advantages over CPUs. Consequently, fault tolerance has become a more serious concern than it was in the period when GPGPUs were used exclusively for graphics applications. Using GPUs and CPUs together in a hybrid computing system increases flexibility and performance but also increases the possibility of the computations being affected by soft errors, for example, in the form of bit flips. In this work, we propose a soft error resilient algorithm for QR factorization on such hybrid systems. Our contributions include: (1) a checkpointing and recovery mechanism for the left-factor Q whose performance is scalable on hybrid systems; (2) optimized Givens rotation utilities on GPGPUs to efficiently reduce an upper Hessenberg matrix to an upper triangular form for the protection of the right factor R; and (3) a recovery algorithm based on QR update on GPGPUs. Experimental results show that our fault tolerant QR factorization can successfully detect and recover from soft errors in the entire matrix with little overhead on hybrid systems with GPGPUs.}, keywords = {gpgpu, gpu, magma}, doi = {http://dx.doi.org/10.1016/j.jocs.2013.01.004}, author = {Peng Du and Piotr Luszczek and Stanimire Tomov and Jack Dongarra} } @conference {761, title = {Standards for Graph Algorithm Primitives}, booktitle = {17th IEEE High Performance Extreme Computing Conference (HPEC {\textquoteright}13)}, year = {2013}, month = {2013-09}, publisher = {IEEE}, organization = {IEEE}, address = {Waltham, MA}, abstract = {It is our view that the state of the art in constructing a large collection of graph algorithms in terms of linear algebraic operations is mature enough to support the emergence of a standard set of primitive building blocks.
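To illustrate the kind of primitive building block in question, the sketch below expresses one level of breadth-first search as a matrix-vector product over a Boolean semiring (a minimal NumPy illustration of the general idea, assuming a dense adjacency matrix; it is not the standard the authors propose):

import numpy as np

# Toy directed graph: A[i, j] = 1 iff there is an edge from vertex i to vertex j.
A = np.array([[0, 1, 0, 0],
              [0, 0, 1, 1],
              [0, 0, 0, 0],
              [1, 0, 0, 0]])

frontier = np.array([1, 0, 0, 0], dtype=bool)  # start BFS from vertex 0
visited = frontier.copy()
while frontier.any():
    # One BFS level: a matrix-vector product over the (OR, AND) semiring,
    # emulated here with integer arithmetic followed by a threshold.
    frontier = ((A.T @ frontier) > 0) & ~visited
    visited |= frontier
# 'visited' now marks every vertex reachable from vertex 0.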
This paper is a position paper defining the problem and announcing our intention to launch an open effort to define this standard.}, keywords = {algorithms, graphs, linear algebra, software standards}, doi = {10.1109/HPEC.2013.6670338}, author = {Tim Mattson and David Bader and Jon Berry and Aydin Buluc and Jack Dongarra and Christos Faloutsos and John Feo and John Gilbert and Joseph Gonzalez and Bruce Hendrickson and Jeremy Kepner and Charles Leiserson and Andrew Lumsdaine and David Padua and Steve W. Poole and Steve Reinhardt and Mike Stonebraker and Steve Wallach and Andrew Yoo} } @techreport {683, title = {Toward a New Metric for Ranking High Performance Computing Systems}, journal = {SAND2013 - 4744}, year = {2013}, month = {2013-06}, abstract = {The High Performance Linpack (HPL), or Top 500, benchmark is the most widely recognized and discussed metric for ranking high performance computing systems. However, HPL is increasingly unreliable as a true measure of system performance for a growing collection of important science and engineering applications. In this paper we describe a new high performance conjugate gradient (HPCG) benchmark. HPCG is composed of computations and data access patterns more commonly found in applications. Using HPCG we strive for a better correlation to real scientific application performance and expect to drive computer system design and implementation in directions that will better impact performance improvement.}, url = {http://www.netlib.org/utk/people/JackDongarra/PAPERS/HPCG-Benchmark-utk.pdf}, author = {Michael A. Heroux and Jack Dongarra} } @conference {686, title = {Toward a scalable multi-GPU eigensolver via compute-intensive kernels and efficient communication}, booktitle = {Proceedings of the 27th ACM International Conference on Supercomputing (ICS {\textquoteright}13)}, year = {2013}, month = {2013-06}, publisher = {ACM Press}, organization = {ACM Press}, address = {Eugene, Oregon, USA}, abstract = {The enormous gap between the high-performance capabilities of GPUs and the slow interconnect between them has made the development of numerical software that is scalable across multiple GPUs extremely challenging. We describe a successful methodology on how to address the challenges---starting from our algorithm design, kernel optimization and tuning, to our programming model---in the development of a scalable high-performance tridiagonal reduction algorithm for the symmetric eigenvalue problem. This is a fundamental linear algebra problem with many engineering and physics applications. We use a combination of a task-based approach to parallelism and a new algorithmic design to achieve high performance. The goal of the new design is to increase the computational intensity of the major compute kernels and to reduce synchronization and data transfers between GPUs. This may increase the number of flops, but the increase is offset by the more efficient execution and reduced data transfers. Our performance results are the best available, providing an enormous performance boost compared to current state-of-the-art solutions.
In particular, our software scales up to 1070 Gflop/s using 16 Intel E5-2670 cores and eight M2090 GPUs, compared to 45 Gflop/s achieved by the optimized Intel Math Kernel Library (MKL) using only the 16 CPU cores.}, keywords = {eigenvalue, gpu communication, gpu computation, heterogeneous programming model, performance, reduction to tridiagonal, singular value decomposition, task parallelism}, isbn = {9781450321303}, doi = {10.1145/2464996.2465438}, url = {http://dl.acm.org/citation.cfm?doid=2464996.2465438}, author = {Azzam Haidar and Mark Gates and Stanimire Tomov and Jack Dongarra}, editor = {Allen D. Malony and Mario Nemirovsky and Sam Midkiff} } @techreport {685, title = {Transient Error Resilient Hessenberg Reduction on GPU-based Hybrid Architectures}, journal = {UT-CS-13-712}, year = {2013}, month = {2013-06}, publisher = {University of Tennessee Computer Science Technical Report}, abstract = {Graphics Processing Units (GPUs) are gaining widespread usage in the field of scientific computing owing to the performance boost GPUs bring to computation-intensive applications. The typical configuration is to integrate GPUs and CPUs in the same system, where the CPUs handle the control flow and part of the computation workload, and the GPUs serve as accelerators carrying out the bulk of the data-parallel compute workload. In this paper we design and implement a soft error resilient Hessenberg reduction algorithm on GPU-based hybrid platforms. Our design employs an algorithm-based fault tolerance technique, diskless checkpointing, and reverse computation. We detect and correct soft errors on-line without delaying the detection and correction to the end of the factorization. By utilizing the idle time of the CPUs and overlapping both host-side and GPU-side workloads, we minimize the observed overhead. Experimental results validate our design philosophy. Our algorithm introduces less than 2\% performance overhead compared to the non-fault-tolerant hybrid Hessenberg reduction algorithm.}, author = {Yulu Jia and Piotr Luszczek and Jack Dongarra} } @article {701, title = {Tridiagonalization of a dense symmetric matrix on multiple GPUs and its application to symmetric eigenvalue problems}, journal = {Concurrency and Computation: Practice and Experience}, year = {2013}, month = {2013-10}, abstract = {For software to fully exploit the computing power of emerging heterogeneous computers, not only must the required computational kernels be optimized for the specific hardware architectures but also an effective scheduling scheme is needed to utilize the available heterogeneous computational units and to hide the communication between them. As a case study, we develop a static scheduling scheme for the tridiagonalization of a symmetric dense matrix on multicore CPUs with multiple graphics processing units (GPUs) on a single compute node. We then parallelize and optimize the Basic Linear Algebra Subroutines (BLAS)-2 symmetric matrix-vector multiplication, and the BLAS-3 low rank symmetric matrix updates on the GPUs. We demonstrate the good scalability of these multi-GPU BLAS kernels and the effectiveness of our scheduling scheme on twelve Intel Xeon processors and three NVIDIA GPUs. We then integrate our hybrid CPU-GPU kernel into computational kernels at higher levels of the software stack, that is, a shared-memory dense eigensolver and a distributed-memory sparse eigensolver.
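The role of the reduction step can be seen in a few lines of NumPy/SciPy (a sequential toy showing why tridiagonalization is the workhorse of symmetric eigensolvers, not the multi-GPU kernels described above; for symmetric input, scipy.linalg.hessenberg returns a numerically tridiagonal matrix):

import numpy as np
from scipy.linalg import hessenberg, eigh_tridiagonal

rng = np.random.default_rng(0)
A = rng.standard_normal((6, 6))
A = (A + A.T) / 2                      # symmetric test matrix

# Orthogonal reduction Q^T A Q = T preserves eigenvalues; for symmetric A,
# the Hessenberg form T is tridiagonal, so only its two diagonals are needed.
T = hessenberg(A)
w = eigh_tridiagonal(np.diag(T), np.diag(T, 1), eigvals_only=True)
assert np.allclose(np.sort(w), np.linalg.eigvalsh(A))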
Our experimental results show that our kernels greatly improve the performance of these higher-level kernels, not only reducing the solution time but also enabling the solution of larger-scale problems. Because such symmetric eigenvalue problems arise in many scientific and engineering simulations, our kernels could potentially lead to new scientific discoveries. Furthermore, these dense linear algebra algorithms present algorithmic characteristics that can be found in other algorithms. Hence, they are not only important computational kernels on their own but also useful testbeds to study the performance of the emerging computers and the effects of the various optimization techniques.}, author = {Ichitaro Yamazaki and Tingxing Dong and Raffaele Solc{\`a} and Stanimire Tomov and Jack Dongarra and Thomas C. Schulthess} } @conference {1328, title = {Tridiagonalization of a Symmetric Dense Matrix on a GPU Cluster}, booktitle = {The Third International Workshop on Accelerators and Hybrid Exascale Systems (AsHES)}, year = {2013}, month = {2013-05}, author = {Ichitaro Yamazaki and Tingxing Dong and Stanimire Tomov and Jack Dongarra} } @article {748, title = {Unified Model for Assessing Checkpointing Protocols at Extreme-Scale}, journal = {Concurrency and Computation: Practice and Experience}, year = {2013}, month = {2013-11}, abstract = {In this paper, we present a unified model for several well-known checkpoint/restart protocols. The proposed model is generic enough to encompass both extremes of the checkpoint/restart space, from coordinated approaches to a variety of uncoordinated checkpoint strategies (with message logging). We identify a set of crucial parameters, instantiate them, and compare the expected efficiency of the fault tolerant protocols, for a given application/platform pair. We then propose a detailed analysis of several scenarios, including some of the most powerful currently available high performance computing platforms, as well as anticipated Exascale designs. The results of this analytical comparison are corroborated by a comprehensive set of simulations. Altogether, they outline comparative behaviors of checkpoint strategies at very large scale, thereby providing insight that is hardly accessible to direct experimentation.}, doi = {10.1002/cpe.3173}, author = {George Bosilca and Aurelien Bouteiller and Elisabeth Brunet and Franck Cappello and Jack Dongarra and Amina Guermouche and Thomas Herault and Yves Robert and Frederic Vivien and Dounia Zaidouni} } @conference {icl:692, title = {Virtual Systolic Array for QR Decomposition}, booktitle = {15th Workshop on Advances in Parallel and Distributed Computational Models, IEEE International Parallel \& Distributed Processing Symposium (IPDPS 2013)}, year = {2013}, month = {2013-05}, publisher = {IEEE}, organization = {IEEE}, address = {Boston, MA}, abstract = {Systolic arrays offer a very attractive, data-centric execution model as an alternative to the von Neumann architecture. Hardware implementations of systolic arrays turned out not to be viable solutions in the past. This article shows how the systolic design principles can be applied to a software solution to deliver an algorithm with unprecedented strong scaling capabilities. A systolic array for the QR decomposition is developed, and a virtualization layer is used for mapping the algorithm to a large distributed-memory system.
Strong scaling properties are discovered, superior to existing solutions.}, keywords = {dataflow programming, message passing, multi-core, QR decomposition, roofline model, systolic array}, doi = {10.1109/IPDPS.2013.119}, author = {Jakub Kurzak and Piotr Luszczek and Mark Gates and Ichitaro Yamazaki and Jack Dongarra} } @article {icl:731, title = {Acceleration of the BLAST Hydro Code on GPU}, journal = {Supercomputing {\textquoteright}12 (poster)}, year = {2012}, month = {2012-11}, publisher = {SC12}, address = {Salt Lake City, Utah}, author = {Tingxing Dong and Tzanio Kolev and Robert Rieben and Veselin Dobrev and Stanimire Tomov and Jack Dongarra} } @inproceedings {icl:672, title = {Algorithm-Based Fault Tolerance for Dense Matrix Factorization}, journal = {Proceedings of the 17th ACM SIGPLAN Symposium on Principles and Practice of Parallel Programming, PPOPP 2012}, year = {2012}, month = {2012-02}, pages = {225-234}, publisher = {ACM}, address = {New Orleans, LA, USA}, abstract = {Dense matrix factorizations, such as LU, Cholesky and QR, are widely used for scientific applications that require solving systems of linear equations, eigenvalues and linear least squares problems. Such computations are normally carried out on supercomputers, whose ever-growing scale induces a fast decline of the Mean Time To Failure (MTTF). This paper proposes a new hybrid approach, based on Algorithm-Based Fault Tolerance (ABFT), to help matrix factorization algorithms survive fail-stop failures. We consider extreme conditions, such as the absence of any reliable component and the possibility of losing both data and checksum from a single failure. We will present a generic solution for protecting the right factor, where the updates are applied, of all the above-mentioned factorizations. For the left factor, where the panel has been applied, we propose a scalable checkpointing algorithm. This algorithm features a high degree of checkpointing parallelism and cooperatively utilizes the checksum storage leftover from the right factor protection. The fault-tolerant algorithms derived from this hybrid solution are applicable to a wide range of dense matrix factorizations, with minor modifications. Theoretical analysis shows that the fault tolerance overhead sharply decreases with the scaling in the number of computing units and the problem size. Experimental results of LU and QR factorization on the Kraken (Cray XT5) supercomputer validate the theoretical evaluation and confirm negligible overhead, with and without errors.}, keywords = {ft-la, ftmpi}, doi = {10.1145/2145816.2145845}, author = {Peng Du and Aurelien Bouteiller and George Bosilca and Thomas Herault and Jack Dongarra}, editor = {J. Ramanujam and P. Sadayappan} } @techreport {688, title = {On Algorithmic Variants of Parallel Gaussian Elimination: Comparison of Implementations in Terms of Performance and Numerical Properties}, journal = {University of Tennessee Computer Science Technical Report}, number = {UT-CS-13-715}, year = {2012}, month = {2013-07}, abstract = {Gaussian elimination is a canonical linear algebra procedure for solving linear systems of equations. In the last few years, the algorithm has received a lot of attention in an attempt to improve its parallel performance. This article surveys recent developments in parallel implementations of the Gaussian elimination. Five different flavors are investigated. Three of them are based on different strategies for pivoting: partial pivoting, incremental pivoting, and tournament pivoting.
The fourth one replaces pivoting with the Random Butterfly Transformation, and finally, an implementation without pivoting is used as a performance baseline. The technique of iterative refinement is applied to recover numerical accuracy when necessary. All parallel implementations are produced using dynamic, superscalar, runtime scheduling and tile matrix layout. Results on two multi-socket multicore systems are presented. Performance and numerical accuracy are analyzed.}, author = {Simplice Donfack and Jack Dongarra and Mathieu Faverge and Mark Gates and Jakub Kurzak and Piotr Luszczek and Ichitaro Yamazaki} } @inproceedings {icl:712, title = {Anatomy of a Globally Recursive Embedded LINPACK Benchmark}, journal = {2012 IEEE High Performance Extreme Computing Conference}, year = {2012}, month = {2012-09}, pages = {1-6}, address = {Waltham, MA}, abstract = {We present a complete bottom-up implementation of an embedded LINPACK benchmark on iPad 2. We use a novel formulation of LU factorization that is recursive and parallel at the global scope. We believe our new algorithm presents an alternative to existing linear algebra parallelization techniques, such as master-worker and DAG-based approaches. We show an assembly API that allows a much higher level of abstraction and provides rapid code development within the confines of the mobile device SDK. We use performance modeling to help with the limitations of the device and the limited access to the device from a development environment not geared for HPC application tuning.}, isbn = {978-1-4673-1577-7}, doi = {10.1109/HPEC.2012.6408679}, author = {Piotr Luszczek and Jack Dongarra} } @article {, title = {Autotuning GEMM Kernels for the Fermi GPU}, journal = {IEEE Transactions on Parallel and Distributed Systems}, volume = {23}, number = {11}, year = {2012}, month = {2012-11}, abstract = {In recent years, the use of graphics chips has been recognized as a viable way of accelerating scientific and engineering applications, even more so since the introduction of the Fermi architecture by NVIDIA, with features essential to numerical computing, such as fast double precision arithmetic and memory protected with error correction codes. Being the crucial component of numerical software packages, such as LAPACK and ScaLAPACK, the general dense matrix multiplication routine is one of the more important workloads to be implemented on these devices. This paper presents a methodology for producing matrix multiplication kernels tuned for a specific architecture, through a canonical process of heuristic autotuning, based on generation of multiple code variants and selecting the fastest ones through benchmarking. The key contribution of this work is in the method for generating the search space; specifically, pruning it to a manageable size.
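The generate-and-benchmark loop at the heart of such autotuning can be sketched in a few lines of Python (a toy CPU analogue in NumPy where the tile size stands in for the paper's CUDA kernel variants; the names and the pruned search space here are illustrative):

import time
import numpy as np

def gemm_variant(A, B, tile):
    # One "code variant": a tiled matrix multiply parameterized by tile size.
    n = A.shape[0]
    C = np.zeros_like(A)
    for i in range(0, n, tile):
        for k in range(0, n, tile):
            for j in range(0, n, tile):
                C[i:i+tile, j:j+tile] += A[i:i+tile, k:k+tile] @ B[k:k+tile, j:j+tile]
    return C

n = 512
A = np.random.rand(n, n)
B = np.random.rand(n, n)
timings = {}
for tile in (32, 64, 128, 256):       # pruned search space of variants
    t0 = time.perf_counter()
    gemm_variant(A, B, tile)
    timings[tile] = time.perf_counter() - t0
best_tile = min(timings, key=timings.get)  # keep the fastest variant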
Performance numbers match or exceed other available implementations.}, doi = {https://doi.org/10.1109/TPDS.2011.311}, author = {Jakub Kurzak and Stanimire Tomov and Jack Dongarra} } @article {icl:697, title = {Block-asynchronous Multigrid Smoothers for GPU-accelerated Systems}, journal = {ICCS 2012}, year = {2012}, month = {2012-06}, address = {Omaha, NE}, author = {Hartwig Anzt and Stanimire Tomov and Mark Gates and Jack Dongarra and Vincent Heuveline} } @inproceedings {icl:679, title = {A Checkpoint-on-Failure Protocol for Algorithm-Based Recovery in Standard MPI}, journal = {18th International European Conference on Parallel and Distributed Computing (Euro-Par 2012) (Best Paper Award)}, year = {2012}, month = {2012-08}, publisher = {Springer-Verlag}, address = {Rhodes, Greece}, author = {Wesley Bland and Peng Du and Aurelien Bouteiller and Thomas Herault and George Bosilca and Jack Dongarra}, editor = {Christos Kaklamanis and Theodore Papatheodorou and Paul Spirakis} } @inproceedings {icl:685, title = {A Class of Communication-Avoiding Algorithms for Solving General Dense Linear Systems on CPU/GPU Parallel Machines}, journal = {Proc. of the International Conference on Computational Science (ICCS)}, volume = {9}, year = {2012}, month = {2012-06}, pages = {17-26}, keywords = {magma}, author = {Marc Baboulin and Simplice Donfack and Jack Dongarra and Laura Grigori and Adrien R{\'e}my and Stanimire Tomov} } @article {icl:695, title = {A Comprehensive Study of Task Coalescing for Selecting Parallelism Granularity in a Two-Stage Bidiagonal Reduction}, journal = {IPDPS 2012}, year = {2012}, month = {2012-05}, address = {Shanghai, China}, author = {Azzam Haidar and Hatem Ltaief and Piotr Luszczek and Jack Dongarra} } @article {icl:670, title = {DAGuE: A generic distributed DAG Engine for High Performance Computing.}, journal = {Parallel Computing}, volume = {38}, number = {1-2}, year = {2012}, month = {2012-00}, pages = {27-51}, publisher = {Elsevier}, keywords = {dague, parsec}, author = {George Bosilca and Aurelien Bouteiller and Anthony Danalis and Thomas Herault and Pierre Lemarinier and Jack Dongarra} } @article {icl:703, title = {Dense Linear Algebra on Accelerated Multicore Hardware}, journal = {High Performance Scientific Computing: Algorithms and Applications}, year = {2012}, month = {2012-00}, publisher = {Springer-Verlag}, address = {London, UK}, author = {Jack Dongarra and Jakub Kurzak and Piotr Luszczek and Stanimire Tomov}, editor = {Michael Berry and others} } @article {icl:684, title = {Divide and Conquer on Hybrid GPU-Accelerated Multicore Systems}, journal = {SIAM Journal on Scientific Computing}, volume = {34(2)}, year = {2012}, month = {2012-04}, pages = {C70-C82}, keywords = {magma}, author = {Christof Voemel and Stanimire Tomov and Jack Dongarra} } @techreport {icl:683, title = {An efficient distributed randomized solver with application to large dense linear systems}, journal = {ICL Technical Report}, number = {ICL-UT-12-02}, year = {2012}, month = {2012-07}, keywords = {dague, dplasma, parsec, plasma}, author = {Marc Baboulin and Dulceneia Becker and George Bosilca and Anthony Danalis and Jack Dongarra} } @inproceedings {icl:669, title = {Enabling and Scaling Matrix Computations on Heterogeneous Multi-Core and Multi-GPU Systems}, journal = {26th ACM International Conference on Supercomputing (ICS 2012)}, year = {2012}, month = {2012-06}, publisher = {ACM}, address = {San Servolo Island, Venice, Italy}, keywords = {magma}, author = {Fengguang Song and Stanimire Tomov and Jack
Dongarra} } @inproceedings {icl:711, title = {Energy Footprint of Advanced Dense Numerical Linear Algebra using Tile Algorithms on Multicore Architecture}, journal = {The 2nd International Conference on Cloud and Green Computing (submitted)}, year = {2012}, month = {2012-11}, address = {Xiangtan, Hunan, China}, author = {Jack Dongarra and Hatem Ltaief and Piotr Luszczek and Vincent M. Weaver} } @article {icl:707, title = {Enhancing Parallelism of Tile Bidiagonal Transformation on Multicore Architectures using Tree Reduction}, journal = {Lecture Notes in Computer Science}, volume = {7203}, year = {2012}, month = {2012-09}, pages = {661-670}, author = {Hatem Ltaief and Piotr Luszczek and Jack Dongarra} } @inproceedings {icl:680, title = {An Evaluation of User-Level Failure Mitigation Support in MPI}, journal = {Proceedings of Recent Advances in Message Passing Interface - 19th European MPI Users{\textquoteright} Group Meeting, EuroMPI 2012}, year = {2012}, month = {2012-09}, publisher = {Springer}, address = {Vienna, Austria}, author = {Wesley Bland and Aurelien Bouteiller and Thomas Herault and Joshua Hursey and George Bosilca and Jack Dongarra} } @techreport {icl:724, title = {Extending the Scope of the Checkpoint-on-Failure Protocol for Forward Recovery in Standard MPI}, journal = {University of Tennessee Computer Science Technical Report}, number = {ut-cs-12-702}, year = {2012}, month = {2012-00}, keywords = {ftmpi}, author = {Wesley Bland and Peng Du and Aurelien Bouteiller and Thomas Herault and George Bosilca and Jack Dongarra} } @article {icl:725, title = {From CUDA to OpenCL: Towards a Performance-portable Solution for Multi-platform GPU Programming}, journal = {Parallel Computing}, volume = {38}, number = {8}, year = {2012}, month = {2012-08}, pages = {391-407}, author = {Peng Du and Rick Weber and Piotr Luszczek and Stanimire Tomov and Gregory D.
Peterson and Jack Dongarra} } @conference {icl:699, title = {From Serial Loops to Parallel Execution on Distributed Systems}, booktitle = {International European Conference on Parallel and Distributed Computing (Euro-Par {\textquoteright}12)}, year = {2012}, month = {2012-08}, address = {Rhodes, Greece}, author = {George Bosilca and Aurelien Bouteiller and Anthony Danalis and Thomas Herault and Jack Dongarra} } @article {1356, title = {The Future of Computing: Software Libraries}, year = {2012}, month = {2012-02}, publisher = {DOD CREATE Developers{\textquoteright} Review, Keynote Presentation}, address = {Savannah, GA}, author = {Stanimire Tomov and Jack Dongarra} } @article {icl:723, title = {GPU-Accelerated Asynchronous Error Correction for Mixed Precision Iterative Refinement}, journal = {EuroPar 2012 (also LAWN 260)}, year = {2012}, month = {2012-08}, address = {Rhodes Island, Greece}, author = {Hartwig Anzt and Piotr Luszczek and Jack Dongarra and Vincent Heuveline} } @inproceedings {icl:687, title = {Hierarchical QR Factorization Algorithms for Multi-Core Cluster Systems}, journal = {IPDPS 2012, the 26th IEEE International Parallel and Distributed Processing Symposium}, year = {2012}, month = {2012-05}, publisher = {IEEE Computer Society Press}, address = {Shanghai, China}, author = {Jack Dongarra and Mathieu Faverge and Thomas Herault and Julien Langou and Yves Robert} } @article {icl:700, title = {HierKNEM: An Adaptive Framework for Kernel-Assisted and Topology-Aware Collective Communications on Many-core Clusters}, journal = {IPDPS 2012 (Best Paper)}, year = {2012}, month = {2012-05}, address = {Shanghai, China}, author = {Teng Ma and George Bosilca and Aurelien Bouteiller and Jack Dongarra} } @article {icl:694, title = {High Performance Computing Systems: Status and Outlook}, journal = {Acta Numerica}, volume = {21}, year = {2012}, month = {2012-05}, pages = {379-474}, publisher = {Cambridge University Press}, address = {Cambridge, UK}, author = {Jack Dongarra and Aad J. van der Steen} } @article {icl:708, title = {High Performance Dense Linear System Solver with Resilience to Multiple Soft Errors}, journal = {ICCS 2012}, year = {2012}, month = {2012-06}, address = {Omaha, NE}, author = {Peng Du and Piotr Luszczek and Jack Dongarra} } @article {icl:706, title = {HPC Challenge: Design, History, and Implementation Highlights}, journal = {On the Road to Exascale Computing: Contemporary Architectures in High Performance Computing (to appear)}, year = {2012}, month = {2012-00}, publisher = {Chapman \& Hall/CRC Press}, author = {Jack Dongarra and Piotr Luszczek}, editor = {Jeffrey Vetter} } @article {icl:690, title = {An Implementation of the Tile QR Factorization for a GPU and Multiple CPUs}, journal = {Applied Parallel and Scientific Computing}, volume = {7133}, year = {2012}, month = {2012-00}, pages = {248-257}, author = {Jakub Kurzak and Rajib Nath and Peng Du and Jack Dongarra}, editor = {Kristj{\'a}n J{\'o}nasson} } @article {icl:705, title = {Looking Back at Dense Linear Algebra Software}, journal = {Perspectives on Parallel and Distributed Processing: Looking Back and What{\textquoteright}s Ahead (to appear)}, year = {2012}, month = {2012-00}, author = {Piotr Luszczek and Jakub Kurzak and Jack Dongarra}, editor = {Viktor K. 
Prasanna and Yves Robert and Per Stenstr{\"o}m}} @article {1355, title = {MAGMA: A Breakthrough in Solvers for Eigenvalue Problems}, year = {2012}, month = {2012-05}, publisher = {GPU Technology Conference (GTC12), Presentation}, address = {San Jose, CA}, author = {Stanimire Tomov and Jack Dongarra and Azzam Haidar and Ichitaro Yamazaki and Tingxing Dong and Thomas Schulthess and Raffaele Solc{\`a}} } @article {1349, title = {MAGMA: A New Generation of Linear Algebra Library for GPU and Multicore Architectures}, year = {2012}, month = {2012-11}, publisher = {The International Conference for High Performance Computing, Networking, Storage, and Analysis (SC12), Presentation}, address = {Salt Lake City, UT}, author = {Jack Dongarra and Tingxing Dong and Mark Gates and Azzam Haidar and Stanimire Tomov and Ichitaro Yamazaki} } @article {1354, title = {MAGMA MIC: Linear Algebra Library for Intel Xeon Phi Coprocessors}, year = {2012}, month = {2012-11}, publisher = {The International Conference for High Performance Computing, Networking, Storage, and Analysis (SC12)}, address = {Salt Lake City, UT}, author = {Jack Dongarra and Mark Gates and Yulu Jia and Khairul Kabir and Piotr Luszczek and Stanimire Tomov} } @article {icl:730, title = {Matrices Over Runtime Systems at Exascale}, journal = {Supercomputing {\textquoteright}12 (poster)}, year = {2012}, month = {2012-11}, address = {Salt Lake City, Utah}, author = {Emmanuel Agullo and George Bosilca and Cedric Castagn{\`e}de and Jack Dongarra and Hatem Ltaief and Stanimire Tomov} } @article {icl:729, title = {A Novel Hybrid CPU-GPU Generalized Eigensolver for Electronic Structure Calculations Based on Fine Grained Memory Aware Tasks}, journal = {Supercomputing {\textquoteright}12 (poster)}, year = {2012}, month = {2012-11}, address = {Salt Lake City, Utah}, author = {Raffaele Solc{\`a} and Azzam Haidar and Stanimire Tomov and Jack Dongarra and Thomas C.
Schulthess} } @inproceedings {icl:678, title = {One-Sided Dense Matrix Factorizations on a Multicore with Multiple GPU Accelerators}, journal = {The International Conference on Computational Science (ICCS)}, year = {2012}, month = {2012-06}, keywords = {magma}, author = {Ichitaro Yamazaki and Stanimire Tomov and Jack Dongarra} } @article {icl:709, title = {Optimizing Memory-Bound Numerical Kernels on GPU Hardware Accelerators}, journal = {VECPAR 2012}, year = {2012}, month = {2012-07}, address = {Kobe, Japan}, author = {Ahmad Abdelfattah and Jack Dongarra and David Keyes and Hatem Ltaief} } @article {icl:727, title = {Parallel Processing and Applied Mathematics, 9th International Conference, PPAM 2011}, journal = {Lecture Notes in Computer Science}, volume = {7203}, year = {2012}, month = {2012-00}, address = {Torun, Poland}, editor = {Roman Wyrzykowski and Jack Dongarra and Konrad Karczewski and Jerzy Wasniewski} } @article {icl:696, title = {A Parallel Tiled Solver for Symmetric Indefinite Systems On Multicore Architectures}, journal = {IPDPS 2012}, year = {2012}, month = {2012-05}, address = {Shanghai, China}, author = {Marc Baboulin and Dulceneia Becker and Jack Dongarra} } @techreport {icl:714, title = {Performance evaluation of LU factorization through hardware counter measurements}, journal = {University of Tennessee Computer Science Technical Report}, number = {ut-cs-12-700}, year = {2012}, month = {2012-10}, author = {Simplice Donfack and Stanimire Tomov and Jack Dongarra} } @inproceedings {icl:710, title = {Power Profiling of Cholesky and QR Factorizations on Distributed Memory Systems}, journal = {Third International Conference on Energy-Aware High Performance Computing}, year = {2012}, month = {2012-09}, address = {Hamburg, Germany}, author = {George Bosilca and Jack Dongarra and Hatem Ltaief} } @article {icl:718, title = {Preliminary Results of Autotuning GEMM Kernels for the NVIDIA Kepler Architecture}, journal = {LAWN 267}, year = {2012}, month = {2012-00}, author = {Jakub Kurzak and Piotr Luszczek and Stanimire Tomov and Jack Dongarra} } @inproceedings {icl:665, title = {Programming the LU Factorization for a Multicore System with Accelerators}, journal = {Proceedings of VECPAR{\textquoteright}12}, year = {2012}, month = {2012-04}, address = {Kobe, Japan}, keywords = {plasma, quark}, author = {Jakub Kurzak and Piotr Luszczek and Mathieu Faverge and Jack Dongarra} } @techreport {icl:667, title = {A Proposal for User-Level Failure Mitigation in the MPI-3 Standard}, journal = {University of Tennessee Electrical Engineering and Computer Science Technical Report}, number = {ut-cs-12-693}, year = {2012}, month = {2012-02}, publisher = {University of Tennessee}, keywords = {ftmpi}, author = {Wesley Bland and George Bosilca and Aurelien Bouteiller and Thomas Herault and Jack Dongarra} } @techreport {icl:715, title = {Providing GPU Capability to LU and QR within the ScaLAPACK Framework}, journal = {University of Tennessee Computer Science Technical Report (also LAWN 272)}, number = {UT-CS-12-699}, year = {2012}, month = {2012-09}, author = {Peng Du and Stanimire Tomov and Jack Dongarra} } @article {icl:728, title = {Recent Advances in the Message Passing Interface: 19th European MPI Users{\textquoteright} Group Meeting, EuroMPI 2012}, journal = {Lecture Notes in Computer Science}, volume = {7490}, year = {2012}, month = {2012-00}, address = {Vienna, Austria}, editor = {Jesper Larsson Tr{\"a}ff and Siegfried Benkner and Jack Dongarra} } @article {icl:722, title = {Reducing the Amount
of Pivoting in Symmetric Indefinite Systems}, journal = {Parallel Processing and Applied Mathematics, Lecture Notes in Computer Science (PPAM 2011)}, volume = {7203}, year = {2012}, month = {2012-00}, pages = {133-142}, publisher = {Springer-Verlag Berlin Heidelberg}, author = {Dulceneia Becker and Marc Baboulin and Jack Dongarra}, editor = {Roman Wyrzykowski and Jack Dongarra and Konrad Karczewski and Jerzy Wasniewski} } @inproceedings {icl:681, title = {A Scalable Framework for Heterogeneous GPU-Based Clusters}, journal = {The 24th ACM Symposium on Parallelism in Algorithms and Architectures (SPAA 2012)}, year = {2012}, month = {2012-06}, publisher = {ACM}, address = {Pittsburgh, PA, USA}, keywords = {magma}, author = {Fengguang Song and Jack Dongarra} } @article {icl:726, title = {Toward High Performance Divide and Conquer Eigensolver for Dense Symmetric Matrices}, journal = {SIAM Journal on Scientific Computing (Accepted)}, year = {2012}, month = {2012-07}, author = {Azzam Haidar and Hatem Ltaief and Jack Dongarra} } @techreport {icl:716, title = {Unified Model for Assessing Checkpointing Protocols at Extreme-Scale}, journal = {University of Tennessee Computer Science Technical Report (also LAWN 269)}, number = {UT-CS-12-697}, year = {2012}, month = {2012-06}, author = {George Bosilca and Aurelien Bouteiller and Elisabeth Brunet and Franck Cappello and Jack Dongarra and Amina Guermouche and Thomas Herault and Yves Robert and Frederic Vivien and Dounia Zaidouni} } @inproceedings {icl:713, title = {Weighted Block-Asynchronous Iteration on GPU-Accelerated Systems}, journal = {Tenth International Workshop on Algorithms, Models and Tools for Parallel Computing on Heterogeneous Platforms (Best Paper)}, year = {2012}, month = {2012-08}, address = {Rhodes Island, Greece}, author = {Hartwig Anzt and Stanimire Tomov and Jack Dongarra and Vincent Heuveline} } @article {icl:701, title = {Weighted Block-Asynchronous Relaxation for GPU-Accelerated Systems}, journal = {SIAM Journal on Computing (submitted)}, year = {2012}, month = {2012-03}, author = {Hartwig Anzt and Jack Dongarra and Vincent Heuveline} } @article {icl:637, title = {Accelerating Linear System Solutions Using Randomization Techniques}, journal = {INRIA RR-7616 / LAWN $\#$246 (presented at International AMMCS{\textquoteright}11)}, year = {2011}, month = {2011-07}, address = {Waterloo, Ontario, Canada}, keywords = {magma}, author = {Marc Baboulin and Jack Dongarra and Julien Herrmann and Stanimire Tomov} } @techreport {icl:660, title = {Achieving Numerical Accuracy and High Performance using Recursive Tile LU Factorization}, journal = {University of Tennessee Computer Science Technical Report (also as a LAWN)}, number = {ICL-UT-11-08}, year = {2011}, month = {2011-09}, keywords = {plasma, quark}, author = {Jack Dongarra and Mathieu Faverge and Hatem Ltaief and Piotr Luszczek} } @techreport {icl:626, title = {Algorithm-based Fault Tolerance for Dense Matrix Factorizations}, journal = {University of Tennessee Computer Science Technical Report}, number = {UT-CS-11-676}, year = {2011}, month = {2011-08}, address = {Knoxville, TN}, keywords = {ft-la}, author = {Peng Du and Aurelien Bouteiller and George Bosilca and Thomas Herault and Jack Dongarra} } @techreport {icl:631, title = {Analysis of Dynamically Scheduled Tile Algorithms for Dense Linear Algebra on Multicore Architectures}, journal = {University of Tennessee Computer Science Technical Report, UT-CS-11-666, (also Lawn 243)}, year = {2011}, month = {2011-03}, keywords = {plasma,
quark}, author = {Azzam Haidar and Hatem Ltaief and Asim YarKhan and Jack Dongarra} } @techreport {icl:630, title = {Autotuning GEMMs for Fermi}, journal = {University of Tennessee Computer Science Technical Report, UT-CS-11-671, (also Lawn 245)}, year = {2011}, month = {2011-04}, keywords = {magma}, author = {Jakub Kurzak and Stanimire Tomov and Jack Dongarra} } @inproceedings {icl:591, title = {BlackjackBench: Hardware Characterization with Portable Micro-Benchmarks and Automatic Statistical Analysis of Results}, journal = {IEEE International Parallel and Distributed Processing Symposium (submitted)}, year = {2011}, month = {2011-05}, address = {Anchorage, AK}, author = {Anthony Danalis and Piotr Luszczek and Gabriel Marin and Jeffrey Vetter and Jack Dongarra} } @article {icl:661, title = {Block-asynchronous Multigrid Smoothers for GPU-accelerated Systems}, number = {UT-CS-11-689}, year = {2011}, month = {2011-12}, keywords = {magma}, author = {Hartwig Anzt and Stanimire Tomov and Mark Gates and Jack Dongarra and Vincent Heuveline} } @techreport {icl:656, title = {A Block-Asynchronous Relaxation Method for Graphics Processing Units}, journal = {University of Tennessee Computer Science Technical Report}, number = {UT-CS-11-687 / LAWN 258}, year = {2011}, month = {2011-11}, keywords = {magma}, author = {Hartwig Anzt and Stanimire Tomov and Jack Dongarra and Vincent Heuveline} } @article {icl:587, title = {Changes in Dense Linear Algebra Kernels - Decades Long Perspective}, journal = {in Solving the Schr{\"o}dinger Equation: Has everything been tried? (to appear)}, year = {2011}, month = {2011-00}, publisher = {Imperial College Press}, author = {Piotr Luszczek and Jakub Kurzak and Jack Dongarra}, editor = {P. Popelier} } @inproceedings {icl:640, title = {A Class of Hybrid LAPACK Algorithms for Multicore and GPU Architectures}, journal = {Symposium for Application Accelerators in High Performance Computing (SAAHPC{\textquoteright}11)}, year = {2011}, month = {2011-07}, address = {Knoxville, TN}, keywords = {magma, quark}, author = {Mitch Horton and Stanimire Tomov and Jack Dongarra} } @inproceedings {icl:673, title = {Correlated Set Coordination in Fault Tolerant Message Logging Protocols}, journal = {Proceedings of 17th International Conference, Euro-Par 2011, Part II}, volume = {6853}, year = {2011}, month = {2011-08}, pages = {51-64}, publisher = {Springer}, address = {Bordeaux, France}, keywords = {ftmpi}, author = {Aurelien Bouteiller and Thomas Herault and George Bosilca and Jack Dongarra}, editor = {Emmanuel Jeannot and Raymond Namyst and Jean Roman} } @inproceedings {icl:675, title = {DAGuE: A Generic Distributed DAG Engine for High Performance Computing}, journal = {Proceedings of the Workshops of the 25th IEEE International Symposium on Parallel and Distributed Processing (IPDPS 2011 Workshops)}, year = {2011}, month = {2011-00}, pages = {1151-1158}, publisher = {IEEE}, address = {Anchorage, Alaska, USA}, keywords = {dague, parsec}, author = {George Bosilca and Aurelien Bouteiller and Anthony Danalis and Thomas Herault and Pierre Lemarinier and Jack Dongarra} } @techreport {icl:628, title = {Efficient Support for Matrix Computations on Heterogeneous Multi-core and Multi-GPU Architectures}, journal = {University of Tennessee Computer Science Technical Report, UT-CS-11-668, (also Lawn 250)}, year = {2011}, month = {2011-06}, keywords = {magma, plasma}, author = {Fengguang Song and Stanimire Tomov and Jack Dongarra} } @inproceedings {icl:616, title = {Evaluation of the HPC Challenge
Benchmarks in Virtualized Environments}, journal = {6th Workshop on Virtualization in High-Performance Cloud Computing}, year = {2011}, month = {2011-08}, address = {Bordeaux, France}, keywords = {hpcc}, author = {Piotr Luszczek and Eric Meek and Shirley Moore and Dan Terpstra and Vincent M. Weaver and Jack Dongarra} } @inproceedings {icl:611, title = {Exploiting Fine-Grain Parallelism in Recursive LU Factorization}, journal = {Proceedings of PARCO{\textquoteright}11}, number = {ICL-UT-11-04}, year = {2011}, month = {2011-04}, address = {Gent, Belgium}, keywords = {plasma}, author = {Jack Dongarra and Mathieu Faverge and Hatem Ltaief and Piotr Luszczek} } @inproceedings {icl:676, title = {Flexible Development of Dense Linear Algebra Algorithms on Massively Parallel Architectures with DPLASMA}, journal = {Proceedings of the Workshops of the 25th IEEE International Symposium on Parallel and Distributed Processing (IPDPS 2011 Workshops)}, year = {2011}, month = {2011-05}, pages = {1432-1441}, publisher = {IEEE}, address = {Anchorage, Alaska, USA}, keywords = {dague, dplasma, parsec}, author = {George Bosilca and Aurelien Bouteiller and Anthony Danalis and Mathieu Faverge and Azzam Haidar and Thomas Herault and Jakub Kurzak and Julien Langou and Pierre Lemarinier and Hatem Ltaief and Piotr Luszczek and Asim YarKhan and Jack Dongarra} } @techreport {icl:662, title = {GPU-Accelerated Asynchronous Error Correction for Mixed Precision Iterative Refinement}, journal = {University of Tennessee Computer Science Technical Report UT-CS-11-690 (also Lawn 260)}, year = {2011}, month = {2011-12}, keywords = {magma}, author = {Hartwig Anzt and Piotr Luszczek and Jack Dongarra and Vincent Heuveline} } @techreport {icl:645, title = {Hierarchical QR Factorization Algorithms for Multi-Core Cluster Systems}, journal = {University of Tennessee Computer Science Technical Report (also Lawn 257)}, number = {UT-CS-11-684}, year = {2011}, month = {2011-10}, keywords = {magma, plasma}, author = {Jack Dongarra and Mathieu Faverge and Thomas Herault and Julien Langou and Yves Robert} } @techreport {icl:629, title = {High Performance Bidiagonal Reduction using Tile Algorithms on Homogeneous Multicore Architectures}, journal = {University of Tennessee Computer Science Technical Report, UT-CS-11-673, (also Lawn 247)}, year = {2011}, month = {2011-05}, keywords = {plasma}, author = {Hatem Ltaief and Piotr Luszczek and Jack Dongarra} } @article {icl:622, title = {High Performance Dense Linear System Solver with Soft Error Resilience}, journal = {IEEE Cluster 2011}, year = {2011}, month = {2011-09}, address = {Austin, TX}, keywords = {ft-la}, author = {Peng Du and Piotr Luszczek and Jack Dongarra} } @inproceedings {icl:658, title = {High Performance Matrix Inversion Based on LU Factorization for Multicore Architectures}, journal = {Proceedings of MTAGS11}, year = {2011}, month = {2011-11}, address = {Seattle, WA}, author = {Jack Dongarra and Mathieu Faverge and Hatem Ltaief and Piotr Luszczek} } @article {, title = {High-Performance High-Resolution Semi-Lagrangian Tracer Transport on a Sphere}, journal = {Journal of Computational Physics}, volume = {230}, year = {2011}, month = {2011-07}, pages = {6778-6799}, abstract = {Current climate models have a limited ability to increase spatial resolution because numerical stability requires the time step to decrease.
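The restriction referred to is the CFL stability condition: for an explicit transport scheme with wind speed $u$, grid spacing $\Delta x$, and time step $\Delta t$, the Courant number must remain bounded,

\[
  C = \frac{u\,\Delta t}{\Delta x} \le C_{\max}
  \quad (C_{\max} \approx 1 \text{ for typical explicit schemes}),
\]

so halving the grid spacing forces halving the time step. Semi-Lagrangian schemes are not subject to this bound, which motivates the method that follows.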
We describe a semi-Lagrangian method for tracer transport that is stable for arbitrary Courant numbers, and we test a parallel implementation discretized on the cubed sphere. The method includes a fixer that conserves mass and constrains tracers to a physical range of values. The method shows third-order convergence and maintains nonlinear tracer correlations to second order. It shows optimal accuracy at Courant numbers of 10{\textendash}20, more than an order of magnitude higher than explicit methods. We present parallel performance in terms of strong scaling, weak scaling, and spatial scaling (where the time step stays constant while the resolution increases). For a 0.2{\textdegree} test with 100 tracers, the implementation scales efficiently to 10,000 MPI tasks.}, keywords = {cubed sphere, high resolution, High-performance computing, semi-Lagrangian, spherical geometry, tracer transport}, doi = {https://doi.org/10.1016/j.jcp.2011.05.008}, author = {James B. White and Jack Dongarra} } @article {icl:653, title = {A Hybridization Methodology for High-Performance Linear Algebra Software for GPUs}, journal = {in GPU Computing Gems, Jade Edition}, volume = {2}, year = {2011}, month = {2011-00}, pages = {473-484}, publisher = {Elsevier}, keywords = {magma, morse}, author = {Emmanuel Agullo and Cedric Augonnet and Jack Dongarra and Hatem Ltaief and Raymond Namyst and Samuel Thibault and Stanimire Tomov}, editor = {Wen-mei W. Hwu} } @article {icl:646, title = {Impact of Kernel-Assisted MPI Communication over Scientific Applications: CPMD and FFTW}, journal = {18th EuroMPI}, year = {2011}, month = {2011-09}, pages = {247-254}, publisher = {Springer}, address = {Santorini, Greece}, keywords = {dague}, author = {Teng Ma and Aurelien Bouteiller and George Bosilca and Jack Dongarra}, editor = {Yiannis Cotronis and Anthony Danalis and Dimitrios S. Nikolopoulos and Jack Dongarra} } @article {icl:643, title = {The International Exascale Software Project Roadmap}, journal = {International Journal of High Performance Computing}, volume = {25}, number = {1}, year = {2011}, month = {2011-01}, pages = {3-60}, abstract = {Over the last 20 years, the open-source community has provided more and more software on which the world{\textquoteright}s high-performance computing systems depend for performance and productivity. The community has invested millions of dollars and years of effort to build key components. However, although the investments in these separate software elements have been tremendously valuable, a great deal of productivity has also been lost because of the lack of planning, coordination, and key integration of technologies necessary to make them work together smoothly and efficiently, both within individual petascale systems and between different systems. It seems clear that this completely uncoordinated development model will not provide the software needed to support the unprecedented parallelism required for peta/exascale computation on millions of cores, or the flexibility required to exploit new hardware models and features, such as transactional memory, speculative execution, and graphics processing units.
This report describes the work of the community to prepare for the challenges of exascale computing, ultimately combining their efforts in a coordinated International Exascale Software Project.}, doi = {https://doi.org/10.1177/1094342010391989}, author = {Jack Dongarra and Pete Beckman and Terry Moore and Patrick Aerts and Giovanni Aloisio and Jean-Claude Andre and David Barkai and Jean-Yves Berthou and Taisuke Boku and Bertrand Braunschweig and Franck Cappello and Barbara Chapman and Xuebin Chi and Alok Choudhary and Sudip Dosanjh and Thom Dunning and Sandro Fiore and Al Geist and Bill Gropp and Robert Harrison and Mark Hereld and Michael Heroux and Adolfy Hoisie and Koh Hotta and Zhong Jin and Yutaka Ishikawa and Fred Johnson and Sanjay Kale and Richard Kenway and David Keyes and Bill Kramer and Jesus Labarta and Alain Lichnewsky and Thomas Lippert and Bob Lucas and Barney MacCabe and Satoshi Matsuoka and Paul Messina and Peter Michielse and Bernd Mohr and Matthias S. Mueller and Wolfgang E. Nagel and Hiroshi Nakashima and Michael E. Papka and Dan Reed and Mitsuhisa Sato and Ed Seidel and John Shalf and David Skinner and Marc Snir and Thomas Sterling and Rick Stevens and Fred Streitz and Bob Sugar and Shinji Sumimoto and William Tang and John Taylor and Rajeev Thakur and Anne Trefethen and Mateo Valero and Aad van der Steen and Jeffrey Vetter and Peg Williams and Robert Wisniewski and Kathy Yelick} } @article {, title = {Keeneland: Bringing Heterogeneous GPU Computing to the Computational Science Community}, journal = {IEEE Computing in Science \& Engineering}, volume = {13}, year = {2011}, month = {2011-08}, pages = {90-95}, abstract = {The Keeneland project{\textquoteright}s goal is to develop and deploy an innovative, GPU-based high-performance computing system for the NSF computational science community.}, keywords = {Benchmark testing, Computational modeling, Computer architecture, Graphics processing unit, Hardware, Random access memory, Scientific computing}, doi = {https://doi.org/10.1109/MCSE.2011.83}, author = {Jeffrey Vetter and Richard Glassbrook and Jack Dongarra and Karsten Schwan and Bruce Loftis and Stephen McNally and Jeremy Meredith and James Rogers and Philip Roth and Kyle Spafford and Sudhakar Yalamanchili} } @inproceedings {icl:649, title = {Kernel Assisted Collective Intra-node MPI Communication Among Multi-core and Many-core CPUs}, journal = {Int{\textquoteright}l Conference on Parallel Processing (ICPP {\textquoteright}11)}, year = {2011}, month = {2011-09}, address = {Taipei, Taiwan}, author = {Teng Ma and George Bosilca and Aurelien Bouteiller and Brice Goglin and J.
Squyres and Jack Dongarra} } @article {icl:599, title = {LU Factorization for Accelerator-Based Systems}, journal = {IEEE/ACS AICCSA 2011}, year = {2011}, month = {2011-12}, address = {Sharm-El-Sheikh, Egypt}, keywords = {magma, morse}, author = {Emmanuel Agullo and Cedric Augonnet and Jack Dongarra and Mathieu Faverge and Julien Langou and Hatem Ltaeif and Stanimire Tomov} } @article {1359, title = {MAGMA - LAPACK for HPC on Heterogeneous Architectures}, year = {2011}, month = {2011-08}, publisher = {Titan Summit at Oak Ridge National Laboratory, Presentation}, address = {Oak Ridge, TN}, author = {Stanimire Tomov and Jack Dongarra} } @article {icl:647, title = {OMPIO: A Modular Software Architecture for MPI I/O}, journal = {18th EuroMPI}, year = {2011}, month = {2011-09}, pages = {81-89}, publisher = {Springer}, address = {Santorini, Greece}, author = {Mohamad Chaarawi and Edgar Gabriel and Rainer Keller and Richard L. Graham and George Bosilca and Jack Dongarra}, editor = {Yiannis Cotronis and Anthony Danalis and Dimitrios S. Nikolopoulos and Jack Dongarra} } @inproceedings {icl:632, title = {Optimizing Symmetric Dense Matrix-Vector Multiplication on GPUs}, journal = {ACM/IEEE Conference on Supercomputing (SC{\textquoteright}11)}, year = {2011}, month = {2011-11}, address = {Seattle, WA}, keywords = {magma}, author = {Rajib Nath and Stanimire Tomov and Tingxing Dong and Jack Dongarra} } @inproceedings {icl:590, title = {Overlapping Computation and Communication for Advection on a Hybrid Parallel Computer}, journal = {IEEE International Parallel and Distributed Processing Symposium (submitted)}, year = {2011}, month = {2011-05}, address = {Anchorage, AK}, author = {James B. White and Jack Dongarra} } @inproceedings {icl:657, title = {Parallel Reduction to Condensed Forms for Symmetric Eigenvalue Problems using Aggregated Fine-Grained and Memory-Aware Kernels}, journal = {Proceedings of 2011 International Conference for High Performance Computing, Networking, Storage and Analysis (SC11)}, year = {2011}, month = {2011-11}, address = {Seattle, WA}, keywords = {plasma, quark}, author = {Azzam Haidar and Hatem Ltaeif and Jack Dongarra} } @techreport {icl:627, title = {Parallel Reduction to Condensed Forms for Symmetric Eigenvalue Problems using Aggregated Fine-Grained and Memory-Aware Kernels}, journal = {University of Tennessee Computer Science Technical Report, UT-CS-11-677 (also LAWN 254)}, year = {2011}, month = {2011-08}, author = {Azzam Haidar and Hatem Ltaeif and Jack Dongarra} } @techreport {icl:641, title = {A parallel tiled solver for dense symmetric indefinite systems on multicore architectures}, journal = {University of Tennessee Computer Science Technical Report}, number = {ICL-UT-11-07}, year = {2011}, month = {2011-10}, keywords = {plasma, quark}, author = {Marc Baboulin and Dulceneia Becker and Jack Dongarra} } @techreport {icl:644, title = {Performance of Various Computers Using Standard Linear Equations Software (Linpack Benchmark Report)}, journal = {University of Tennessee Computer Science Technical Report}, number = {CS-89-85}, year = {2011}, month = {2011-00}, author = {Jack Dongarra} } @article {icl:636, title = {Performance Portability of a GPU Enabled Factorization with the DAGuE Framework}, journal = {IEEE Cluster: workshop on Parallel Programming on Accelerator Clusters (PPAC)}, year = {2011}, month = {2011-06}, keywords = {dague, magma, parsec}, author = {George Bosilca and Aurelien Bouteiller and Thomas Herault and Pierre Lemariner and Narapat Ohm Saengpatsa and
Stanimire Tomov and Jack Dongarra} } @inproceedings {icl:648, title = {Process Distance-aware Adaptive MPI Collective Communications}, journal = {IEEE Int{\textquoteright}l Conference on Cluster Computing (Cluster 2011)}, year = {2011}, month = {2011-00}, address = {Austin, Texas}, author = {Teng Ma and Thomas Herault and George Bosilca and Jack Dongarra} } @inproceedings {icl:621, title = {Profiling High Performance Dense Linear Algebra Algorithms on Multicore Architectures for Power and Energy Efficiency}, journal = {International Conference on Energy-Aware High Performance Computing (EnA-HPC 2011)}, year = {2011}, month = {2011-09}, address = {Hamburg, Germany}, keywords = {mumi}, author = {Hatem Ltaeif and Piotr Luszczek and Jack Dongarra} } @article {icl:677, title = {QCG-OMPI: MPI Applications on Grids}, journal = {Future Generation Computer Systems}, volume = {27}, number = {4}, year = {2011}, month = {2011-01}, pages = {357-369}, author = {Emmanuel Agullo and Camille Coti and Thomas Herault and Julien Langou and Sylvain Peyronnet and A. Rezmerita and Franck Cappello and Jack Dongarra} } @techreport {icl:609, title = {QUARK Users{\textquoteright} Guide: QUeueing And Runtime for Kernels}, journal = {University of Tennessee Innovative Computing Laboratory Technical Report}, number = {ICL-UT-11-02}, year = {2011}, month = {2011-00}, keywords = {magma, plasma, quark}, author = {Asim YarKhan and Jakub Kurzak and Jack Dongarra} } @techreport {icl:613, title = {Reducing the Amount of Pivoting in Symmetric Indefinite Systems}, journal = {University of Tennessee Innovative Computing Laboratory Technical Report}, number = {ICL-UT-11-06}, year = {2011}, month = {2011-05}, publisher = {Submitted to PPAM 2011}, address = {Knoxville, TN}, author = {Dulceneia Becker and Marc Baboulin and Jack Dongarra} } @techreport {icl:612, title = {On Scalability for MPI Runtime Systems}, journal = {University of Tennessee Computer Science Technical Report}, number = {ICL-UT-11-05}, year = {2011}, month = {2011-05}, address = {Knoxville, TN}, author = {George Bosilca and Thomas Herault and A. Rezmerita and Jack Dongarra} } @inproceedings {icl:671, title = {On Scalability for MPI Runtime Systems}, journal = {International Conference on Cluster Computing (CLUSTER)}, year = {2011}, month = {2011-09}, pages = {187-195}, publisher = {IEEE}, address = {Austin, TX, USA}, keywords = {harness}, author = {George Bosilca and Thomas Herault and A. Rezmerita and Jack Dongarra} } @inproceedings {icl:674, title = {Scalable Runtime for MPI: Efficiently Building the Communication Infrastructure}, journal = {Proceedings of Recent Advances in the Message Passing Interface - 18th European MPI Users{\textquoteright} Group Meeting, EuroMPI 2011}, volume = {6960}, year = {2011}, month = {2011-09}, pages = {342-344}, publisher = {Springer}, address = {Santorini, Greece}, keywords = {ftmpi}, author = {George Bosilca and Thomas Herault and Pierre Lemariner and Jack Dongarra and A. Rezmerita}, editor = {Yiannis Cotronis and Anthony Danalis and Dimitrios S.
Nikolopoulos and Jack Dongarra} } @techreport {icl:625, title = {Soft Error Resilient QR Factorization for Hybrid System}, journal = {University of Tennessee Computer Science Technical Report}, number = {UT-CS-11-675}, year = {2011}, month = {2011-07}, address = {Knoxville, TN}, keywords = {ft-la}, author = {Peng Du and Piotr Luszczek and Stanimire Tomov and Jack Dongarra} } @article {icl:635, title = {Soft Error Resilient QR Factorization for Hybrid System}, journal = {UT-CS-11-675 (also LAPACK Working Note $\#$252)}, number = {ICL-CS-11-675}, year = {2011}, month = {2011-07}, keywords = {magma}, author = {Peng Du and Piotr Luszczek and Stanimire Tomov and Jack Dongarra} } @article {icl:642, title = {Soft Error Resilient QR Factorization for Hybrid System with GPGPU}, journal = {Journal of Computational Science}, year = {2011}, month = {2011-11}, publisher = {Workshop on Latest Advances in Scalable Algorithms for Large-Scale Systems at SC11}, address = {Seattle, WA}, keywords = {ft-la}, author = {Peng Du and Piotr Luszczek and Stanimire Tomov and Jack Dongarra} } @article {icl:604, title = {Toward High Performance Divide and Conquer Eigensolver for Dense Symmetric Matrices}, journal = {Submitted to SIAM Journal on Scientific Computing (SISC)}, year = {2011}, month = {2011-00}, author = {Azzam Haidar and Hatem Ltaeif and Jack Dongarra} } @techreport {icl:610, title = {Towards a Parallel Tile LDL Factorization for Multicore Architectures}, journal = {ICL Technical Report}, number = {ICL-UT-11-03}, year = {2011}, month = {2011-04}, address = {Seattle, WA}, keywords = {plasma, quark}, author = {Dulceneia Becker and Mathieu Faverge and Jack Dongarra} } @inproceedings {icl:592, title = {Two-stage Tridiagonal Reduction for Dense Symmetric Matrices using Tile Algorithms on Multicore Architectures}, journal = {IEEE International Parallel and Distributed Processing Symposium (submitted)}, year = {2011}, month = {2011-05}, address = {Anchorage, AK}, author = {Piotr Luszczek and Hatem Ltaeif and Jack Dongarra} } @inproceedings {icl:593, title = {A Unified HPC Environment for Hybrid Manycore/GPU Distributed Systems}, journal = {IEEE International Parallel and Distributed Processing Symposium (submitted)}, year = {2011}, month = {2011-05}, address = {Anchorage, AK}, keywords = {dague}, author = {George Bosilca and Aurelien Bouteiller and Thomas Herault and Pierre Lemariner and Narapat Ohm Saengpatsa and Stanimire Tomov and Jack Dongarra} } @inproceedings {icl:580, title = {8th International Conference on Parallel Processing and Applied Mathematics, Lecture Notes in Computer Science (LNCS)}, journal = {PPAM 2009 Proceedings}, volume = {6067}, year = {2010}, month = {2010-09}, publisher = {Springer}, address = {Wroclaw, Poland}, editor = {Roman Wyrzykowski and Jack Dongarra and Konrad Karczewski and Jerzy Wasniewski} } @article {icl:546, title = {Accelerating GPU Kernels for Dense Linear Algebra}, journal = {Proc.
of VECPAR{\textquoteright}10}, year = {2010}, month = {2010-06}, address = {Berkeley, CA}, keywords = {magma}, author = {Rajib Nath and Stanimire Tomov and Jack Dongarra} } @article {icl:547, title = {Accelerating the Reduction to Upper Hessenberg, Tridiagonal, and Bidiagonal Forms through Hybrid GPU-Based Computing}, journal = {Parallel Computing}, volume = {36}, number = {12}, year = {2010}, month = {2010-00}, pages = {645-654}, keywords = {magma}, author = {Stanimire Tomov and Rajib Nath and Jack Dongarra} } @article {icl:533, title = {Analysis of Dynamically Scheduled Tile Algorithms for Dense Linear Algebra on Multicore Architectures}, journal = {Submitted to Concurrency and Computation: Practice and Experience}, year = {2010}, month = {2010-11}, keywords = {plasma, quark}, author = {Azzam Haidar and Hatem Ltaeif and Asim YarKhan and Jack Dongarra} } @techreport {icl:536, title = {Analysis of Various Scalar, Vector, and Parallel Implementations of RandomAccess}, journal = {Innovative Computing Laboratory (ICL) Technical Report}, number = {ICL-UT-10-03}, year = {2010}, month = {2010-06}, keywords = {hpcc}, author = {Piotr Luszczek and Jack Dongarra} } @article {1364, title = {Autotuning Dense Linear Algebra Libraries on GPUs}, year = {2010}, month = {2010-06}, publisher = {Sixth International Workshop on Parallel Matrix Algorithms and Applications (PMAA 2010)}, address = {Basel, Switzerland}, author = {Rajib Nath and Stanimire Tomov and Emmanuel Agullo and Jack Dongarra} } @inbook {854, title = {BLAS for GPUs}, booktitle = {Scientific Computing with Multicore and Accelerators}, series = {Chapman \& Hall/CRC Computational Science}, year = {2010}, publisher = {CRC Press}, organization = {CRC Press}, chapter = {4}, address = {Boca Raton, Florida}, isbn = {9781439825365}, author = {Rajib Nath and Stanimire Tomov and Jack Dongarra} } @inproceedings {icl:598, title = {Can Hardware Performance Counters Produce Expected, Deterministic Results?}, journal = {3rd Workshop on Functionality of Hardware Performance Monitoring}, year = {2010}, month = {2010-12}, address = {Atlanta, GA}, keywords = {papi}, author = {Vincent M. Weaver and Jack Dongarra} } @article {icl:572, title = {A Class of Parallel Tiled Linear Algebra Algorithms for Multicore Architectures}, journal = {Parallel Computing (to appear)}, year = {2010}, month = {2010-00}, author = {Alfredo Buttari and Julien Langou and Jakub Kurzak and Jack Dongarra} } @article {icl:557, title = {Collecting Performance Data with PAPI-C}, journal = {Tools for High Performance Computing 2009}, year = {2010}, month = {2010-05}, pages = {157-173}, publisher = {Springer Berlin / Heidelberg}, address = {3rd Parallel Tools Workshop, Dresden, Germany}, abstract = {Modern high performance computer systems continue to increase in size and complexity. Tools to measure application performance in these increasingly complex environments must also increase the richness of their measurements to provide insights into the increasingly intricate ways in which software and hardware interact. PAPI (the Performance API) has provided consistent platform and operating system independent access to CPU hardware performance counters for nearly a decade. Recent trends toward massively parallel multi-core systems with often heterogeneous architectures present new challenges for the measurement of hardware performance information, which is now available not only on the CPU core itself, but scattered across the chip and system.
We discuss the evolution of PAPI into Component PAPI, or PAPI-C, in which multiple sources of performance data can be measured simultaneously via a common software interface. Several examples of components and component data measurements are discussed. We explore the challenges to hardware performance measurement in existing multi-core architectures. We conclude with an exploration of future directions for the PAPI interface.}, keywords = {mumi, papi}, doi = {https://doi.org/10.1007/978-3-642-11261-4_11}, author = {Dan Terpstra and Heike Jagode and Haihang You and Jack Dongarra} } @article {icl:555, title = {Constructing Resilient Communication Infrastructure for Runtime Environments}, journal = {Advances in Parallel Computing - Parallel Computing: From Multicores and GPU{\textquoteright}s to Petascale}, volume = {19}, year = {2010}, pages = {441-451}, doi = {10.3233/978-1-60750-530-3-441}, author = {George Bosilca and Camille Coti and Thomas Herault and Pierre Lemariner and Jack Dongarra}, editor = {Barbara Chapman and Frederic Desprez and Gerhard R. Joubert and Alain Lichnewsky and Frans Peters and T. Priol} } @techreport {icl:528, title = {DAGuE: A generic distributed DAG engine for high performance computing}, journal = {Innovative Computing Laboratory Technical Report}, number = {ICL-UT-10-01}, year = {2010}, month = {2010-04}, keywords = {dague}, author = {George Bosilca and Aurelien Bouteiller and Anthony Danalis and Thomas Herault and Pierre Lemariner and Jack Dongarra} } @inbook {855, title = {Dense Linear Algebra for Hybrid GPU-based Systems}, booktitle = {Scientific Computing with Multicore and Accelerators}, series = {Chapman \& Hall/CRC Computational Science}, year = {2010}, publisher = {CRC Press}, organization = {CRC Press}, chapter = {3}, address = {Boca Raton, Florida}, isbn = {9781439825365}, author = {Stanimire Tomov and Jack Dongarra} } @inproceedings {icl:523, title = {Dense Linear Algebra Solvers for Multicore with GPU Accelerators}, journal = {Parallel Distributed Processing, Workshops and PhD Forum (IPDPSW), 2010 IEEE International Symposium on}, year = {2010}, pages = {1-8}, address = {Atlanta, GA}, abstract = {Solving dense linear systems of equations is a fundamental problem in scientific computing. Numerical simulations involving complex systems represented in terms of unknown variables and relations between them often lead to linear systems of equations that must be solved as fast as possible. We describe current efforts toward the development of these critical solvers in the area of dense linear algebra (DLA) for multicore with GPU accelerators. We describe how to code/develop solvers to effectively use the high computing power available in these new and emerging hybrid architectures. The approach taken is based on hybridization techniques in the context of Cholesky, LU, and QR factorizations. We use a high-level parallel programming model and leverage existing software infrastructure, e.g. optimized BLAS for CPU and GPU, and LAPACK for sequential CPU processing. Included also are architecture and algorithm-specific optimizations for standard solvers as well as mixed-precision iterative refinement solvers. The new algorithms, depending on the hardware configuration and routine parameters, can lead to orders of magnitude acceleration when compared to the same algorithms on standard multicore architectures that do not contain GPU accelerators.
The newly developed DLA solvers are integrated and freely available through the MAGMA library.}, doi = {10.1109/IPDPSW.2010.5470941}, author = {Stanimire Tomov and Rajib Nath and Hatem Ltaeif and Jack Dongarra} } @techreport {icl:563, title = {Distributed Dense Numerical Linear Algebra Algorithms on Massively Parallel Architectures: DPLASMA}, journal = {University of Tennessee Computer Science Technical Report, UT-CS-10-660}, year = {2010}, month = {2010-09}, keywords = {dague, dplasma, parsec, plasma}, author = {George Bosilca and Aurelien Bouteiller and Anthony Danalis and Mathieu Faverge and Azzam Haidar and Thomas Herault and Jakub Kurzak and Julien Langou and Pierre Lemariner and Hatem Ltaeif and Piotr Luszczek and Asim YarKhan and Jack Dongarra} } @techreport {icl:529, title = {Distributed-Memory Task Execution and Dependence Tracking within DAGuE and the DPLASMA Project}, journal = {Innovative Computing Laboratory Technical Report}, number = {ICL-UT-10-02}, year = {2010}, month = {2010-00}, keywords = {dague, plasma}, author = {George Bosilca and Aurelien Bouteiller and Anthony Danalis and Mathieu Faverge and Azzam Haidar and Thomas Herault and Jakub Kurzak and Julien Langou and Pierre Lemariner and Hatem Ltaeif and Piotr Luszczek and Asim YarKhan and Jack Dongarra} } @article {icl:639, title = {Divide \& Conquer on Hybrid GPU-Accelerated Multicore Systems}, journal = {SIAM Journal on Scientific Computing (submitted)}, year = {2010}, month = {2010-08}, keywords = {magma}, author = {Christof Voemel and Stanimire Tomov and Jack Dongarra} } @inproceedings {icl:534, title = {Dodging the Cost of Unavoidable Memory Copies in Message Logging Protocols}, journal = {Proceedings of EuroMPI 2010}, year = {2010}, month = {2010-09}, publisher = {Springer}, address = {Stuttgart, Germany}, keywords = {ftmpi}, author = {George Bosilca and Aurelien Bouteiller and Thomas Herault and Pierre Lemariner and Jack Dongarra}, editor = {Jack Dongarra and Michael Resch and Rainer Keller and Edgar Gabriel} } @article {icl:586, title = {Empirical Performance Tuning of Dense Linear Algebra Software}, journal = {in Performance Tuning of Scientific Applications (to appear)}, year = {2010}, month = {2010-00}, author = {Jack Dongarra and Shirley Moore}, editor = {David Bailey and Robert Lucas and Sam Williams} } @techreport {icl:600, title = {EZTrace: a generic framework for performance analysis}, journal = {ICL Technical Report}, number = {ICL-UT-11-01}, year = {2010}, month = {2010-12}, author = {Jack Dongarra and Mathieu Faverge and Yutaka Ishikawa and Raymond Namyst and Fran{\c c}ois Rue and Francois Trahay} } @techreport {icl:585, title = {Faster, Cheaper, Better - A Hybridization Methodology to Develop Linear Algebra Software for GPUs}, journal = {LAPACK Working Note}, number = {230}, year = {2010}, month = {2010-00}, keywords = {magma, morse}, author = {Emmanuel Agullo and Cedric Augonnet and Jack Dongarra and Hatem Ltaeif and Raymond Namyst and Samuel Thibault and Stanimire Tomov} } @article {icl:526, title = {Hybrid Multicore Cholesky Factorization with Multiple GPU Accelerators}, journal = {IEEE Transactions on Parallel and Distributed Systems (submitted)}, year = {2010}, month = {2010-03}, keywords = {magma, plasma}, author = {Hatem Ltaeif and Stanimire Tomov and Rajib Nath and Jack Dongarra} } @techreport {icl:548, title = {An Improved MAGMA GEMM for Fermi GPUs}, journal = {University of Tennessee Computer Science Technical Report}, number = {UT-CS-10-655 (also LAPACK Working Note 227)}, year = {2010},
month = {2010-07}, keywords = {magma}, author = {Rajib Nath and Stanimire Tomov and Jack Dongarra} } @article {icl:582, title = {An Improved MAGMA GEMM for Fermi GPUs}, journal = {International Journal of High Performance Computing}, volume = {24}, number = {4}, year = {2010}, month = {2010-00}, pages = {511-515}, keywords = {magma}, author = {Rajib Nath and Stanimire Tomov and Jack Dongarra} } @inproceedings {icl:527, title = {Improvement of parallelization efficiency of batch pattern BP training algorithm using Open MPI}, journal = {Proceedings of International Conference on Computational Science, ICCS 2010 (to appear)}, year = {2010}, month = {2010-06}, publisher = {Elsevier}, address = {Amsterdam The Netherlands}, keywords = {hpcchallenge}, author = {Volodymyr Turchenko and Lucio Grandinetti and George Bosilca and Jack Dongarra} } @techreport {icl:550, title = {International Exascale Software Project Roadmap v1.0}, journal = {University of Tennessee Computer Science Technical Report, UT-CS-10-654}, year = {2010}, month = {2010-05}, author = {Jack Dongarra and Pete Beckman} } @article {1350, title = {An Introduction to the MAGMA project - Acceleration of Dense Linear Algebra}, year = {2010}, month = {2010-06}, publisher = {NVIDIA Webinar}, url = {http://developer.download.nvidia.com/CUDA/training/introtomagma.mp4}, author = {Jack Dongarra and Stanimire Tomov} } @techreport {icl:597, title = {Kernel Assisted Collective Intra-node Communication Among Multicore and Manycore CPUs}, journal = {University of Tennessee Computer Science Technical Report, UT-CS-10-663}, year = {2010}, month = {2010-11}, author = {Teng Ma and George Bosilca and Aurelien Bouteiller and Brice Goglin and J. Squyres and Jack Dongarra} } @article {icl:573, title = {Level-3 Cholesky Kernel Subroutine of a Fully Portable High Performance Minimal Storage Hybrid Format Cholesky Algorithm}, journal = {ACM TOMS (submitted), also LAPACK Working Note (LAWN) 211}, year = {2010}, month = {2010-00}, author = {Fred G. 
Gustavson and Jerzy Wasniewski and Jack Dongarra} } @article {icl:556, title = {LINPACK on Future Manycore and GPU Based Systems}, journal = {PARA 2010}, year = {2010}, month = {2010-06}, address = {Reykjavik, Iceland}, author = {Jack Dongarra} } @inproceedings {icl:535, title = {Locality and Topology aware Intra-node Communication Among Multicore CPUs}, journal = {Proceedings of the 17th EuroMPI conference}, year = {2010}, month = {2010-09}, publisher = {LNCS}, address = {Stuttgart, Germany}, author = {Teng Ma and Aurelien Bouteiller and George Bosilca and Jack Dongarra} } @inproceedings {icl:562, title = {Mixed-Tool Performance Analysis on Hybrid Multicore Architectures}, journal = {First International Workshop on Parallel Software Tools and Tool Infrastructures (PSTI 2010)}, year = {2010}, month = {2010-09}, address = {San Diego, CA}, keywords = {magma}, author = {Peng Du and Piotr Luszczek and Stanimire Tomov and Jack Dongarra} } @inproceedings {icl:549, title = {OpenCL Evaluation for Numerical Linear Algebra Library Development}, journal = {Symposium on Application Accelerators in High-Performance Computing (SAAHPC {\textquoteright}10)}, year = {2010}, month = {2010-07}, address = {Knoxville, TN}, keywords = {magma}, author = {Peng Du and Piotr Luszczek and Jack Dongarra} } @article {icl:569, title = {Parallel Band Two-Sided Matrix Bidiagonalization for Multicore Architectures}, journal = {IEEE Transactions on Parallel and Distributed Systems}, year = {2010}, month = {2010-04}, pages = {417-423}, author = {Hatem Ltaeif and Jakub Kurzak and Jack Dongarra} } @inproceedings {icl:584, title = {Performance Evaluation for Petascale Quantum Simulation Tools}, journal = {Proceedings of the Cray Users{\textquoteright} Group Meeting}, year = {2010}, month = {2010-05}, address = {Atlanta, GA}, author = {Stanimire Tomov and Wenchang Lu and Jerzy Bernholc and Shirley Moore and Jack Dongarra} } @techreport {icl:575, title = {Performance of Various Computers Using Standard Linear Equations Software (Linpack Benchmark Report)}, journal = {University of Tennessee Computer Science Technical Report, UT-CS-89-85}, year = {2010}, month = {2010-00}, author = {Jack Dongarra} } @article {icl:579, title = {Proceedings of the International Conference on Computational Science}, journal = {ICCS 2010}, year = {2010}, month = {2010-05}, publisher = {Elsevier}, address = {Amsterdam}, editor = {Peter M. Sloot and Geert Dick van Albada and Jack Dongarra} } @article {icl:574, title = {QCG-OMPI: MPI Applications on Grids}, journal = {Future Generation Computer Systems}, volume = {27}, number = {4}, year = {2010}, month = {2010-03}, pages = {357-369}, author = {Emmanuel Agullo and Camille Coti and Thomas Herault and Julien Langou and Sylvain Peyronnet and A.
Rezmerita and Franck Cappello and Jack Dongarra} } @article {icl:571, title = {QR Factorization for the CELL Processor}, journal = {Scientific Programming}, volume = {17}, number = {1-2}, year = {2010}, month = {2010-00}, pages = {31-42}, author = {Jakub Kurzak and Jack Dongarra} } @inproceedings {icl:532, title = {QR Factorization of Tall and Skinny Matrices in a Grid Computing Environment}, journal = {24th IEEE International Parallel and Distributed Processing Symposium (also LAWN 224)}, year = {2010}, month = {2010-04}, address = {Atlanta, GA}, author = {Emmanuel Agullo and Camille Coti and Jack Dongarra and Thomas Herault and Julien Langou} } @inproceedings {icl:577, title = {QR Factorization on a Multicore Node Enhanced with Multiple GPU Accelerators}, journal = {Proceedings of IPDPS 2011}, number = {ICL-UT-10-04}, year = {2010}, month = {2010-10}, address = {Anchorage, AK}, keywords = {magma, morse, plasma}, author = {Emmanuel Agullo and Cedric Augonnet and Jack Dongarra and Mathieu Faverge and Hatem Ltaeif and Samuel Thibault and Stanimire Tomov} } @inproceedings {icl:581, title = {Recent Advances in the Message Passing Interface, Lecture Notes in Computer Science (LNCS)}, journal = {EuroMPI 2010 Proceedings}, volume = {6305}, year = {2010}, month = {2010-09}, publisher = {Springer}, address = {Stuttgart, Germany}, editor = {Rainer Keller and Edgar Gabriel and Michael Resch and Jack Dongarra} } @article {icl:551, title = {Rectangular Full Packed Format for Cholesky{\textquoteright}s Algorithm: Factorization, Solution, and Inversion}, journal = {ACM Transactions on Mathematical Software (TOMS)}, volume = {37}, number = {2}, year = {2010}, month = {2010-04}, address = {Atlanta, GA}, author = {Fred G. Gustavson and Jerzy Wasniewski and Jack Dongarra and Julien Langou} } @article {icl:570, title = {Rectangular Full Packed Format for Cholesky{\textquoteright}s Algorithm: Factorization, Solution and Inversion}, journal = {ACM Transactions on Mathematical Software (TOMS)}, volume = {37}, number = {2}, year = {2010}, month = {2010-04}, author = {Fred G. Gustavson and Jerzy Wasniewski and Jack Dongarra and Julien Langou} } @article {icl:565, title = {Redesigning the Message Logging Model for High Performance}, journal = {Concurrency and Computation: Practice and Experience (online version)}, year = {2010}, month = {2010-06}, author = {Aurelien Bouteiller and George Bosilca and Jack Dongarra} } @techreport {icl:578, title = {Reducing the time to tune parallel dense linear algebra routines with partial execution and performance modelling}, journal = {University of Tennessee Computer Science Technical Report}, number = {UT-CS-10-661}, year = {2010}, month = {2010-10}, keywords = {hpcc}, author = {Jack Dongarra and Piotr Luszczek} } @article {icl:521, title = {A Scalable High Performant Cholesky Factorization for Multicore with GPU Accelerators}, journal = {Proc. 
of VECPAR{\textquoteright}10 (to appear)}, year = {2010}, month = {2010-06}, address = {Berkeley, CA}, keywords = {magma, plasma}, author = {Hatem Ltaeif and Stanimire Tomov and Rajib Nath and Peng Du and Jack Dongarra} } @techreport {icl:530, title = {Scalable Tile Communication-Avoiding QR Factorization on Multicore Cluster Systems}, journal = {University of Tennessee Computer Science Technical Report}, number = {UT-CS-10-653}, year = {2010}, month = {2010-04}, keywords = {plasma}, author = {Fengguang Song and Hatem Ltaeif and Bilel Hadri and Jack Dongarra} } @article {icl:559, title = {Scalable Tile Communication-Avoiding QR Factorization on Multicore Cluster Systems}, journal = {SC{\textquoteright}10}, year = {2010}, month = {2010-11}, publisher = {ACM SIGARCH/IEEE Computer Society}, address = {New Orleans, LA}, keywords = {plasma}, author = {Fengguang Song and Hatem Ltaeif and Bilel Hadri and Jack Dongarra} } @article {1362, title = {Scheduling Cholesky Factorization on Multicore Architectures with GPU Accelerators}, year = {2010}, month = {2010-07}, publisher = {2010 Symposium on Application Accelerators in High-Performance Computing (SAAHPC{\textquoteright}10), Poster}, address = {Knoxville, TN}, author = {Emmanuel Agullo and Cedric Augonnet and Jack Dongarra and Hatem Ltaeif and Raymond Namyst and Rajib Nath and Jean Roman and Samuel Thibault and Stanimire Tomov} } @article {icl:540, title = {Scheduling Dense Linear Algebra Operations on Multicore Processors}, journal = {Concurrency and Computation: Practice and Experience}, volume = {22}, number = {1}, year = {2010}, month = {2010-01}, pages = {15-44}, keywords = {gridpac, plasma}, author = {Jakub Kurzak and Hatem Ltaeif and Jack Dongarra and Rosa M. Badia} } @article {icl:473, title = {Scheduling Two-sided Transformations using Tile Algorithms on Multicore Architectures}, journal = {Journal of Scientific Computing}, volume = {18}, number = {1}, year = {2010}, month = {2010-00}, pages = {33-50}, keywords = {plasma}, author = {Hatem Ltaeif and Jakub Kurzak and Jack Dongarra and Rosa M.
Badia} } @article {icl:567, title = {Self-Healing Network for Scalable Fault-Tolerant Runtime Environments}, journal = {Future Generation Computer Systems}, volume = {26}, number = {3}, year = {2010}, month = {2010-03}, pages = {479-485}, author = {Thara Angskun and Graham Fagg and George Bosilca and Jelena Pjesivac-Grbovic and Jack Dongarra} } @article {icl:566, title = {SmartGridRPC: The new RPC model for high performance Grid Computing and Its Implementation in SmartGridSolve}, journal = {Concurrency and Computation: Practice and Experience (to appear)}, year = {2010}, month = {2010-01}, keywords = {netsolve}, author = {Thomas Brady and Alexey Lastovetsky and Keith Seymour and Michele Guidolin and Jack Dongarra} } @inproceedings {icl:588, title = {Tile QR Factorization with Parallel Panel Processing for Multicore Architectures}, journal = {24th IEEE International Parallel and Distributed Processing Symposium (submitted)}, year = {2010}, month = {2010-00}, author = {Bilel Hadri and Emmanuel Agullo and Jack Dongarra} } @article {icl:564, title = {Towards Dense Linear Algebra for Hybrid GPU Accelerated Manycore Systems}, journal = {Parallel Computing}, volume = {36}, number = {5-6}, year = {2010}, month = {2010-00}, pages = {232-240}, keywords = {magma}, author = {Stanimire Tomov and Jack Dongarra and Marc Baboulin} } @article {icl:576, title = {Trace-based Performance Analysis for the Petascale Simulation Code FLASH}, journal = {International Journal of High Performance Computing Applications (to appear)}, year = {2010}, month = {2010-00}, author = {Heike Jagode and Andreas Knuepfer and Jack Dongarra and Matthias Jurenz and Matthias S. Mueller and Wolfgang E. Nagel} } @article {icl:561, title = {Tuning Principal Component Analysis for GRASS GIS on Multi-core and GPU Architectures}, journal = {FOSS4G 2010}, year = {2010}, month = {2010-09}, address = {Barcelona, Spain}, keywords = {magma}, author = {Peng Du and Matthew Parsons and Erika Fuentes and Shih-Lung Shaw and Jack Dongarra} } @article {icl:620, title = {Using MAGMA with PGI Fortran}, journal = {PGI Insider}, year = {2010}, month = {2010-11}, keywords = {magma}, author = {Stanimire Tomov and Mathieu Faverge and Piotr Luszczek and Jack Dongarra} } @article {, title = {Accelerating Scientific Computations with Mixed Precision Algorithms}, journal = {Computer Physics Communications}, volume = {180}, year = {2009}, month = {2009-12}, pages = {2526-2533}, abstract = {On modern architectures, the performance of 32-bit operations is often at least twice as fast as the performance of 64-bit operations. By using a combination of 32-bit and 64-bit floating point arithmetic, the performance of many dense and sparse linear algebra algorithms can be significantly enhanced while maintaining the 64-bit accuracy of the resulting solution. The approach presented here can apply not only to conventional processors but also to other technologies such as Field Programmable Gate Arrays (FPGA), Graphical Processing Units (GPU), and the STI Cell BE processor.
Results on modern processor architectures and the STI Cell BE are presented.}, doi = {https://doi.org/10.1016/j.cpc.2008.11.005}, author = {Marc Baboulin and Alfredo Buttari and Jack Dongarra and Jakub Kurzak and Julie Langou and Julien Langou and Piotr Luszczek and Stanimire Tomov} } @techreport {icl:485, title = {Accelerating the Reduction to Upper Hessenberg Form through Hybrid GPU-Based Computing}, journal = {University of Tennessee Computer Science Technical Report, UT-CS-09-642 (also LAPACK Working Note 219)}, year = {2009}, month = {2009-05}, keywords = {magma}, author = {Stanimire Tomov and Jack Dongarra} } @article {icl:568, title = {Accelerating Time-To-Solution for Computational Science and Engineering}, journal = {SciDAC Review}, year = {2009}, month = {2009-00}, author = {James Demmel and Jack Dongarra and Armando Fox and Sam Williams and Vasily Volkov and Katherine Yelick} } @article {icl:490, title = {Algorithmic Based Fault Tolerance Applied to High Performance Computing}, journal = {Journal of Parallel and Distributed Computing}, volume = {69}, year = {2009}, month = {2009-00}, pages = {410-416}, author = {Jack Dongarra and George Bosilca and Remi Delmas and Julien Langou} } @article {icl:479, title = {Analytical Modeling and Optimization for Affinity Based Thread Scheduling on Multicore Systems}, journal = {IEEE Cluster 2009}, year = {2009}, month = {2009-08}, address = {New Orleans}, keywords = {gridpac, mumi}, author = {Fengguang Song and Shirley Moore and Jack Dongarra} } @article {icl:509, title = {A Class of Parallel Tiled Linear Algebra Algorithms for Multicore Architectures}, journal = {Parallel Computing}, volume = {35}, year = {2009}, month = {2009-00}, pages = {38-53}, keywords = {plasma}, author = {Alfredo Buttari and Julien Langou and Jakub Kurzak and Jack Dongarra} } @inproceedings {icl:488, title = {Comparative Study of One-Sided Factorizations with Multiple Software Packages on Multi-Core Hardware}, journal = {2009 International Conference for High Performance Computing, Networking, Storage, and Analysis (SC {\textquoteright}09) (to appear)}, year = {2009}, month = {2009-00}, author = {Emmanuel Agullo and Bilel Hadri and Hatem Ltaeif and Jack Dongarra} } @article {icl:507, title = {Computational Science {\textendash} ICCS 2009, Proceedings of the 9th International Conference}, journal = {Lecture Notes in Computer Science: Theoretical Computer Science and General Issues}, number = {5544-5545}, year = {2009}, month = {2009-05}, address = {Baton Rouge, LA}, editor = {Gabrielle Allen and Jaros{\l}aw Nabrzyski and E. Seidel and Geert Dick van Albada and Jack Dongarra and Peter M.
Sloot} } @article {icl:482, title = {Computing the Conditioning of the Components of a Linear Least-squares Solution}, journal = {Numerical Linear Algebra with Applications}, volume = {16}, number = {7}, year = {2009}, month = {2009-00}, pages = {517-533}, author = {Marc Baboulin and Jack Dongarra and Serge Gratton and Julien Langou} } @techreport {icl:484, title = {Constructing resilient communication infrastructure for runtime environments}, journal = {Innovative Computing Laboratory Technical Report}, number = {ICL-UT-09-02}, year = {2009}, month = {2009-07}, author = {George Bosilca and Camille Coti and Thomas Herault and Pierre Lemariner and Jack Dongarra} } @article {icl:517, title = {Constructing Resilient Communication Infrastructure for Runtime Environments}, journal = {ParCo 2009}, year = {2009}, month = {2009-09}, address = {Lyon, France}, author = {Pierre Lemariner and George Bosilca and Camille Coti and Thomas Herault and Jack Dongarra} } @article {icl:518, title = {Dependency-Driven Scheduling of Dense Matrix Factorizations on Shared-Memory Systems}, journal = {PPAM 2009}, year = {2009}, month = {2009-09}, address = {Poland}, author = {Jakub Kurzak and Hatem Ltaeif and Jack Dongarra and Rosa M. Badia} } @inproceedings {icl:493, title = {Dynamic Task Scheduling for Linear Algebra Algorithms on Distributed-Memory Multicore Systems}, journal = {International Conference for High Performance Computing, Networking, Storage, and Analysis (SC {\textquoteright}09)}, year = {2009}, month = {2009-11}, address = {Portland, OR}, keywords = {mumi, plasma}, author = {Fengguang Song and Asim YarKhan and Jack Dongarra} } @article {icl:524, title = {Enhancing Parallelism of Tile QR Factorization for Multicore Architectures}, journal = {Submitted to Transactions on Parallel and Distributed Systems}, year = {2009}, month = {2009-12}, keywords = {plasma}, author = {Bilel Hadri and Hatem Ltaeif and Emmanuel Agullo and Jack Dongarra} } @techreport {icl:494, title = {Fully Dynamic Scheduler for Numerical Computing on Multicore Processors}, journal = {University of Tennessee Computer Science Department Technical Report, UT-CS-09-643 (Also LAPACK Working Note 220)}, year = {2009}, month = {2009-00}, author = {Jakub Kurzak and Jack Dongarra} } @inproceedings {icl:520, title = {Grid Computing applied to the Boundary Element Method}, journal = {Proceedings of the First International Conference on Parallel, Distributed and Grid Computing for Engineering}, volume = {27}, number = {:104203/9027}, year = {2009}, month = {2009-00}, publisher = {Civil-Comp Press}, address = {Stirlingshire, UK}, keywords = {netsolve}, author = {Manoel Cunha and Jose Telles and Asim YarKhan and Jack Dongarra}, editor = {B. H. V. Topping and Peter Iv{\'a}nyi} } @article {, title = {Highly Scalable Self-Healing Algorithms for High Performance Scientific Computing}, journal = {IEEE Transactions on Computers}, volume = {58}, year = {2009}, month = {2009-11}, pages = {1512-1524}, abstract = {As the number of processors in today{\textquoteright}s high-performance computers continues to grow, the mean-time-to-failure of these computers is becoming significantly shorter than the execution time of many current high-performance computing applications. Although today{\textquoteright}s architectures are usually robust enough to survive node failures without suffering complete system failure, most of today{\textquoteright}s high-performance computing applications cannot survive node failures.
Therefore, whenever a node fails, all surviving processes on surviving nodes usually have to be aborted and the whole application has to be restarted. In this paper, we present a framework for building self-healing high-performance numerical computing applications so that they can adapt to node or link failures without aborting themselves. The framework is based on FT-MPI and diskless checkpointing. Our diskless checkpointing uses weighted checksum schemes, a variation of Reed-Solomon erasure codes over floating-point numbers. We introduce several scalable encoding strategies into the existing diskless checkpointing and reduce the overhead to survive $k$ failures in $p$ processes from $2 \lceil \log p \rceil \cdot k((\beta + 2\gamma)m + \alpha)$ to $(1 + O(\sqrt{p}/\sqrt{m})) \cdot 2k(\beta + 2\gamma)m$, where $\alpha$ is the communication latency, $1/\beta$ is the network bandwidth between processes, $1/\gamma$ is the rate to perform calculations, and $m$ is the size of local checkpoint per process. When additional checkpoint processors are used, the overhead can be reduced to $(1 + O(1/\sqrt{m})) \cdot k(\beta + 2\gamma)m$, which is independent of the total number of computational processors. The introduced self-healing algorithms are scalable in the sense that the overhead to survive $k$ failures in $p$ processes does not increase as the number of processes $p$ increases. We evaluate the performance overhead of our self-healing approach by using a preconditioned conjugate gradient equation solver as an example.}, doi = {https://doi.org/10.1109/TC.2009.42}, author = {Zizhong Chen and Jack Dongarra} } @inproceedings {icl:474, title = {A Holistic Approach for Performance Measurement and Analysis for Petascale Applications}, journal = {ICCS 2009 Joint Workshop: Tools for Program Development and Analysis in Computational Science and Software Engineering for Large-Scale Computing}, volume = {2009}, year = {2009}, month = {2009-05}, pages = {686-695}, publisher = {Springer-Verlag Berlin Heidelberg 2009}, address = {Baton Rouge, Louisiana}, keywords = {point, test}, author = {Heike Jagode and Jack Dongarra and Sadaf Alam and Jeffrey Vetter and W. Spear and Allen D. Malony}, editor = {Gabrielle Allen} } @article {icl:481, title = {The International Exascale Software Project: A Call to Cooperative Action by the Global High Performance Community}, journal = {International Journal of High Performance Computing Applications (to appear)}, year = {2009}, month = {2009-07}, author = {Jack Dongarra and Pete Beckman and Patrick Aerts and Franck Cappello and Thomas Lippert and Satoshi Matsuoka and Paul Messina and Terry Moore and Rick Stevens and Anne Trefethen and Mateo Valero} } @article {icl:497, title = {I/O Performance Analysis for the Petascale Simulation Code FLASH}, journal = {ISC{\textquoteright}09}, year = {2009}, month = {2009-06}, address = {Hamburg, Germany}, keywords = {test}, author = {Heike Jagode and Shirley Moore and Dan Terpstra and Jack Dongarra and Andreas Knuepfer and Matthias Jurenz and Matthias S. Mueller and Wolfgang E. Nagel} } @inproceedings {icl:512, title = {A Note on Auto-tuning GEMM for GPUs}, journal = {9th International Conference on Computational Science (ICCS 2009)}, number = {5544-5545}, year = {2009}, month = {2009-05}, pages = {884-892}, address = {Baton Rouge, LA}, doi = {10.1007/978-3-642-01970-8_89}, author = {Yinan Li and Jack Dongarra and Stanimire Tomov}, editor = {Gabrielle Allen and Jaros{\l}aw Nabrzyski and E. Seidel and Geert Dick van Albada and Jack Dongarra and Peter M.
Sloot} } @article {1352, title = {Numerical Linear Algebra on Emerging Architectures: The PLASMA and MAGMA Projects}, year = {2009}, month = {2009-11}, publisher = {The International Conference for High Performance Computing, Networking, Storage, and Analysis (SC09)}, address = {Portland, OR}, author = {Emmanuel Agullo and James Demmel and Jack Dongarra and Bilel Hadri and Jakub Kurzak and Julien Langou and Hatem Ltaeif and Piotr Luszczek and Rajib Nath and Stanimire Tomov and Asim YarKhan and Vasily Volkov} } @inproceedings {icl:486, title = {Numerical Linear Algebra on Emerging Architectures: The PLASMA and MAGMA Projects}, journal = {Journal of Physics: Conference Series}, volume = {180}, year = {2009}, month = {2009-00}, keywords = {magma, plasma}, author = {Emmanuel Agullo and James Demmel and Jack Dongarra and Bilel Hadri and Jakub Kurzak and Julien Langou and Hatem Ltaeif and Piotr Luszczek and Stanimire Tomov} } @article {1365, title = {Numerical Linear Algebra on Hybrid Architectures: Recent Developments in the MAGMA Project}, year = {2009}, month = {2009-11}, publisher = {The International Conference for High Performance Computing, Networking, Storage, and Analysis (SC09)}, address = {Portland, Oregon}, author = {Rajib Nath and Jack Dongarra and Stanimire Tomov and Hatem Ltaeif and Peng Du} } @article {icl:492, title = {Optimizing Matrix Multiplication for a Short-Vector SIMD Architecture - CELL Processor}, journal = {Parallel Computing}, volume = {35}, year = {2009}, month = {2009-00}, pages = {138-150}, author = {Wesley Alvaro and Jakub Kurzak and Jack Dongarra} } @article {icl:489, title = {Parallel Band Two-Sided Matrix Bidiagonalization for Multicore Architectures}, journal = {IEEE Transactions on Parallel and Distributed Systems (to appear)}, year = {2009}, month = {2009-05}, author = {Hatem Ltaeif and Jakub Kurzak and Jack Dongarra} } @article {icl:505, title = {Parallel Dense Linear Algebra Software in the Multicore Era}, journal = {in Cyberinfrastructure Technologies and Applications}, year = {2009}, month = {2009-00}, pages = {9-24}, publisher = {Nova Science Publishers, Inc.}, keywords = {plasma}, author = {Alfredo Buttari and Jack Dongarra and Jakub Kurzak and Julien Langou}, editor = {Junwei Cao} } @article {icl:500, title = {Paravirtualization Effect on Single- and Multi-threaded Memory-Intensive Linear Algebra Software}, journal = {Cluster Computing Journal: Special Issue on High Performance Distributed Computing}, volume = {12}, number = {2}, year = {2009}, month = {2009-00}, pages = {101-122}, publisher = {Springer Netherlands}, author = {Lamia Youseff and Keith Seymour and Haihang You and Dmitrii Zagorodnov and Jack Dongarra and Rich Wolski} } @inproceedings {icl:478, title = {Performance evaluation for petascale quantum simulation tools}, journal = {Proceedings of CUG09}, year = {2009}, month = {2009-05}, address = {Atlanta, GA}, keywords = {doe-nano}, author = {Stanimire Tomov and Wenchang Lu and Jerzy Bernholc and Shirley Moore and Jack Dongarra} } @article {icl:491, title = {The Problem with the Linpack Benchmark Matrix Generator}, journal = {International Journal of High Performance Computing Applications}, volume = {23}, number = {1}, year = {2009}, month = {2009-00}, pages = {5-14}, keywords = {hpl}, author = {Julien Langou and Jack Dongarra} } @article {icl:508, title = {QR Factorization for the CELL Processor}, journal = {Scientific Programming (to appear)}, year = {2009}, month = {2009-00}, keywords = {plasma}, author = {Jakub Kurzak and Jack Dongarra} 
} @conference {870, title = {Reasons for a Pessimistic or Optimistic Message Logging Protocol in MPI Uncoordinated Failure Recovery}, booktitle = {CLUSTER {\textquoteright}09}, year = {2009}, month = {2009-08}, publisher = {IEEE}, organization = {IEEE}, address = {New Orleans}, abstract = {With the growing scale of high performance computing platforms, fault tolerance has become a major issue. Among the various approaches for providing fault tolerance to MPI applications, message logging has been proved to tolerate a higher failure rate. However, this advantage comes at the expense of a higher overhead on communications, due to latency-intrusive logging of events to stable storage. Previous work proposed and evaluated several protocols relaxing the synchronicity of event logging to moderate this overhead. Recently, the model of message logging has been refined to better match the reality of high performance network cards, where message receptions are decomposed into multiple interdependent events. According to this new model, deterministic and non-deterministic events are clearly discriminated, reducing the overhead induced by message logging. In this paper we compare, experimentally, a pessimistic and an optimistic message logging protocol, using this new model and implemented in the Open MPI library. Although pessimistic and optimistic message logging are, respectively, the most and least synchronous message logging paradigms, experiments show that most of the time their performance is comparable.}, keywords = {fault tolerant computing, libraries, message passing, parallel machines, protocols}, doi = {10.1109/CLUSTR.2009.5289157}, author = {Aurelien Bouteiller and Thomas Ropars and George Bosilca and Christine Morin and Jack Dongarra} } @article {icl:513, title = {Recent Trends in High Performance Computing}, journal = {in Birth of Numerical Analysis (to appear)}, year = {2009}, month = {2009-00}, author = {Jack Dongarra and Hans Meuer and Horst D. Simon and Erich Strohmaier} } @article {icl:511, title = {Rectangular Full Packed Format for Cholesky{\textquoteright}s Algorithm: Factorization, Solution and Inversion}, journal = {ACM TOMS (to appear)}, year = {2009}, month = {2009-00}, author = {Fred G. Gustavson and Jerzy Wasniewski and Jack Dongarra and Julien Langou} } @article {icl:420, title = {Reliability and Performance Modeling and Analysis for Grid Computing}, journal = {in Handbook of Research on Scalable Computing Technologies (to appear)}, year = {2009}, month = {2009-00}, pages = {219-245}, publisher = {IGI Global}, author = {Yuan-Shun Dai and Jack Dongarra}, editor = {Kuan-Ching Li and Ching-Hsien Hsu and Laurence Yang and Jack Dongarra and Hans Zima} } @inproceedings {icl:501, title = {A Scalable Non-blocking Multicast Scheme for Distributed DAG Scheduling}, journal = {The International Conference on Computational Science 2009 (ICCS 2009)}, volume = {5544}, year = {2009}, month = {2009-05}, pages = {195-204}, address = {Baton Rouge, LA}, keywords = {plasma}, author = {Fengguang Song and Shirley Moore and Jack Dongarra} } @techreport {icl:495, title = {Scheduling Linear Algebra Operations on Multicore Processors}, journal = {University of Tennessee Computer Science Department Technical Report, UT-CS-09-636 (Also LAPACK Working Note 213)}, year = {2009}, month = {2009-00}, author = {Jakub Kurzak and Hatem Ltaeif and Jack Dongarra and Rosa M.
Badia} } @article {icl:510, title = {Scheduling Linear Algebra Operations on Multicore Processors}, journal = {Concurrency Practice and Experience (to appear)}, year = {2009}, month = {2009-00}, keywords = {plasma}, author = {Jakub Kurzak and Hatem Ltaeif and Jack Dongarra and Rosa M. Badia} } @techreport {icl:487, title = {Tall and Skinny QR Matrix Factorization Using Tile Algorithms on Multicore Architectures}, journal = {Innovative Computing Laboratory Technical Report (also LAPACK Working Note 222 and CS Tech Report UT-CS-09-645)}, number = {ICL-UT-09-03}, year = {2009}, month = {2009-09}, keywords = {plasma}, author = {Bilel Hadri and Hatem Ltaeif and Emmanuel Agullo and Jack Dongarra} } @inproceedings {icl:522, title = {Tile QR Factorization with Parallel Panel Processing for Multicore Architectures}, journal = {accepted in 24th IEEE International Parallel and Distributed Processing Symposium (IPDPS 2010)}, year = {2009}, month = {2009-12}, address = {Atlanta, GA}, keywords = {plasma}, author = {Bilel Hadri and Hatem Ltaeif and Emmanuel Agullo and Jack Dongarra} } @article {icl:506, title = {Towards Efficient MapReduce Using MPI}, journal = {Lecture Notes in Computer Science, Recent Advances in Parallel Virtual Machine and Message Passing Interface - 16th European PVM/MPI Users{\textquoteright} Group Meeting}, volume = {5759}, year = {2009}, month = {2009-00}, pages = {240-249}, publisher = {Springer Berlin / Heidelberg}, address = {Espoo, Finland}, author = {Torsten Hoefler and Yuan-Shun Dai and Jack Dongarra}, editor = {M. Ropo and J. Westerholm and Jack Dongarra} } @techreport {icl:475, title = {Trace-based Performance Analysis for the Petascale Simulation Code FLASH}, journal = {Innovative Computing Laboratory Technical Report}, number = {ICL-UT-09-01}, year = {2009}, month = {2009-04}, keywords = {test}, author = {Heike Jagode and Andreas Knuepfer and Jack Dongarra and Matthias Jurenz and Matthias S. Mueller and Wolfgang E. Nagel} } @article {icl:515, title = {Transparent Cross-Platform Access to Software Services using GridSolve and GridRPC}, journal = {in Cloud Computing and Software Services: Theory and Techniques (to appear)}, year = {2009}, month = {2009-00}, publisher = {CRC Press}, keywords = {netsolve}, author = {Keith Seymour and Asim YarKhan and Jack Dongarra}, editor = {Syed Ahson and Mohammad Ilyas} } @inproceedings {icl:453, journal = {8th International Conference on Computational Science (ICCS), Proceedings Parts I, II, and III, Lecture Notes in Computer Science}, volume = {5101}, year = {2008}, month = {2008-01}, publisher = {Springer Berlin}, address = {Krakow, Poland}, editor = {Marian Bubak and Geert Dick van Albada and Jack Dongarra and Peter M.
Sloot} } @inproceedings {icl:452, journal = {7th International Parallel Processing and Applied Mathematics Conference, Lecture Notes in Computer Science}, volume = {4967}, year = {2008}, month = {2008-01}, publisher = {Springer Berlin}, address = {Gdansk, Poland}, editor = {Roman Wyrzykowski and Jack Dongarra and Konrad Karczewski and Jerzy Wasniewski} } @article {icl:454, journal = {15th European PVM/MPI Users{\textquoteright} Group Meeting, Recent Advances in Parallel Virtual Machine and Message Passing Interface, Lecture Notes in Computer Science}, volume = {5205}, year = {2008}, month = {2008-01}, publisher = {Springer Berlin}, address = {Dublin, Ireland}, editor = {Alexey Lastovetsky and Tahar Kechadi and Jack Dongarra} } @article {icl:437, title = {Algorithm-Based Fault Tolerance for Fail-Stop Failures}, journal = {IEEE Transactions on Parallel and Distributed Systems}, volume = {19}, number = {12}, year = {2008}, month = {2008-01}, keywords = {FT-MPI, lapack, scalapack}, author = {Zizhong Chen and Jack Dongarra} } @techreport {icl:426, title = {Algorithmic Based Fault Tolerance Applied to High Performance Computing}, journal = {University of Tennessee Computer Science Technical Report, UT-CS-08-620 (also LAPACK Working Note 205)}, year = {2008}, month = {2008-01}, author = {George Bosilca and Remi Delmas and Jack Dongarra and Julien Langou} } @techreport {icl:432, title = {Analytical Modeling for Affinity-Based Thread Scheduling on Multicore Platforms}, journal = {University of Tennessee Computer Science Technical Report, UT-CS-08-626}, year = {2008}, month = {2008-01}, author = {Fengguang Song and Shirley Moore and Jack Dongarra} } @inproceedings {icl:418, title = {A Comparison of Search Heuristics for Empirical Code Optimization}, journal = {The 3rd International Workshop on Automatic Performance Tuning}, year = {2008}, month = {2008-10}, address = {Tsukuba, Japan}, keywords = {gco}, author = {Keith Seymour and Haihang You and Jack Dongarra} } @article {icl:457, title = {Computing the Conditioning of the Components of a Linear Least Squares Solution}, journal = {VECPAR {\textquoteright}08, High Performance Computing for Computational Science}, year = {2008}, month = {2008-01}, address = {Toulouse, France}, author = {Marc Baboulin and Jack Dongarra and Serge Gratton and Julien Langou} } @article {icl:451, title = {DARPA{\textquoteright}s HPCS Program: History, Models, Tools, Languages}, journal = {in Advances in Computers}, volume = {72}, year = {2008}, month = {2008-01}, publisher = {Elsevier}, author = {Jack Dongarra and Robert Graybill and William Harrod and Robert Lucas and Ewing Lusk and Piotr Luszczek and Janice McMahon and Allan Snavely and Jeffrey Vetter and Katherine Yelick and Sadaf Alam and Roy Campbell and Laura Carrington and Tzu-Yi Chen and Omid Khalili and Jeremy Meredith and Mustafa Tikir}, editor = {M.
Zelkowitz} } @article {1353, title = {Enhancing the Performance of Dense Linear Algebra Solvers on GPUs (in the MAGMA Project)}, year = {2008}, month = {2008-11}, publisher = {The International Conference for High Performance Computing, Networking, Storage, and Analysis (SC08)}, address = {Austin, TX}, author = {Marc Baboulin and James Demmel and Jack Dongarra and Stanimire Tomov and Vasily Volkov} } @article {icl:449, title = {Exploiting Mixed Precision Floating Point Hardware in Scientific Computations}, journal = {in High Performance Computing and Grids in Action}, year = {2008}, month = {2008-01}, publisher = {IOS Press}, address = {Amsterdam}, author = {Alfredo Buttari and Jack Dongarra and Jakub Kurzak and Julie Langou and Julien Langou and Piotr Luszczek and Stanimire Tomov}, editor = {Lucio Grandinetti} } @inproceedings {icl:440, title = {Exploring New Architectures in Accelerating CFD for Air Force Applications}, journal = {Proceedings of the DoD HPCMP User Group Conference}, year = {2008}, month = {2008-01}, address = {Seattle, Washington}, keywords = {magma}, author = {Jack Dongarra and Shirley Moore and Gregory D. Peterson and Stanimire Tomov and Jeff Allred and Vincent Natoli and David Richie} } @techreport {icl:405, title = {Fast and Small Short Vector SIMD Matrix Multiplication Kernels for the CELL Processor}, journal = {University of Tennessee Computer Science Technical Report}, number = {UT-CS-08-609 (also LAPACK Working Note 189)}, year = {2008}, month = {2008-01}, keywords = {plasma}, author = {Wesley Alvaro and Jakub Kurzak and Jack Dongarra} } @article {icl:450, title = {How Elegant Code Evolves With Hardware: The Case Of Gaussian Elimination}, journal = {in Beautiful Code: Leading Programmers Explain How They Think (Chapter 14)}, year = {2008}, month = {2008-01}, pages = {243-282}, author = {Jack Dongarra and Piotr Luszczek}, editor = {Andy Oram and G.
Wilson} } @techreport {icl:427, title = {HPCS Library Study Effort}, journal = {University of Tennessee Computer Science Technical Report, UT-CS-08-617}, year = {2008}, month = {2008-01}, author = {Jack Dongarra and James Demmel and Parry Husbands and Piotr Luszczek} } @inproceedings {icl:410, title = {The Impact of Paravirtualized Memory Hierarchy on Linear Algebra Computational Kernels and Software}, journal = {ACM/IEEE International Symposium on High Performance Distributed Computing}, year = {2008}, month = {2008-06}, address = {Boston, MA}, keywords = {gco, netsolve}, author = {Lamia Youseff and Keith Seymour and Haihang You and Jack Dongarra and Rich Wolski} } @article {icl:408, title = {Interactive Grid-Access Using GridSolve and Giggle}, journal = {Computing and Informatics}, volume = {27}, number = {2}, year = {2008}, month = {2008-00}, pages = {233-248}, issn = {1335-9150}, keywords = {netsolve}, author = {Marcus Hardt and Keith Seymour and Jack Dongarra and Michael Zapf and Nicole Ruiter} } @inproceedings {icl:416, title = {Interior State Computation of Nano Structures}, journal = {PARA 2008, 9th International Workshop on State-of-the-Art in Scientific and Parallel Computing}, year = {2008}, month = {2008-05}, address = {Trondheim, Norway}, author = {Andrew Canning and Jack Dongarra and Julien Langou and Osni Marques and Stanimire Tomov and Christof Voemel and Lin-Wang Wang} } @article {icl:436, title = {The LINPACK Benchmark: Past, Present, and Future}, journal = {Concurrency: Practice and Experience}, volume = {15}, year = {2008}, month = {2008-00}, pages = {803-820}, keywords = {hpl}, author = {Jack Dongarra and Piotr Luszczek and Antoine Petitet} } @inproceedings {icl:455, title = {Matrix Product on Heterogeneous Master Worker Platforms}, journal = {2008 PPoPP Conference}, year = {2008}, month = {2008-01}, address = {Salt Lake City, Utah}, author = {Jack Dongarra and Jean-Francois Pineau and Yves Robert and Frederic Vivien} } @article {icl:425, title = {Netlib and NA-Net: Building a Scientific Computing Community}, journal = {IEEE Annals of the History of Computing}, volume = {30}, number = {2}, year = {2008}, month = {2008-01}, pages = {30-41}, author = {Jack Dongarra and Gene H.
Golub and Eric Grosse and Cleve Moler and Keith Moore} } @techreport {icl:431, title = {Parallel Block Hessenberg Reduction using Algorithms-By-Tiles for Multicore Architectures Revisited}, journal = {University of Tennessee Computer Science Technical Report, UT-CS-08-624 (also LAPACK Working Note 208)}, year = {2008}, month = {2008-08}, keywords = {plasma}, author = {Hatem Ltaief and Jakub Kurzak and Jack Dongarra} } @article {icl:446, title = {Parallel Tiled QR Factorization for Multicore Architectures}, journal = {Concurrency and Computation: Practice and Experience}, volume = {20}, year = {2008}, month = {2008-01}, pages = {1573-1590}, author = {Alfredo Buttari and Julien Langou and Jakub Kurzak and Jack Dongarra} } @article {icl:417, title = {Performance Instrumentation and Compiler Optimizations for MPI/OpenMP Applications}, journal = {Lecture Notes in Computer Science, OpenMP Shared Memory Parallel Programming}, volume = {4315}, year = {2008}, month = {2008-00}, publisher = {Springer Berlin / Heidelberg}, author = {Oscar Hernandez and Fengguang Song and Barbara Chapman and Jack Dongarra and Bernd Mohr and Shirley Moore and Felix Wolf} } @techreport {icl:441, title = {Performance of Various Computers Using Standard Linear Equations Software (Linpack Benchmark Report)}, journal = {University of Tennessee Computer Science Technical Report, CS-89-85}, year = {2008}, month = {2008-01}, author = {Jack Dongarra} } @article {icl:462, title = {PERI Auto-tuning}, journal = {Proc. SciDAC 2008}, volume = {125}, year = {2008}, month = {2008-01}, publisher = {Journal of Physics}, address = {Seattle, Washington}, keywords = {gco}, author = {David Bailey and Jacqueline Chame and Chun Chen and Jack Dongarra and Mary Hall and Jeffrey K. Hollingsworth and Paul D. Hovland and Shirley Moore and Keith Seymour and Jaewook Shin and Ananta Tiwari and Sam Williams and Haihang You} } @article {icl:444, title = {The PlayStation 3 for High Performance Scientific Computing}, journal = {Computing in Science and Engineering}, year = {2008}, month = {2008-01}, pages = {80-83}, author = {Jakub Kurzak and Alfredo Buttari and Piotr Luszczek and Jack Dongarra} } @techreport {icl:406, title = {The PlayStation 3 for High Performance Scientific Computing}, journal = {University of Tennessee Computer Science Technical Report}, number = {UT-CS-08-608}, year = {2008}, month = {2008-01}, author = {Jakub Kurzak and Alfredo Buttari and Piotr Luszczek and Jack Dongarra} } @techreport {icl:423, title = {The Problem with the Linpack Benchmark Matrix Generator}, journal = {University of Tennessee Computer Science Technical Report, UT-CS-08-621 (also LAPACK Working Note 206)}, year = {2008}, month = {2008-06}, author = {Jack Dongarra and Julien Langou} } @techreport {icl:421, title = {QR Factorization for the CELL Processor}, journal = {University of Tennessee Computer Science Technical Report, UT-CS-08-616 (also LAPACK Working Note 201)}, year = {2008}, month = {2008-05}, keywords = {plasma}, author = {Jakub Kurzak and Jack Dongarra} } @techreport {icl:422, title = {Rectangular Full Packed Format for Cholesky{\textquoteright}s Algorithm: Factorization, Solution and Inversion}, journal = {University of Tennessee Computer Science Technical Report, UT-CS-08-614 (also LAPACK Working Note 199)}, year = {2008}, month = {2008-04}, author = {Fred G.
Gustavson and Jerzy Wasniewski and Jack Dongarra} } @inproceedings {icl:456, title = {Redesigning the Message Logging Model for High Performance}, journal = {International Supercomputer Conference (ISC 2008)}, year = {2008}, month = {2008-01}, address = {Dresden, Germany}, author = {Aurelien Bouteiller and George Bosilca and Jack Dongarra} } @techreport {icl:407, title = {Request Sequencing: Enabling Workflow for Efficient Parallel Problem Solving in GridSolve}, journal = {ICL Technical Report}, number = {ICL-UT-08-01}, year = {2008}, month = {2008-04}, keywords = {netsolve}, author = {Yinan Li and Jack Dongarra} } @inproceedings {icl:419, title = {Request Sequencing: Enabling Workflow for Efficient Problem Solving in GridSolve}, journal = {International Conference on Grid and Cooperative Computing (GCC 2008) (submitted)}, year = {2008}, month = {2008-10}, address = {Shenzhen, China}, author = {Yinan Li and Jack Dongarra and Keith Seymour and Asim YarKhan} } @article {icl:504, title = {Revisiting Matrix Product on Master-Worker Platforms}, journal = {International Journal of Foundations of Computer Science (IJFCS)}, volume = {19}, number = {6}, year = {2008}, month = {2008-12}, pages = {1317-1336}, author = {Jack Dongarra and Jean-Francois Pineau and Yves Robert and Zhiao Shi and Frederic Vivien} } @article {icl:445, title = {Solving Systems of Linear Equations on the CELL Processor Using Cholesky Factorization}, journal = {IEEE Transactions on Parallel and Distributed Systems}, volume = {19}, number = {9}, year = {2008}, month = {2008-01}, pages = {1-11}, author = {Jakub Kurzak and Alfredo Buttari and Jack Dongarra} } @inproceedings {icl:516, title = {Some Issues in Dense Linear Algebra for Multicore and Special Purpose Architectures}, journal = {PARA 2008, 9th International Workshop on State-of-the-Art in Scientific and Parallel Computing}, year = {2008}, month = {2008-05}, address = {Trondheim, Norway}, keywords = {magma}, author = {Marc Baboulin and Stanimire Tomov and Jack Dongarra} } @techreport {icl:415, title = {Some Issues in Dense Linear Algebra for Multicore and Special Purpose Architectures}, journal = {University of Tennessee Computer Science Technical Report, UT-CS-08-615 (also LAPACK Working Note 200)}, year = {2008}, month = {2008-01}, keywords = {magma}, author = {Marc Baboulin and Jack Dongarra and Stanimire Tomov} } @article {icl:447, title = {State-of-the-Art Eigensolvers for Electronic Structure Calculations of Large Scale Nano-Systems}, journal = {Journal of Computational Physics}, volume = {227}, number = {15}, year = {2008}, month = {2008-01}, pages = {7113-7124}, author = {Christof Voemel and Stanimire Tomov and Osni Marques and Andrew Canning and Lin-Wang Wang and Jack Dongarra} } @techreport {icl:443, title = {Towards Dense Linear Algebra for Hybrid GPU Accelerated Manycore Systems}, journal = {University of Tennessee Computer Science Technical Report, UT-CS-08-632 (also LAPACK Working Note 210)}, year = {2008}, month = {2008-01}, keywords = {magma}, author = {Stanimire Tomov and Jack Dongarra and Marc Baboulin} } @article {icl:448, title = {A Tribute to Gene Golub}, journal = {Computing in Science and Engineering}, year = {2008}, month = {2008-01}, pages = {5}, publisher = {IEEE}, author = {Jack Dongarra} } @article {icl:424, title = {Using Mixed Precision for Sparse Matrix Computations to Enhance the Performance while Achieving 64-bit Accuracy}, journal = {ACM Transactions on Mathematical Software}, volume = {34}, number = {4}, year = {2008}, month = {2008-00},
pages = {17-22}, keywords = {plasma}, author = {Alfredo Buttari and Jack Dongarra and Jakub Kurzak and Piotr Luszczek and Stanimire Tomov} } @techreport {icl:336, title = {Automated Empirical Tuning of a Multiresolution Analysis Kernel}, journal = {ICL Technical Report}, number = {ICL-UT-07-01}, year = {2007}, month = {2007-01}, pages = {10}, keywords = {gco}, author = {Haihang You and Keith Seymour and Jack Dongarra and Shirley Moore} } @article {icl:400, title = {Automatic Analysis of Inefficiency Patterns in Parallel Applications}, journal = {Concurrency and Computation: Practice and Experience}, volume = {19}, number = {11}, year = {2007}, month = {2007-08}, pages = {1481-1496}, author = {Felix Wolf and Bernd Mohr and Jack Dongarra and Shirley Moore} } @inproceedings {icl:355, title = {Binomial Graph: A Scalable and Fault-Tolerant Logical Network Topology}, journal = {Proceedings of The Fifth International Symposium on Parallel and Distributed Processing and Applications (ISPA07)}, year = {2007}, month = {2007-08}, publisher = {Springer}, address = {Niagara Falls, Canada}, keywords = {ftmpi}, author = {Thara Angskun and George Bosilca and Jack Dongarra} } @inproceedings {icl:372, title = {Bi-objective Scheduling Algorithms for Optimizing Makespan and Reliability on Heterogeneous Systems}, journal = {19th ACM Symposium on Parallelism in Algorithms and Architectures (SPAA) (submitted)}, year = {2007}, month = {2007-06}, address = {San Diego, CA}, author = {Jack Dongarra and Emmanuel Jeannot and Erik Saule and Zhiao Shi} } @techreport {icl:375, title = {A Class of Parallel Tiled Linear Algebra Algorithms for Multicore Architectures}, journal = {University of Tennessee Computer Science Technical Report}, number = {UT-CS-07-600 (also LAPACK Working Note 191)}, year = {2007}, month = {2007-01}, keywords = {plasma}, author = {Alfredo Buttari and Julien Langou and Jakub Kurzak and Jack Dongarra} } @techreport {icl:391, title = {Computing the Conditioning of the Components of a Linear Least Squares Solution}, journal = {University of Tennessee Computer Science Technical Report}, number = {UT-CS-07-604 (also LAPACK Working Note 193)}, year = {2007}, month = {2007-01}, author = {Marc Baboulin and Jack Dongarra and Serge Gratton and Julien Langou} } @article {icl:396, title = {Creating Software Technology to Harness the Power of Leadership-class Computing Systems}, journal = {DOE SciDAC Review (to appear)}, year = {2007}, month = {2007-06}, author = {John Mellor-Crummey and Pete Beckman and Jack Dongarra and Barton Miller and Katherine Yelick} } @article {icl:357, title = {Decision Trees and MPI Collective Algorithm Selection Problem}, journal = {Euro-Par 2007}, year = {2007}, month = {2007-08}, pages = {105{\textendash}115}, publisher = {Springer}, address = {Rennes, France}, keywords = {ftmpi}, author = {Jelena Pjesivac{\textendash}Grbovic and George Bosilca and Graham Fagg and Thara Angskun and Jack Dongarra} } @article {icl:366, title = {Disaster Survival Guide in Petascale Computing: An Algorithmic Approach}, journal = {Petascale Computing: Algorithms and Applications (to appear)}, year = {2007}, month = {2007-00}, publisher = {Chapman \& Hall - CRC Press}, author = {Jack Dongarra and Zizhong Chen and George Bosilca and Julien Langou} } @techreport {icl:338, title = {Empirical Tuning of a Multiresolution Analysis Kernel using a Specialized Code Generator}, journal = {ICL Technical Report}, number = {ICL-UT-07-02}, year = {2007}, month = {2007-01}, keywords = {gco}, author = {Haihang You
and Keith Seymour and Jack Dongarra and Shirley Moore} } @article {icl:392, title = {Exploiting Mixed Precision Floating Point Hardware in Scientific Computations}, journal = {High Performance Computing and Grids in Action (to appear)}, year = {2007}, month = {2007-00}, publisher = {IOS Press}, address = {Amsterdam}, author = {Alfredo Buttari and Jack Dongarra and Jakub Kurzak and Julien Langou and Julie Langou and Piotr Luszczek and Stanimire Tomov}, editor = {Lucio Grandinetti} } @inproceedings {icl:367, title = {Feedback-Directed Thread Scheduling with Memory Considerations}, journal = {IEEE International Symposium on High Performance Distributed Computing}, year = {2007}, month = {2007-06}, address = {Monterey Bay, CA}, author = {Fengguang Song and Shirley Moore and Jack Dongarra} } @inproceedings {icl:339, title = {GridSolve: The Evolution of Network Enabled Solver}, journal = {Grid-Based Problem Solving Environments: IFIP TC2/WG 2.5 Working Conference on Grid-Based Problem Solving Environments (Prescott, AZ, July 2006)}, year = {2007}, month = {2007-00}, pages = {215-226}, publisher = {Springer}, keywords = {netsolve}, author = {Asim YarKhan and Jack Dongarra and Keith Seymour}, editor = {Patrick Gaffney} } @article {icl:398, title = {High Performance Development for High End Computing with Python Language Wrapper (PLW)}, journal = {International Journal for High Performance Computer Applications}, volume = {21}, number = {3}, year = {2007}, month = {2007-00}, pages = {360-369}, author = {Jack Dongarra and Piotr Luszczek} } @article {icl:365, title = {How Elegant Code Evolves With Hardware: The Case Of Gaussian Elimination}, journal = {Beautiful Code: Leading Programmers Explain How They Think}, year = {2007}, month = {2007-06}, publisher = {O{\textquoteright}Reilly Media, Inc.}, author = {Jack Dongarra and Piotr Luszczek}, editor = {Andy Oram and Greg Wilson} } @article {, title = {The Impact of Multicore on Computational Science Software}, journal = {CTWatch Quarterly}, volume = {3}, year = {2007}, month = {2007-02}, author = {Jack Dongarra and Dennis Gannon and Geoffrey Fox and Ken Kennedy} } @article {icl:399, title = {Implementation of Mixed Precision in Solving Systems of Linear Equations on the Cell Processor}, journal = {Concurrency and Computation: Practice and Experience}, volume = {19}, number = {10}, year = {2007}, month = {2007-07}, pages = {1371-1385}, author = {Jakub Kurzak and Jack Dongarra} } @article {icl:403, title = {Improved Runtime and Transfer Time Prediction Mechanisms in a Network Enabled Servers Middleware}, journal = {Parallel Processing Letters}, volume = {17}, number = {1}, year = {2007}, month = {2007-03}, pages = {47-59}, author = {Emmanuel Jeannot and Keith Seymour and Asim YarKhan and Jack Dongarra} } @inproceedings {icl:386, title = {L2 Cache Modeling for Scientific Applications on Chip Multi-Processors}, journal = {Proceedings of the 2007 International Conference on Parallel Processing}, year = {2007}, month = {2007-01}, publisher = {IEEE Computer Society}, address = {Xi{\textquoteright}an, China}, author = {Fengguang Song and Shirley Moore and Jack Dongarra} } @techreport {icl:404, title = {Limitations of the PlayStation 3 for High Performance Cluster Computing}, journal = {University of Tennessee Computer Science Technical Report, UT-CS-07-597 (Also LAPACK Working Note 185)}, year = {2007}, month = {2007-00}, author = {Alfredo Buttari and Jack Dongarra and Jakub Kurzak} } @article {icl:395, title = {Mixed Precision Iterative Refinement
Techniques for the Solution of Dense Linear Systems}, journal = {International Journal of High Performance Computer Applications (to appear)}, year = {2007}, month = {2007-08}, author = {Alfredo Buttari and Jack Dongarra and Julien Langou and Julie Langou and Piotr Luszczek and Jakub Kurzak} } @article {icl:356, title = {MPI Collective Algorithm Selection and Quadtree Encoding}, journal = {Parallel Computing (Special Edition: EuroPVM/MPI 2006)}, year = {2007}, month = {2007-00}, publisher = {Elsevier}, keywords = {ftmpi}, author = {Jelena Pjesivac{\textendash}Grbovic and George Bosilca and Graham Fagg and Thara Angskun and Jack Dongarra} } @inproceedings {icl:388, title = {Multithreading for synchronization tolerance in matrix factorization}, journal = {Journal of Physics: Conference Series, SciDAC 2007}, volume = {78}, number = {2007}, year = {2007}, month = {2007-01}, author = {Alfredo Buttari and Jack Dongarra and Parry Husbands and Jakub Kurzak and Katherine Yelick} } @article {icl:394, title = {Netlib and NA-Net: building a scientific computing community}, journal = {IEEE Annals of the History of Computing (to appear)}, year = {2007}, month = {2007-08}, author = {Jack Dongarra and Gene H. Golub and Cleve Moler and Keith Moore} } @inproceedings {icl:374, title = {Optimal Routing in Binomial Graph Networks}, journal = {The International Conference on Parallel and Distributed Computing, Applications and Technologies (PDCAT)}, year = {2007}, month = {2007-12}, publisher = {IEEE Computer Society}, address = {Adelaide, Australia}, keywords = {ftmpi}, author = {Thara Angskun and George Bosilca and Brad Vander Zanden and Jack Dongarra} } @techreport {icl:363, title = {Parallel Tiled QR Factorization for Multicore Architectures}, journal = {University of Tennessee Computer Science Dept. Technical Report, UT-CS-07-598 (also LAPACK Working Note 190)}, year = {2007}, month = {2007-00}, keywords = {plasma}, author = {Alfredo Buttari and Julien Langou and Jakub Kurzak and Jack Dongarra} } @article {icl:358, title = {Performance Analysis of MPI Collective Operations}, journal = {Cluster Computing}, volume = {10}, number = {2}, year = {2007}, month = {2007-06}, pages = {127-143}, publisher = {Springer Netherlands}, keywords = {ftmpi}, author = {Jelena Pjesivac{\textendash}Grbovic and Thara Angskun and George Bosilca and Graham Fagg and Edgar Gabriel and Jack Dongarra} } @techreport {icl:381, title = {Performance of Various Computers Using Standard Linear Equations Software (Linpack Benchmark Report)}, journal = {University of Tennessee Computer Science Dept.
Technical Report CS-89-85}, year = {2007}, month = {2007-00}, author = {Jack Dongarra} } @article {icl:397, title = {Recovery Patterns for Iterative Methods in a Parallel Unstable Environment}, journal = {SIAM SISC (to appear)}, year = {2007}, month = {2007-05}, author = {Julien Langou and Zizhong Chen and George Bosilca and Jack Dongarra} } @inproceedings {icl:354, title = {Reliability Analysis of Self-Healing Network using Discrete-Event Simulation}, journal = {Proceedings of Seventh IEEE International Symposium on Cluster Computing and the Grid (CCGrid {\textquoteright}07)}, year = {2007}, month = {2007-05}, pages = {437-444}, publisher = {IEEE Computer Society}, keywords = {ftmpi}, author = {Thara Angskun and George Bosilca and Graham Fagg and Jelena Pjesivac{\textendash}Grbovic and Jack Dongarra} } @article {icl:378, title = {Remembering Ken Kennedy}, journal = {SciDAC Review}, volume = {5}, number = {2007}, year = {2007}, month = {2007-00}, author = {Jack Dongarra and others} } @article {icl:353, title = {Retrospect: Deterministic Relay of MPI Applications for Interactive Distributed Debugging}, journal = {Accepted for Euro PVM/MPI 2007}, year = {2007}, month = {2007-09}, publisher = {Springer}, keywords = {ftmpi}, author = {Aurelien Bouteiller and George Bosilca and Jack Dongarra} } @article {icl:371, title = {Revisiting Matrix Product on Master-Worker Platforms}, journal = {International Journal of Foundations of Computer Science (IJFCS) (accepted)}, year = {2007}, month = {2007-00}, author = {Jack Dongarra and Jean-Francois Pineau and Yves Robert and Zhiao Shi and Frederic Vivien} } @inproceedings {icl:384, title = {Scalability Analysis of the SPEC OpenMP Benchmarks on Large-Scale Shared Memory Multiprocessors}, journal = {Proceedings of the 2007 International Conference on Computational Science (ICCS 2007)}, volume = {4487-4490}, year = {2007}, pages = {815-822}, publisher = {Springer LNCS}, address = {Beijing, China}, keywords = {kojak}, doi = {10.1007/978-3-540-72586-2_115}, author = {Karl F{\"u}rlinger and Michael Gerndt and Jack Dongarra}, editor = {Yong Shi and Jack Dongarra and Geert Dick van Albada and Peter M. Sloot} } @techreport {icl:364, title = {SCOP3: A Rough Guide to Scientific Computing On the PlayStation 3}, journal = {University of Tennessee Computer Science Dept.
Technical Report, UT-CS-07-595}, year = {2007}, month = {2007-00}, keywords = {multi-core}, author = {Alfredo Buttari and Piotr Luszczek and Jakub Kurzak and Jack Dongarra and George Bosilca} } @inproceedings {icl:393, title = {Self Adapting Application Level Fault Tolerance for Parallel and Distributed Computing}, journal = {Proceedings of Workshop on Self Adapting Application Level Fault Tolerance for Parallel and Distributed Computing at IPDPS}, year = {2007}, month = {2007-03}, pages = {1-8}, author = {Zizhong Chen and Ming Yang and Guillermo Francia III and Jack Dongarra} } @inproceedings {icl:380, title = {Self-Healing in Binomial Graph Networks}, journal = {2nd International Workshop On Reliability in Decentralized Distributed Systems (RDDS 2007)}, year = {2007}, month = {2007-11}, address = {Vilamoura, Algarve, Portugal}, author = {Thara Angskun and George Bosilca and Jack Dongarra} } @techreport {icl:341, title = {Solving Systems of Linear Equations on the CELL Processor Using Cholesky Factorization}, journal = {UT Computer Science Technical Report (Also LAPACK Working Note 184)}, number = {UT-CS-07-596}, year = {2007}, month = {2007-01}, keywords = {lapack}, author = {Jakub Kurzak and Alfredo Buttari and Jack Dongarra} } @article {icl:401, title = {The Use of Bulk States to Accelerate the Band Edge State Calculation of a Semiconductor Quantum Dot}, journal = {Journal of Computational Physics}, volume = {223}, year = {2007}, month = {2007-00}, pages = {774-782}, author = {Christof Voemel and Stanimire Tomov and Lin-Wang Wang and Osni Marques and Jack Dongarra} } @inproceedings {icl:382, title = {On Using Incremental Profiling for the Performance Analysis of Shared Memory Parallel Applications}, journal = {Proceedings of the 13th International Euro-Par Conference on Parallel Processing (Euro-Par {\textquoteright}07)}, year = {2007}, month = {2007-01}, publisher = {Springer LNCS}, address = {Rennes, France}, keywords = {kojak}, author = {Karl F{\"u}rlinger and Jack Dongarra and Michael Gerndt} } @inproceedings {icl:331, title = {Algorithm-Based Checkpoint-Free Fault Tolerance for Parallel Matrix Computations on Volatile Resources}, journal = {IPDPS 2006, 20th IEEE International Parallel and Distributed Processing Symposium}, year = {2006}, month = {2006-01}, address = {Rhodes Island, Greece}, author = {Zizhong Chen and Jack Dongarra} } @article {, title = {An Asynchronous Algorithm on NetSolve Global Computing System}, journal = {Future Generation Computer Systems}, volume = {22}, year = {2006}, month = {2006-02}, pages = {279-290}, abstract = {The explicitly restarted Arnoldi method (ERAM) allows one to find a few eigenpairs of a large sparse matrix. The multiple explicitly restarted Arnoldi method (MERAM) is a technique based upon a multiple projection of ERAM and accelerates its convergence [N. Emad, S. Petiton, G. Edjlali, Multiple explicitly restarted Arnoldi method for solving large eigenproblems, SIAM J. Sci. Comput. SJSC 27 (1) (2005) 253-277]. MERAM allows one to update the restarting vector of an ERAM by taking into account the interesting eigen-information obtained by its other ERAM processes. This method is particularly well suited to the GRID-type environments. We present an adaptation of the asynchronous version of MERAM for the NetSolve global computing system. We point out some advantages and limitations of this kind of system to implement the asynchronous hybrid algorithms.
We give some results of our experiments and show that we can obtain a good acceleration of the convergence compared to ERAM. These results also show the potential of the MERAM-like hybrid methods for the GRID computing environments.}, keywords = {Arnoldi method, Explicit restarting, Global computing, Large eigenproblem, netsolve}, doi = {https://doi.org/10.1016/j.future.2005.10.003}, author = {Jack Dongarra and Nahid Emad and Seyed Abolfazl Shahzadeh-Fazeli} } @techreport {icl:313, title = {ATLAS on the BlueGene/L {\textendash} Preliminary Results}, journal = {ICL Technical Report}, number = {ICL-UT-06-10}, year = {2006}, month = {2006-01}, keywords = {gco}, author = {Keith Seymour and Haihang You and Jack Dongarra} } @article {icl:402, title = {Conjugate-Gradient Eigenvalue Solvers in Computing Electronic Properties of Nanostructure Architectures}, journal = {International Journal of Computational Science and Engineering}, volume = {2}, number = {3/4}, year = {2006}, month = {2006-00}, pages = {205-212}, author = {Stanimire Tomov and Julien Langou and Jack Dongarra and Andrew Canning and Lin-Wang Wang} } @inproceedings {icl:329, title = {Experiments with Strassen{\textquoteright}s Algorithm: From Sequential to Parallel}, journal = {18th IASTED International Conference on Parallel and Distributed Computing and Systems PDCS 2006 (submitted)}, year = {2006}, month = {2006-01}, address = {Dallas, Texas}, author = {Fengguang Song and Jack Dongarra and Shirley Moore} } @article {icl:317, title = {Exploiting the Performance of 32 bit Floating Point Arithmetic in Obtaining 64 bit Accuracy}, journal = {University of Tennessee Computer Science Tech Report}, number = {UT-CS-06-574, LAPACK Working Note $\#$175}, year = {2006}, month = {2006-04}, keywords = {iter-ref}, author = {Julien Langou and Julie Langou and Piotr Luszczek and Jakub Kurzak and Alfredo Buttari and Jack Dongarra} } @article {icl:315, title = {Flexible collective communication tuning architecture applied to Open MPI}, journal = {2006 Euro PVM/MPI (submitted)}, year = {2006}, month = {2006-01}, address = {Bonn, Germany}, keywords = {ftmpi}, author = {Graham Fagg and Jelena Pjesivac{\textendash}Grbovic and George Bosilca and Thara Angskun and Jack Dongarra} } @inproceedings {icl:337, title = {The HPC Challenge (HPCC) Benchmark Suite}, journal = {SC06 Conference Tutorial}, year = {2006}, month = {2006-11}, publisher = {IEEE}, address = {Tampa, Florida}, keywords = {hpcc, hpcchallenge}, author = {Piotr Luszczek and David Bailey and Jack Dongarra and Jeremy Kepner and Robert Lucas and Rolf Rabenseifner and Daisuke Takahashi} } @article {icl:369, title = {The Impact of Multicore on Math Software}, journal = {PARA 2006}, year = {2006}, month = {2006-06}, address = {Umea, Sweden}, keywords = {plasma}, author = {Alfredo Buttari and Jack Dongarra and Jakub Kurzak and Julien Langou and Piotr Luszczek and Stanimire Tomov} } @article {icl:650, title = {Implementation and Usage of the PERUSE-Interface in Open MPI}, journal = {Euro PVM/MPI 2006}, year = {2006}, month = {2006-09}, address = {Bonn, Germany}, author = {Rainer Keller and George Bosilca and Graham Fagg and Michael Resch and Jack Dongarra} } @article {icl:318, title = {Implementation of the Mixed-Precision High Performance LINPACK Benchmark on the CELL Processor}, journal = {University of Tennessee Computer Science Tech Report}, number = {UT-CS-06-580, LAPACK Working Note $\#$177}, year = {2006}, month = {2006-09}, keywords = {iter-ref}, author = {Jakub Kurzak and Jack Dongarra} }
@article {icl:321, title = {Implementing Linear Algebra Routines on Multi-Core Processors with Pipelining and a Look Ahead}, journal = {University of Tennessee Computer Science Tech Report, UT-CS-06-581, LAPACK Working Note $\#$178}, year = {2006}, month = {2006-01}, author = {Jakub Kurzak and Jack Dongarra} } @article {icl:340, title = {Improved Runtime and Transfer Time Prediction Mechanisms in a Network Enabled Server}, journal = {Parallel Processing Letters}, volume = {17}, number = {1}, year = {2006}, month = {2006-03}, pages = {47-59}, keywords = {netsolve}, author = {Emmanuel Jeannot and Keith Seymour and Asim YarKhan and Jack Dongarra} } @techreport {icl:334, title = {Modeling of L2 Cache Behavior for Thread-Parallel Scientific Programs on Chip Multi-Processors}, journal = {University of Tennessee Computer Science Technical Report}, number = {UT-CS-06-583}, year = {2006}, month = {2006-01}, author = {Fengguang Song and Shirley Moore and Jack Dongarra} } @techreport {icl:314, title = {MPI Collective Algorithm Selection and Quadtree Encoding}, journal = {ICL Technical Report}, number = {ICL-UT-06-11}, year = {2006}, month = {2006-00}, keywords = {ftmpi}, author = {Jelena Pjesivac{\textendash}Grbovic and Graham Fagg and Thara Angskun and George Bosilca and Jack Dongarra} } @article {icl:323, title = {MPI Collective Algorithm Selection and Quadtree Encoding}, journal = {Lecture Notes in Computer Science}, volume = {4192}, number = {ICL-UT-06-13}, year = {2006}, month = {2006-09}, pages = {40-48}, publisher = {Springer Berlin / Heidelberg}, keywords = {ftmpi}, author = {Jelena Pjesivac{\textendash}Grbovic and Graham Fagg and Thara Angskun and George Bosilca and Jack Dongarra} } @inproceedings {icl:325, title = {Performance evaluation of eigensolvers in nano-structure computations}, journal = {IEEE/ACM Proceedings of HPCNano SC06 (to appear)}, year = {2006}, month = {2006-01}, keywords = {doe-nano}, author = {Andrew Canning and Jack Dongarra and Julien Langou and Osni Marques and Stanimire Tomov and Christof Voemel and Lin-Wang Wang} } @inproceedings {icl:319, title = {Performance Instrumentation and Compiler Optimizations for MPI/OpenMP Applications}, journal = {Second International Workshop on OpenMP}, year = {2006}, month = {2006-01}, address = {Reims, France}, keywords = {kojak}, author = {Oscar Hernandez and Fengguang Song and Barbara Chapman and Jack Dongarra and Bernd Mohr and Shirley Moore and Felix Wolf} } @techreport {icl:328, title = {Performance of Various Computers Using Standard Linear Equations Software (Linpack Benchmark Report)}, journal = {University of Tennessee Computer Science Department Technical Report, UT-CS-04-526}, volume = {{\textendash}89-95}, year = {2006}, month = {2006-01}, author = {Jack Dongarra} } @article {icl:327, title = {Predicting the electronic properties of 3D, million-atom semiconductor nanostructure architectures}, journal = {Journal of Physics: Conference Series}, volume = {46}, year = {2006}, month = {2006-01}, pages = {292-298}, doi = {10.1088/1742-6596/46/1/040}, keywords = {DOE_NANO}, author = {Alex Zunger and Alberto Franceschetti and Gabriel Bester and Wesley B. Jones and Kwiseon Kim and Peter A.
Graf and Lin-Wang Wang and Andrew Canning and Osni Marques and Christof Voemel and Jack Dongarra and Julien Langou and Stanimire Tomov} } @inproceedings {icl:310, title = {Proposal of MPI operation level Checkpoint/Rollback and one implementation}, journal = {Proceedings of IEEE CCGrid 2006}, year = {2006}, month = {2006-01}, publisher = {IEEE Computer Society}, keywords = {HARNESS/FT-MPI}, author = {Yuan Tang and Graham Fagg and Jack Dongarra} } @article {icl:370, title = {Prospectus for the Next LAPACK and ScaLAPACK Libraries}, journal = {PARA 2006}, year = {2006}, month = {2006-06}, address = {Umea, Sweden}, author = {James Demmel and Jack Dongarra and B. Parlett and William Kahan and Ming Gu and David Bindel and Yozo Hida and Xiaoye Li and Osni Marques and Jason E. Riedy and Christof Voemel and Julien Langou and Piotr Luszczek and Jakub Kurzak and Alfredo Buttari and Julie Langou and Stanimire Tomov} } @article {icl:294, title = {Recent Developments in GridSolve}, journal = {International Journal of High Performance Computing Applications (Special Issue: Scheduling for Large-Scale Heterogeneous Platforms)}, volume = {20}, number = {1}, year = {2006}, month = {2006-00}, publisher = {Sage Science Press}, keywords = {netsolve}, author = {Asim YarKhan and Keith Seymour and Kiran Sagi and Zhiao Shi and Jack Dongarra}, editor = {Yves Robert} } @article {icl:316, title = {Scalable Fault Tolerant Protocol for Parallel Runtime Environments}, journal = {2006 Euro PVM/MPI}, number = {ICL-UT-06-12}, year = {2006}, month = {2006-00}, address = {Bonn, Germany}, keywords = {ftmpi}, author = {Thara Angskun and Graham Fagg and George Bosilca and Jelena Pjesivac{\textendash}Grbovic and Jack Dongarra} } @article {icl:332, title = {Self Adapting Numerical Software SANS Effort}, journal = {IBM Journal of Research and Development}, volume = {50}, number = {2/3}, year = {2006}, month = {2006-01}, pages = {223-238}, keywords = {gco}, author = {George Bosilca and Zizhong Chen and Jack Dongarra and Victor Eijkhout and Graham Fagg and Erika Fuentes and Julien Langou and Piotr Luszczek and Jelena Pjesivac{\textendash}Grbovic and Keith Seymour and Haihang You and Sathish Vadhiyar} } @inproceedings {icl:330, title = {Self-Healing Network for Scalable Fault Tolerant Runtime Environments}, journal = {DAPSYS 2006, 6th Austrian-Hungarian Workshop on Distributed and Parallel Systems}, year = {2006}, month = {2006-01}, address = {Innsbruck, Austria}, author = {Thara Angskun and Graham Fagg and George Bosilca and Jelena Pjesivac{\textendash}Grbovic and Jack Dongarra} } @inproceedings {icl:324, title = {Towards bulk based preconditioning for quantum dot computations}, journal = {IEEE/ACM Proceedings of HPCNano SC06 (to appear)}, year = {2006}, month = {2006-01}, keywords = {doe-nano}, author = {Andrew Canning and Jack Dongarra and Julien Langou and Osni Marques and Stanimire Tomov and Christof Voemel and Lin-Wang Wang} } @techreport {icl:308, title = {Twenty-Plus Years of Netlib and NA-Net}, journal = {University of Tennessee Computer Science Department Technical Report, UT-CS-04-526}, year = {2006}, month = {2006-00}, author = {Jack Dongarra and Gene H.
Golub and Eric Grosse and Cleve Moler and Keith Moore} } @article {icl:326, title = {The use of bulk states to accelerate the band edge state calculation of a semiconductor quantum dot}, journal = {Journal of Computational Physics (submitted)}, year = {2006}, month = {2006-01}, keywords = {doe-nano}, author = {Christof Voemel and Stanimire Tomov and Lin-Wang Wang and Osni Marques and Jack Dongarra} } @techreport {icl:263, title = {Algorithm-Based Checkpoint-Free Fault Tolerance for Parallel Matrix Computations on Volatile Resources}, journal = {University of Tennessee Computer Science Department Technical Report}, volume = {UT-CS-05-561}, year = {2005}, month = {2005-11}, author = {Zizhong Chen and Jack Dongarra} } @article {icl:271, title = {Automatic analysis of inefficiency patterns in parallel applications}, journal = {Concurrency and Computation: Practice and Experience, Special issue "Automatic Performance Analysis" (submitted)}, year = {2005}, month = {2005-00}, keywords = {kojak}, author = {Felix Wolf and Bernd Mohr and Jack Dongarra and Shirley Moore} } @inproceedings {icl:288, title = {Automatic Experimental Analysis of Communication Patterns in Virtual Topologies}, journal = {In Proceedings of the International Conference on Parallel Processing}, year = {2005}, month = {2005-06}, publisher = {IEEE Computer Society}, address = {Oslo, Norway}, keywords = {kojak}, author = {Nikhil Bhatia and Fengguang Song and Felix Wolf and Jack Dongarra and Bernd Mohr and Shirley Moore} } @article {icl:157, title = {Biological Sequence Alignment on the Computational Grid Using the GrADS Framework}, journal = {Future Generation Computer Systems}, volume = {21}, number = {6}, year = {2005}, month = {2005-06}, pages = {980-986}, publisher = {Elsevier}, keywords = {grads}, author = {Asim YarKhan and Jack Dongarra} } @inproceedings {icl:284, title = {Comparison of Nonlinear Conjugate-Gradient methods for computing the Electronic Properties of Nanostructure Architectures}, journal = {Proceedings of 5th International Conference on Computational Science (ICCS)}, year = {2005}, month = {2005-01}, pages = {317-325}, publisher = {Springer{\textquoteright}s Lecture Notes in Computer Science}, address = {Atlanta, GA, USA}, keywords = {doe-nano}, author = {Stanimire Tomov and Julien Langou and Andrew Canning and Lin-Wang Wang and Jack Dongarra}, editor = {Vaidy S. Sunderam and Geert Dick van Albada and Peter M.
Sloot and Jack Dongarra} } @article {icl:277, title = {The Component Structure of a Self-Adapting Numerical Software System}, journal = {International Journal of Parallel Programming}, volume = {33}, number = {2}, year = {2005}, month = {2005-06}, keywords = {salsa, sans}, author = {Victor Eijkhout and Erika Fuentes and Thomas Eidson and Jack Dongarra} } @techreport {icl:303, title = {Condition Numbers of Gaussian Random Matrices}, journal = {University of Tennessee Computer Science Department Technical Report}, volume = {UT-CS-04-539}, year = {2005}, month = {2005-00}, keywords = {ft-la}, author = {Zizhong Chen and Jack Dongarra} } @article {icl:266, title = {Condition Numbers of Gaussian Random Matrices}, journal = {SIAM Journal on Matrix Analysis and Applications (to appear)}, year = {2005}, month = {2005-01}, keywords = {ftmpi, grads, lacsi, sans}, author = {Zizhong Chen and Jack Dongarra} } @article {icl:292, title = {Conjugate-Gradient Eigenvalue Solvers in Computing Electronic Properties of Nanostructure Architectures}, journal = {International Journal of Computational Science and Engineering (to appear)}, year = {2005}, month = {2005-01}, author = {Stanimire Tomov and Julien Langou and Andrew Canning and Lin-Wang Wang and Jack Dongarra} } @techreport {icl:255, title = {An Effective Empirical Search Method for Automatic Software Tuning}, journal = {ICL Technical Report}, number = {ICL-UT-05-02}, year = {2005}, month = {2005-01}, keywords = {gco}, author = {Haihang You and Keith Seymour and Jack Dongarra} } @inproceedings {icl:265, title = {Fault Tolerant High Performance Computing by a Coding Approach}, journal = {Proceedings of ACM SIGPLAN Symposium on Principles and Practice of Parallel Programming (to appear)}, year = {2005}, month = {2005-01}, address = {Chicago, Illinois}, keywords = {ftmpi, grads, lacsi, sans}, author = {Zizhong Chen and Graham Fagg and Edgar Gabriel and Julien Langou and Thara Angskun and George Bosilca and Jack Dongarra} } @inproceedings {icl:280, title = {Hash Functions for Datatype Signatures in MPI}, journal = {Proceedings of 12th European Parallel Virtual Machine and Message Passing Interface Conference - Euro PVM/MPI}, volume = {3666}, year = {2005}, month = {2005-09}, pages = {76-83}, publisher = {Springer-Verlag Berlin}, address = {Sorrento (Naples), Italy}, keywords = {ftmpi}, author = {George Bosilca and Jack Dongarra and Graham Fagg and Julien Langou}, editor = {Beniamino Di Martino} } @inproceedings {icl:248, title = {Improving Time to Solution with Automated Performance Analysis}, journal = {Second Workshop on Productivity and Performance in High-End Computing (P-PHEC) at 11th International Symposium on High Performance Computer Architecture (HPCA-2005)}, year = {2005}, month = {2005-02}, address = {San Francisco}, keywords = {kojak}, author = {Shirley Moore and Felix Wolf and Jack Dongarra and Bernd Mohr} } @article {icl:282, title = {Introduction to the HPC Challenge Benchmark Suite}, year = {2005}, month = {2005-03}, keywords = {hpcc, hpcchallenge}, author = {Piotr Luszczek and Jack Dongarra and David Koester and Rolf Rabenseifner and Bob Lucas and Jeremy Kepner and John McCalpin and David Bailey and Daisuke Takahashi} } @techreport {icl:253, title = {Introduction to the HPCChallenge Benchmark Suite}, journal = {ICL Technical Report}, number = {ICL-UT-05-01}, year = {2005}, note = {Also appears as CS Dept.
Tech Report UT-CS-05-544}, month = {2005-01}, keywords = {hpcc, hpcchallenge}, author = {Jack Dongarra and Piotr Luszczek} } @article {icl:260, title = {LAPACK 2005 Prospectus: Reliable and Scalable Software for Linear Algebra Computations on High End Computers}, year = {2005}, month = {2005-01}, publisher = {LAPACK Working Note 164}, author = {James Demmel and Jack Dongarra} } @article {, title = {NanoPSE: A Nanoscience Problem Solving Environment for Atomistic Electronic Structure of Semiconductor Nanostructures}, journal = {Journal of Physics: Conference Series}, year = {2005}, month = {2005-06}, pages = {277-282}, abstract = {Researchers at the National Renewable Energy Laboratory and their collaborators have developed over the past ~10 years a set of algorithms for an atomistic description of the electronic structure of nanostructures, based on plane-wave pseudopotentials and configuration interaction. The present contribution describes the first step in assembling these various codes into a single, portable, integrated set of software packages. This package is part of an ongoing research project in the development stage. Components of NanoPSE include codes for atomistic nanostructure generation and passivation, valence force field model for atomic relaxation, code for potential field generation, empirical pseudopotential method solver, strained linear combination of bulk bands method solver, configuration interaction solver for excited states, selection of linear algebra methods, and several inverse band structure solvers. Although not available for general distribution at this time as it is being developed and tested, the design goal of the NanoPSE software is to provide a software context for collaboration. The software package is enabled by fcdev, an integrated collection of best practice GNU software for open source development and distribution augmented to better support FORTRAN.}, doi = {https://doi.org/10.1088/1742-6596/16/1/038}, url = {https://iopscience.iop.org/article/10.1088/1742-6596/16/1/038/meta}, author = {Wesley B. Jones and Gabriel Bester and Andrew Canning and Alberto Franceschetti and Peter A. Graf and Kwiseon Kim and Julien Langou and Lin-Wang Wang and Jack Dongarra and Alex Zunger} } @article {icl:276, title = {NetSolve: Grid Enabling Scientific Computing Environments}, journal = {Grid Computing and New Frontiers of High Performance Processing}, number = {14}, year = {2005}, month = {2005-00}, publisher = {Elsevier}, keywords = {netsolve}, author = {Keith Seymour and Asim YarKhan and Sudesh Agrawal and Jack Dongarra}, editor = {Lucio Grandinetti} } @article {icl:278, title = {New Grid Scheduling and Rescheduling Methods in the GrADS Project}, journal = {International Journal of Parallel Programming}, volume = {33}, number = {2}, year = {2005}, month = {2005-06}, pages = {209-229}, publisher = {Springer}, keywords = {grads}, author = {Francine Berman and Henri Casanova and Andrew Chien and Keith Cooper and Holly Dail and Anshuman Dasgupta and Wei Deng and Jack Dongarra and Lennart Johnsson and Ken Kennedy and Charles Koelbel and Bo Liu and Xu Liu and Anirban Mandal and Gabriel Marin and Mark Mazina and John Mellor-Crummey and Celso Mendes and A. Olugbile and Jignesh M. Patel and Dan Reed and Zhiao Shi and Otto Sievert and H. 
Xia and Asim YarKhan} } @article {icl:290, title = {A Not So Simple Matter of Software}, journal = {NCSA Access Online}, year = {2005}, month = {2005-00}, publisher = {NCSA}, author = {Jack Dongarra} } @inproceedings {icl:267, title = {Numerically Stable Real Number Codes Based on Random Matrices}, journal = {The International Conference on Computational Science}, year = {2005}, month = {2005-01}, publisher = {LNCS 3514, Springer-Verlag}, address = {Atlanta, GA}, keywords = {ftmpi, grads, lacsi, sans}, author = {Zizhong Chen and Jack Dongarra} } @article {icl:305, title = {Optimization Problem Solving System Using GridRPC}, journal = {IEEE Transactions on Parallel and Distributed Systems (submitted)}, year = {2005}, month = {2005-01}, author = {Hisashi Shimosaka and Tomoyuki Hiroyasu and Mitsunori Miki and Jack Dongarra} } @inproceedings {icl:274, title = {A Pattern-Based Approach to Automated Application Performance Analysis}, journal = {Workshop on Patterns in High Performance Computing}, year = {2005}, month = {2005-05}, address = {University of Illinois at Urbana-Champaign}, keywords = {kojak}, author = {Nikhil Bhatia and Shirley Moore and Felix Wolf and Jack Dongarra and Bernd Mohr} } @inproceedings {icl:249, title = {Performance Analysis of MPI Collective Operations}, journal = {4th International Workshop on Performance Modeling, Evaluation, and Optimization of Parallel and Distributed Systems (PMEO-PDS {\textquoteright}05)}, year = {2005}, month = {2005-04}, address = {Denver, Colorado}, keywords = {ftmpi}, author = {Jelena Pjesivac{\textendash}Grbovic and Thara Angskun and George Bosilca and Graham Fagg and Edgar Gabriel and Jack Dongarra} } @article {icl:306, title = {Performance Analysis of MPI Collective Operations}, journal = {Cluster Computing Journal (to appear)}, year = {2005}, month = {2005-01}, keywords = {ftmpi}, author = {Jelena Pjesivac{\textendash}Grbovic and Thara Angskun and George Bosilca and Graham Fagg and Edgar Gabriel and Jack Dongarra} } @techreport {icl:301, title = {Recovery Patterns for Iterative Methods in a Parallel Unstable Environment}, journal = {University of Tennessee Computer Science Department Technical Report, UT-CS-04-538}, year = {2005}, month = {2005-00}, keywords = {ft-la}, author = {George Bosilca and Zizhong Chen and Jack Dongarra and Julien Langou} } @techreport {icl:283, title = {Remote Software Toolkit Installer}, journal = {ICL Technical Report}, number = {ICL-UT-05-04}, year = {2005}, month = {2005-06}, keywords = {rest}, author = {Eric Meek and Jeff Larkin and Jack Dongarra} } @inproceedings {icl:270, title = {A Scalable Approach to MPI Application Performance Analysis}, journal = {In Proc. of the 12th European Parallel Virtual Machine and Message Passing Interface Conference}, year = {2005}, month = {2005-09}, publisher = {Springer LNCS}, keywords = {kojak}, author = {Shirley Moore and Felix Wolf and Jack Dongarra and Sameer Shende and Allen D.
Malony and Bernd Mohr} } @inproceedings {icl:279, title = {Scalable Fault Tolerant MPI: Extending the Recovery Algorithm}, journal = {Proceedings of 12th European Parallel Virtual Machine and Message Passing Interface Conference - Euro PVM/MPI}, volume = {3666}, year = {2005}, month = {2005-09}, pages = {67}, publisher = {Springer-Verlag Berlin}, address = {Sorrento (Naples), Italy}, keywords = {ftmpi}, author = {Graham Fagg and Thara Angskun and George Bosilca and Jelena Pjesivac{\textendash}Grbovic and Jack Dongarra}, editor = {Beniamino Di Martino} } @article {icl:244, title = {Self Adaptivity in Grid Computing}, journal = {Concurrency and Computation: Practice and Experience, Special Issue: Grid Performance}, volume = {17}, number = {2-4}, year = {2005}, month = {2005-00}, pages = {235-257}, keywords = {netsolve, sans}, author = {Sathish Vadhiyar and Jack Dongarra}, editor = {John Gurd and Anthony Hey and Juri Papay and Graham Riley} } @techreport {icl:261, title = {Towards an Accurate Model for Collective Communications}, journal = {ICL Technical Report}, number = {ICL-UT-05-03}, year = {2005}, month = {2005-01}, author = {Sathish Vadhiyar and Graham Fagg and Jack Dongarra} } @conference {icl:197, title = {Accurate Cache and TLB Characterization Using Hardware Counters}, booktitle = {International Conference on Computational Science (ICCS 2004)}, year = {2004}, month = {2004-06}, publisher = {Springer}, organization = {Springer}, address = {Krakow, Poland}, abstract = {We have developed a set of microbenchmarks for accurately determining the structural characteristics of data cache memories and TLBs. These characteristics include cache size, cache line size, cache associativity, memory page size, number of data TLB entries, and data TLB associativity. Unlike previous microbenchmarks that used time-based measurements, our microbenchmarks use hardware event counts to more accurately and quickly determine these characteristics while requiring fewer limiting assumptions.}, keywords = {gco, lacsi, papi}, doi = {https://doi.org/10.1007/978-3-540-24688-6_57}, author = {Jack Dongarra and Shirley Moore and Phil Mucci and Keith Seymour and Haihang You} } @inproceedings {icl:206, title = {Active Logistical State Management in the GridSolve/L}, journal = {4th International Symposium on Cluster Computing and the Grid (CCGrid 2004) (submitted)}, year = {2004}, month = {2004-01}, address = {Chicago, Illinois}, keywords = {netsolve}, author = {Micah Beck and Jack Dongarra and Jian Huang and Terry Moore and James Plank} } @inproceedings {icl:233, title = {An Algebra for Cross-Experiment Performance Analysis}, journal = {2004 International Conference on Parallel Processing (ICPP-04)}, year = {2004}, month = {2004-08}, address = {Montreal, Quebec, Canada}, keywords = {kojak}, author = {Fengguang Song and Felix Wolf and Nikhil Bhatia and Jack Dongarra and Shirley Moore} } @techreport {icl:231, title = {An Asynchronous Algorithm on NetSolve Global Computing System}, journal = {PRiSM - Laboratoire de recherche en informatique, Universit{\'e} de Versailles St-Quentin Technical Report}, year = {2004}, month = {2004-03}, keywords = {netsolve}, author = {Nahid Emad and S. A.
Shahzadeh Fazeli and Jack Dongarra} } @conference {icl:235, title = {Automatic Blocking of QR and LU Factorizations for Locality}, booktitle = {2nd ACM SIGPLAN Workshop on Memory System Performance (MSP 2004)}, year = {2004}, month = {2004-06}, publisher = {ACM}, organization = {ACM}, address = {Washington, DC}, abstract = {QR and LU factorizations for dense matrices are important linear algebra computations that are widely used in scientific applications. To efficiently perform these computations on modern computers, the factorization algorithms need to be blocked when operating on large matrices to effectively exploit the deep cache hierarchy prevalent in today{\textquoteright}s computer memory systems. Because both QR (based on Householder transformations) and LU factorization algorithms contain complex loop structures, few compilers can fully automate the blocking of these algorithms. Though linear algebra libraries such as LAPACK provide manually blocked implementations of these algorithms, more benefit, such as automatic adaptation of different blocking strategies, can be gained by automatically generating blocked versions of the computations. This paper demonstrates how to apply an aggressive loop transformation technique, dependence hoisting, to produce efficient blockings for both QR and LU with partial pivoting. We present different blocking strategies that can be generated by our optimizer and compare the performance of auto-blocked versions with manually tuned versions in LAPACK, both using reference BLAS, ATLAS BLAS and native BLAS specially tuned for the underlying machine architectures.}, keywords = {gco, papi, sans}, doi = {10.1145/1065895.1065898}, author = {Qing Yi and Ken Kennedy and Haihang You and Keith Seymour and Jack Dongarra} } @conference {icl:239, title = {Automating the Large-Scale Collection and Analysis of Performance}, booktitle = {5th LCI International Conference on Linux Clusters: The HPC Revolution}, year = {2004}, month = {2004-05}, address = {Austin, Texas}, keywords = {kojak, papi}, author = {Phil Mucci and Jack Dongarra and Rick Kufrin and Shirley Moore and Fengguang Song and Felix Wolf} } @article {icl:241, title = {Building and using a Fault Tolerant MPI implementation}, journal = {International Journal of High Performance Applications and Supercomputing (to appear)}, year = {2004}, month = {2004-00}, keywords = {ftmpi, lacsi, sans}, author = {Graham Fagg and Jack Dongarra} } @article {icl:236, title = {Cray X1 Evaluation Status Report}, journal = {Oak Ridge National Laboratory Report}, volume = {ORNL/TM-2004/13}, year = {2004}, month = {2004-01}, author = {Pratul Agarwal and R. A. Alexander and E. Apra and Satish Balay and Arthur S. Bland and James Colgan and Eduardo D{\textquoteright}Azevedo and Jack Dongarra and Tom Dunigan and Mark Fahey and Al Geist and M. Gordon and Robert Harrison and Dinesh Kaushik and M. Krishnakumar and Piotr Luszczek and Tony Mezzacapa and Jeff Nichols and Jarek Nieplocha and Leonid Oliker and T. Packwood and M. Pindzola and Thomas C. Schulthess and Jeffrey Vetter and James B White and T. Windus and Patrick H.
Worley and Thomas Zacharia} } @inproceedings {icl:201, title = {Design of an Interactive Environment for Numerically Intensive Parallel Linear Algebra Calculations}, journal = {International Conference on Computational Science}, year = {2004}, month = {2004-06}, publisher = {Springer Verlag}, address = {Poland}, keywords = {lacsi, lfc}, doi = {10.1007/978-3-540-25944-2_35}, author = {Piotr Luszczek and Jack Dongarra}, editor = {Marian Bubak and Geert Dick van Albada and Peter M. Sloot and Jack Dongarra} } @inproceedings {icl:232, title = {Efficient Pattern Search in Large Traces through Successive Refinement}, journal = {Proceedings of Euro-Par 2004}, year = {2004}, month = {2004-08}, publisher = {Springer-Verlag}, address = {Pisa, Italy}, keywords = {kojak}, author = {Felix Wolf and Bernd Mohr and Jack Dongarra and Shirley Moore} } @inproceedings {icl:230, title = {Extending the MPI Specification for Process Fault Tolerance on High Performance Computing Systems}, journal = {Proceedings of ISC2004 (to appear)}, year = {2004}, month = {2004-06}, address = {Heidelberg, Germany}, keywords = {ftmpi, lacsi}, author = {Graham Fagg and Edgar Gabriel and George Bosilca and Thara Angskun and Zizhong Chen and Jelena Pjesivac{\textendash}Grbovic and Kevin London and Jack Dongarra} } @inproceedings {icl:237, title = {Improvements in the Efficient Composition of Applications}, journal = {IPDPS 2004, NGS Workshop (to appear)}, year = {2004}, month = {2004-00}, address = {Santa Fe}, keywords = {salsa, sans}, author = {Thomas Eidson and Victor Eijkhout and Jack Dongarra} } @inproceedings {icl:142, title = {LAPACK for Clusters Project: An Example of Self Adapting Numerical Software}, journal = {Proceedings of the 37th Annual Hawaii International Conference on System Sciences (HICSS 04{\textquoteright})}, volume = {9}, year = {2004}, month = {2004-01}, pages = {90282}, address = {Big Island, Hawaii}, keywords = {lacsi, lfc}, author = {Zizhong Chen and Jack Dongarra and Piotr Luszczek and Kenneth Roche} } @techreport {icl:200, title = {NetBuild: Automated Installation and Use of Network-Accessible Software Libraries}, journal = {ICL Technical Report}, number = {ICL-UT-04-02}, year = {2004}, month = {2004-01}, keywords = {netbuild}, author = {Keith Moore and Jack Dongarra and Shirley Moore and Eric Grosse} } @techreport {icl:234, title = {Numerically Stable Real-Number Codes Based on Random Matrices}, journal = {University of Tennessee Computer Science Department Technical Report}, volume = {UT-CS-04-526}, year = {2004}, month = {2004-10}, keywords = {ftmpi}, author = {Zizhong Chen and Jack Dongarra} } @article {icl:247, title = {An Overview of Heterogeneous High Performance and Grid Computing}, journal = {Engineering the Grid (to appear)}, year = {2004}, month = {2004-00}, publisher = {Nova Science Publishers, Inc.}, author = {Jack Dongarra and Alexey Lastovetsky}, editor = {Beniamino Di Martino and Jack Dongarra and Adolfy Hoisie and Laurence Yang and Hans Zima} } @techreport {icl:199, title = {Performance of Various Computers Using Standard Linear Equations Software (Linpack Benchmark Report)}, journal = {University of Tennessee Computer Science Department Technical Report, CS-89-85}, year = {2004}, month = {2004-01}, author = {Jack Dongarra} } @article {icl:240, title = {Process Fault-Tolerance: Semantics, Design and Applications for High Performance Computing}, journal = {International Journal for High Performance Applications and Supercomputing (to appear)}, year = {2004}, month = {2004-04},
keywords = {ftmpi, lacsi}, author = {Graham Fagg and Edgar Gabriel and Zizhong Chen and Thara Angskun and George Bosilca and Jelena Pjesivac{\textendash}Grbovic and Jack Dongarra} } @techreport {icl:251, title = {Recovery Patterns for Iterative Methods in a Parallel Unstable Environment}, journal = {ICL Technical Report}, number = {ICL-UT-04-04}, year = {2004}, month = {2004-01}, author = {George Bosilca and Zizhong Chen and Jack Dongarra and Julien Langou} } @inproceedings {icl:238, title = {Self Adapting Linear Algebra Algorithms and Software}, journal = {IEEE Proceedings (to appear)}, year = {2004}, month = {2004-00}, keywords = {salsa, sans}, author = {James Demmel and Jack Dongarra and Victor Eijkhout and Erika Fuentes and Antoine Petitet and Rich Vuduc and Clint Whaley and Katherine Yelick} } @article {icl:167, title = {Towards an Accurate Model for Collective Communications}, journal = {International Journal of High Performance Applications, Special Issue: Automatic Performance Tuning}, volume = {18}, number = {1}, year = {2004}, month = {2004-01}, pages = {159-167}, keywords = {lacsi}, author = {Sathish Vadhiyar and Graham Fagg and Jack Dongarra} } @article {icl:245, title = {Trends in High Performance Computing}, journal = {The Computer Journal}, volume = {47}, number = {4}, year = {2004}, month = {2004-00}, pages = {399-403}, publisher = {The British Computer Society}, author = {Jack Dongarra} } @article {icl:202, title = {The Virtual Instrument: Support for Grid-enabled Scientific Simulations}, journal = {International Journal of High Performance Computing Applications}, volume = {18}, number = {1}, year = {2004}, month = {2004-01}, pages = {3-17}, author = {Henri Casanova and Thomas Bartol and Francine Berman and Adam Birnbaum and Jack Dongarra and Mark Ellisman and Marcio Faerman and Erhan Gockay and Michelle Miller and Graziano Obertelli and Stuart Pomerantz and Terry Sejnowski and Joel Stiles and Rich Wolski} } @inproceedings {icl:137, title = {Applying Aspect-Oriented Programming Concepts to a Component-based Programming Model}, journal = {IPDPS 2003, Workshop on NSF-Next Generation Software}, year = {2003}, month = {2003-03}, address = {Nice, France}, keywords = {salsa, sans}, author = {Thomas Eidson and Jack Dongarra and Victor Eijkhout} } @article {icl:156, title = {Automatic Translation of Fortran to JVM Bytecode}, journal = {Concurrency and Computation: Practice and Experience}, volume = {15}, number = {3-5}, year = {2003}, month = {2003-00}, pages = {202-207}, keywords = {f2j}, author = {Keith Seymour and Jack Dongarra} } @article {icl:169, title = {Computational Science {\textemdash} ICCS 2003}, journal = {Lecture Notes in Computer Science}, volume = {2657-2660}, year = {2003}, month = {2003-06}, publisher = {Springer-Verlag, Berlin}, address = {ICCS 2003, International Conference. Melbourne, Australia}, isbn = {978-3-540-40194-0}, author = {Peter M. Sloot and David Abramson and Alexander V.
Bogdanov and Jack Dongarra and Albert Zomaya and Yuriy Gorbachev} } @article {icl:175, title = {Distributed Probabilistic Model-Building Genetic Algorithm}, journal = {Lecture Notes in Computer Science}, volume = {2723}, year = {2003}, month = {2003-01}, pages = {1015-1028}, publisher = {Springer-Verlag, Heidelberg}, author = {Tomoyuki Hiroyasu and Mitsunori Miki and Masaki Sano and Hisashi Shimosaka and Shigeyoshi Tsutsui and Jack Dongarra} } @article {icl:131, title = {Energy Minimization of Protein Tertiary Structure by Parallel Simulated Annealing using Genetic Crossover}, journal = {Special Issue on Biological Applications of Genetic and Evolutionary Computation (submitted)}, year = {2003}, month = {2003-03}, author = {Tomoyuki Hiroyasu and Mitsunori Miki and Shinya Ogura and Keiko Aoi and Takeshi Yoshida and Yuko Okamoto and Jack Dongarra} } @article {icl:145, title = {Evaluating The Performance Of MPI-2 Dynamic Communicators And One-Sided Communication}, journal = {Lecture Notes in Computer Science, Recent Advances in Parallel Virtual Machine and Message Passing Interface, 10th European PVM/MPI Users{\textquoteright} Group Meeting}, volume = {2840}, year = {2003}, month = {2003-09}, pages = {88-97}, publisher = {Springer-Verlag, Berlin}, address = {Venice, Italy}, keywords = {ftmpi}, author = {Edgar Gabriel and Graham Fagg and Jack Dongarra} } @conference {icl:130, title = {Experiences and Lessons Learned with a Portable Interface to Hardware Performance Counters}, booktitle = {PADTAD Workshop, IPDPS 2003}, year = {2003}, month = {2003-04}, publisher = {IEEE}, organization = {IEEE}, address = {Nice, France}, abstract = {The PAPI project has defined and implemented a cross-platform interface to the hardware counters available on most modern microprocessors. The interface has gained widespread use and acceptance from hardware vendors, users, and tool developers. This paper reports on experiences with the community-based open-source effort to define the PAPI specification and implement it on a variety of platforms. Collaborations with tool developers who have incorporated support for PAPI are described. Issues related to interpretation and accuracy of hardware counter data and to the overheads of collecting this data are discussed. 
The paper concludes with implications for the design of the next version of PAPI.}, keywords = {lacsi, papi}, isbn = {0-7695-1926-1}, author = {Jack Dongarra and Kevin London and Shirley Moore and Phil Mucci and Dan Terpstra and Haihang You and Min Zhou} } @inproceedings {icl:153, title = {Fault Tolerant Communication Library and Applications for High Performance Computing}, journal = {Los Alamos Computer Science Institute (LACSI) Symposium 2003 (presented)}, year = {2003}, month = {2003-10}, address = {Santa Fe, NM}, keywords = {ftmpi, lacsi}, author = {Graham Fagg and Edgar Gabriel and Zizhong Chen and Thara Angskun and George Bosilca and Antonin Bukovsky and Jack Dongarra} } @inproceedings {icl:144, title = {A Fault-Tolerant Communication Library for Grid Environments}, journal = {17th Annual ACM International Conference on Supercomputing (ICS{\textquoteright}03) International Workshop on Grid Computing and e-Science}, year = {2003}, month = {2003-06}, address = {San Francisco}, keywords = {ftmpi, lacsi}, author = {Edgar Gabriel and Graham Fagg and Antonin Bukovsky and Thara Angskun and Jack Dongarra} } @techreport {icl:208, title = {Finite-choice Algorithm Optimization in Conjugate Gradients (LAPACK Working Note 159)}, journal = {University of Tennessee Computer Science Technical Report, UT-CS-03-502}, year = {2003}, month = {2003-01}, author = {Jack Dongarra and Victor Eijkhout} } @article {icl:133, title = {GrADSolve - A Grid-based RPC System for Remote Invocation of Parallel Software}, journal = {Journal of Parallel and Distributed Computing (submitted)}, year = {2003}, month = {2003-03}, keywords = {grads}, author = {Sathish Vadhiyar and Jack Dongarra} } @inproceedings {icl:172, title = {GrADSolve - RPC for High Performance Computing on the Grid}, journal = {Lecture Notes in Computer Science, Proceedings of the 9th International Euro-Par Conference}, volume = {2790}, year = {2003}, month = {2003-01}, pages = {394-403}, publisher = {Springer-Verlag, Berlin}, address = {Klagenfurt, Austria}, keywords = {netsolve}, doi = {10.1007/978-3-540-45209-6_58}, author = {Sathish Vadhiyar and Jack Dongarra and Asim YarKhan}, editor = {Harald Kosch and Laszlo Boszormenyi and Hermann Hellwagner} } @article {icl:168, title = {High Performance Computing for Computational Science}, journal = {Lecture Notes in Computer Science}, volume = {2565}, year = {2003}, month = {2003-01}, publisher = {Springer-Verlag, Berlin}, address = {VECPAR 2002, 5th International Conference June 26-28, 2002}, author = {Jose Palma and Jack Dongarra and Vicente Hern{\'a}ndez}, editor = {Antonio Augusto Sousa} } @inproceedings {icl:180, title = {High Performance Computing Trends and Self Adapting Numerical Software}, journal = {Lecture Notes in Computer Science, High Performance Computing, 5th International Symposium ISHPC}, volume = {2858}, year = {2003}, month = {2003-01}, pages = {1-9}, publisher = {Springer-Verlag, Heidelberg}, address = {Tokyo-Odaiba, Japan}, author = {Jack Dongarra} } @inproceedings {icl:178, title = {High Performance Computing Trends, Supercomputers, Clusters, and Grids}, journal = {Information Processing Society of Japan Symposium Series}, volume = {2003}, number = {14}, year = {2003}, month = {2003-01}, pages = {55-58}, author = {Jack Dongarra} } @article {icl:143, title = {NetSolve: Past, Present, and Future - A Look at a Grid Enabled Server}, journal = {Making the Global Infrastructure a Reality}, year = {2003}, month = {2003-00}, publisher = {Wiley Publishing}, keywords = {netsolve}, author = 
{Sudesh Agrawal and Jack Dongarra and Keith Seymour and Sathish Vadhiyar}, editor = {Francine Berman and Geoffrey Fox and Anthony Hey} } @inproceedings {icl:179, title = {Optimization of Injection Schedule of Diesel Engine Using GridRPC}, journal = {Information Processing Society of Japan Symposium Series}, volume = {2003}, number = {14}, year = {2003}, month = {2003-01}, pages = {189-197}, author = {Tomoyuki Hiroyasu and Mitsunori Miki and Junji Sawada and Jack Dongarra} } @inproceedings {icl:128, title = {Optimization Problem Solving System using Grid RPC}, journal = {3rd IEEE/ACM International Symposium on Cluster Computing and the Grid}, year = {2003}, month = {2003-03}, address = {Tokyo, Japan}, author = {Tomoyuki Hiroyasu and Mitsunori Miki and Hisashi Shimosaka and Jack Dongarra} } @inproceedings {icl:171, title = {Optimizing Performance and Reliability in Distributed Computing Systems Through Wide Spectrum Storage}, journal = {Proceedings of the IPDPS 2003, NGS Workshop}, year = {2003}, month = {2003-01}, pages = {209}, address = {Nice, France}, author = {James Plank and Micah Beck and Jack Dongarra and Rich Wolski and Henri Casanova} } @article {icl:205, title = {A Parallel Implementation of the Nonsymmetric QR Algorithm for Distributed Memory Architectures}, journal = {SIAM Journal on Scientific Computing}, volume = {24}, number = {1}, year = {2003}, month = {2003-01}, pages = {284-311}, author = {Greg Henry and David Watkins and Jack Dongarra} } @conference {icl:159, title = {Performance Instrumentation and Measurement for Terascale Systems}, booktitle = {ICCS 2003 Terascale Workshop}, year = {2003}, month = {2003-06}, publisher = {Springer, Berlin, Heidelberg}, organization = {Springer, Berlin, Heidelberg}, address = {Melbourne, Australia}, abstract = {As computer systems grow in size and complexity, tool support is needed to facilitate the efficient mapping of large-scale applications onto these systems. To help achieve this mapping, performance analysis tools must provide robust performance observation capabilities at all levels of the system, as well as map low-level behavior to high-level program constructs. Instrumentation and measurement strategies, developed over the last several years, must evolve together with performance analysis infrastructure to address the challenges of new scalable parallel systems.}, keywords = {papi}, doi = {https://doi.org/10.1007/3-540-44864-0_6}, author = {Jack Dongarra and Allen D. Malony and Shirley Moore and Phil Mucci and Sameer Shende} } @article {icl:170, title = {Recent Advances in Parallel Virtual Machine and Message Passing Interface}, journal = {Lecture Notes in Computer Science}, volume = {2840}, year = {2003}, month = {2003-01}, publisher = {Springer-Verlag, Berlin}, author = {Jack Dongarra and Domenico Laforenza and S. 
Orlando} } @inproceedings {icl:158, title = {Scalable, Trustworthy Network Computing Using Untrusted Intermediaries: A Position Paper}, journal = {DOE/NSF Workshop on New Directions in Cyber-Security in Large-Scale Networks: Development Obstacles}, year = {2003}, month = {2003-03}, address = {National Conference Center - Lansdowne, Virginia}, keywords = {netsolve}, author = {Micah Beck and Jack Dongarra and Victor Eijkhout and Mike Langston and Terry Moore and James Plank} } @article {icl:138, title = {Scheduling in the Grid Application Development Software Project}, journal = {Resource Management in the Grid}, year = {2003}, month = {2003-03}, publisher = {Kluwer Publishers}, keywords = {grads}, author = {Holly Dail and Otto Sievert and Francine Berman and Henri Casanova and Asim YarKhan and Sathish Vadhiyar and Jack Dongarra and Chuang Liu and Lingyun Yang and Dave Angulo and Ian Foster} } @article {icl:135, title = {Self Adaptability in Grid Computing}, journal = {Concurrency: Practice and Experience (submitted)}, year = {2003}, month = {2003-03}, keywords = {sans}, author = {Sathish Vadhiyar and Jack Dongarra} } @article {icl:184, title = {Self Adapting Numerical Algorithm for Next Generation Applications}, journal = {International Journal of High Performance Computing Applications}, volume = {17}, number = {2}, year = {2003}, month = {2003-01}, pages = {125-132}, keywords = {lacsi, sans}, author = {Jack Dongarra and Victor Eijkhout} } @article {icl:136, title = {Self Adapting Software for Numerical Linear Algebra and LAPACK for Clusters}, journal = {Parallel Computing}, volume = {29}, number = {11-12}, year = {2003}, month = {2003-11}, pages = {1723-1743}, keywords = {lacsi, lfc, sans}, author = {Zizhong Chen and Jack Dongarra and Piotr Luszczek and Kenneth Roche} } @techreport {icl:209, title = {Self Adapting Software for Numerical Linear Algebra and LAPACK for Clusters (LAPACK Working Note 160)}, journal = {University of Tennessee Computer Science Technical Report, UT-CS-03-499}, year = {2003}, month = {2003-01}, keywords = {lacsi}, author = {Zizhong Chen and Jack Dongarra and Piotr Luszczek and Kenneth Roche} } @article {icl:127, title = {Self-Adapting Numerical Software and Automatic Tuning of Heuristics}, journal = {Lecture Notes in Computer Science}, volume = {2660}, year = {2003}, month = {2003-06}, pages = {759-770}, publisher = {Springer Verlag}, address = {Melbourne, Australia}, keywords = {salsa, sans}, author = {Jack Dongarra and Victor Eijkhout} } @article {icl:139, title = {The Semantic Conference Organizer}, journal = {Statistical Data Mining and Knowledge Discovery}, year = {2003}, month = {2003-00}, publisher = {CRC Press}, keywords = {netsolve}, author = {Kevin Heinrich and Michael Berry and Jack Dongarra and Sathish Vadhiyar}, editor = {Hamparsum Bozdogan} } @inproceedings {icl:129, title = {A Simple Installation and Administration Tool for Large-scaled PC Cluster System}, journal = {ClusterWorld Conference and Expo}, year = {2003}, month = {2003-03}, address = {San Jose, CA}, author = {Tomoyuki Hiroyasu and Mitsunori Miki and Kenzo Kodama and Junichi Uekawa and Jack Dongarra} } @article {icl:132, title = {SRS - A Framework for Developing Malleable and Migratable Parallel Software}, journal = {Parallel Processing Letters}, volume = {13}, number = {2}, year = {2003}, month = {2003-06}, pages = {291-312}, keywords = {grads}, author = {Sathish Vadhiyar and Jack Dongarra} } @inproceedings {icl:177, title = {Static Scheduling for ScaLAPACK on the Grid Using Genetic 
Algorithm}, journal = {Information Processing Society of Japan Symposium Series}, volume = {2003}, number = {14}, year = {2003}, month = {2003-01}, pages = {3-10}, author = {Tomoyuki Hiroyasu and Mitsunori Miki and Hiroki Saito and Yusuke Tanimura and Jack Dongarra} } @article {icl:174, title = {VisPerf: Monitoring Tool for Grid Computing}, journal = {Lecture Notes in Computer Science}, volume = {2659}, year = {2003}, month = {2003-00}, pages = {233-243}, publisher = {Springer Verlag, Heidelberg}, keywords = {netsolve}, author = {DongWoo Lee and Jack Dongarra}, editor = {R. S. Ramakrishna} } @article {icl:92, title = {Active Netlib: An Active Mathematical Software Collection for Inquiry-based Computational Science and Engineering Education}, journal = {Journal of Digital Information special issue on Interactivity in Digital Libraries}, volume = {2}, number = {4}, year = {2002}, month = {2002-00}, keywords = {activenetlib, rib}, author = {Shirley Moore and A.J. Baker and Jack Dongarra and Christian Halloy and Chung Ng} } @article {icl:119, title = {Adaptive Scheduling for Task Farming with Grid Middleware}, journal = {International Journal of Supercomputer Applications and High-Performance Computing}, volume = {13}, number = {3}, year = {2002}, month = {2002-10}, pages = {231-240}, author = {Henri Casanova and Myung Ho Kim and James Plank and Jack Dongarra} } @article {icl:121, title = {Algorithmic Redistribution Methods for Block Cyclic Decompositions}, journal = {IEEE Transactions on Parallel and Distributed Systems}, volume = {10}, number = {12}, year = {2002}, month = {2002-10}, pages = {201-220}, author = {Antoine Petitet and Jack Dongarra} } @article {icl:80, title = {Automatic Optimisation of Parallel Linear Algebra Routines in Systems with Variable Load}, journal = {EuroPar 2002}, year = {2002}, month = {2002-08}, address = {Paderborn, Germany}, author = {Javier Cuenca and Domingo Gim{\'e}nez and Jos{\'e} Gonz{\'a}lez and Jack Dongarra and Kenneth Roche} } @article {icl:109, title = {Biannual Top-500 Computer Lists Track Changing Environments for Scientific Computing}, journal = {SIAM News}, volume = {34}, number = {9}, year = {2002}, month = {2002-10}, keywords = {top500}, author = {Jack Dongarra and Hans Meuer and Horst D. Simon and Erich Strohmaier} } @article {icl:113, title = {A Comparison of Parallel Solvers for General Narrow Banded Linear Systems}, journal = {Parallel and Distributed Computing Practices}, volume = {2}, year = {2002}, month = {2002-10}, pages = {385-400}, author = {Peter Arbenz and Andrew Cleary and Jack Dongarra and Markus Hegland} } @inproceedings {icl:93, title = {Deploying Parallel Numerical Library Routines to Cluster Computing in a Self Adapting Fashion}, journal = {Parallel Computing: Advances and Current Issues: Proceedings of the International Conference ParCo2001}, year = {2002}, month = {2002-01}, publisher = {Imperial College Press}, address = {London, England}, keywords = {lfc, sans}, author = {Kenneth Roche and Jack Dongarra}, editor = {Gerhard R. 
Joubert and Almerica Murli and Frans Peters and Marco Vanneschi} } @inproceedings {icl:98, title = {Experiments with Scheduling Using Simulated Annealing in a Grid Environment}, journal = {Grid Computing - GRID 2002, Third International Workshop}, volume = {2536}, year = {2002}, month = {2002-11}, pages = {232-242}, publisher = {Springer}, address = {Baltimore, MD}, keywords = {grads}, author = {Asim YarKhan and Jack Dongarra}, editor = {Manish Parashar} } @techreport {icl:97, title = {GridRPC: A Remote Procedure Call API for Grid Computing}, journal = {ICL Technical Report}, number = {ICL-UT-02-06}, year = {2002}, month = {2002-11}, author = {Keith Seymour and Hidemoto Nakada and Satoshi Matsuoka and Jack Dongarra and Craig Lee and Henri Casanova} } @article {icl:213, title = {HARNESS Fault Tolerant MPI Design, Usage and Performance Issues}, journal = {Future Generation Computer Systems}, volume = {18}, number = {8}, year = {2002}, month = {2002-01}, pages = {1127-1142}, author = {Graham Fagg and Jack Dongarra} } @article {icl:207, title = {Innovations of the NetSolve Grid Computing System}, journal = {Concurrency: Practice and Experience}, volume = {14}, number = {13-15}, year = {2002}, month = {2002-01}, pages = {1457-1479}, keywords = {netsolve}, author = {Dorian Arnold and Henri Casanova and Jack Dongarra} } @article {icl:10, title = {An Iterative Solver Benchmark}, journal = {Scientific Programming (to appear)}, year = {2002}, month = {2002-00}, author = {Jack Dongarra and Victor Eijkhout and Henk van der Vorst} } @article {icl:116, title = {JLAPACK - Compiling LAPACK Fortran to Java}, journal = {Scientific Programming}, volume = {7}, number = {2}, year = {2002}, month = {2002-10}, pages = {111-138}, keywords = {f2j}, author = {David Doolin and Jack Dongarra and Keith Seymour} } @article {icl:115, title = {The Marketplace for High-Performance Computers}, journal = {Parallel Computing}, volume = {25}, number = {13-14}, year = {2002}, month = {2002-10}, pages = {1517-1545}, author = {Erich Strohmaier and Jack Dongarra and Hans Meuer and Horst D. 
Simon} } @inproceedings {icl:94, title = {A Metascheduler For The Grid}, journal = {Proceedings of the 11th IEEE International Symposium on High Performance Distributed Computing (HPDC 2002)}, year = {2002}, month = {2002-07}, pages = {343-351}, publisher = {IEEE Computer Society}, address = {Edinburgh, Scotland}, keywords = {grads}, author = {Sathish Vadhiyar and Jack Dongarra} } @article {icl:101, title = {Middleware for the Use of Storage in Communication}, journal = {Parallel Computing}, volume = {28}, number = {12}, year = {2002}, month = {2002-08}, pages = {1773-1788}, keywords = {netsolve}, author = {Micah Beck and Dorian Arnold and Alessandro Bassi and Francine Berman and Henri Casanova and Jack Dongarra and Terry Moore and Graziano Obertelli and James Plank and Martin Swany and Sathish Vadhiyar and Rich Wolski} } @article {icl:110, title = {NetBuild: Transparent Cross-Platform Access to Computational Software Libraries}, journal = {Concurrency and Computation: Practice and Experience, Special Issue: Grid Computing Environments}, volume = {14}, number = {13-15}, year = {2002}, month = {2002-11}, pages = {1445-1456}, keywords = {netbuild}, author = {Keith Moore and Jack Dongarra} } @article {icl:108, title = {Numerical Libraries and Tools for Scalable Parallel Cluster Computing}, journal = {International Journal of High Performance Applications and Supercomputing}, volume = {15}, number = {2}, year = {2002}, month = {2002-10}, pages = {175-180}, author = {Shirley Browne and Jack Dongarra and Anne Trefethen} } @article {icl:104, title = {Optimization System Using Grid RPC}, journal = {Meeting of the Japan Society of Mechanical Engineers}, year = {2002}, month = {2002-10}, address = {Kyoto University, Kyoto, Japan}, author = {Tomoyuki Hiroyasu and Mitsunori Miki and Hisashi Shimosaka and Yusuke Tanimura and Jack Dongarra} } @inproceedings {icl:187, title = {Overview of GridRPC: A Remote Procedure Call API for Grid Computing}, journal = {Proceedings of the Third International Workshop on Grid Computing}, year = {2002}, month = {2002-01}, pages = {274-278}, author = {Keith Seymour and Hidemoto Nakada and Satoshi Matsuoka and Jack Dongarra and Craig Lee and Henri Casanova}, editor = {Manish Parashar} } @article {icl:117, title = {Parallelizing the Divide and Conquer Algorithm for the Symmetric Tridiagonal Eigenvalue Problem on Distributed Memory Architectures}, journal = {SIAM Journal on Scientific Computing}, volume = {20}, number = {6}, year = {2002}, month = {2002-10}, pages = {2223-2236}, author = {Fran{\c c}oise Tisseur and Jack Dongarra} } @techreport {icl:102, title = {Self-adapting Numerical Software for Next Generation Applications (LAPACK Working Note 157)}, journal = {ICL Technical Report}, number = {ICL-UT-02-07}, year = {2002}, month = {2002-00}, keywords = {salsa, sans}, author = {Jack Dongarra and Victor Eijkhout} } @article {icl:120, title = {Stochastic Performance Prediction for Iterative Algorithms in Distributed Environments}, journal = {Journal of Parallel and Distributed Computing}, volume = {98}, number = {1}, year = {2002}, month = {2002-10}, pages = {68-91}, author = {Henri Casanova and Michael G. 
Thomason and Jack Dongarra} } @inproceedings {icl:79, title = {Toward a Framework for Preparing and Executing Adaptive Grid Programs}, journal = {International Parallel and Distributed Processing Symposium: IPDPS 2002 Workshops}, year = {2002}, month = {2002-04}, pages = {0171}, address = {Fort Lauderdale, FL}, keywords = {grads}, author = {Ken Kennedy and John Mellor-Crummey and Keith Cooper and Linda Torczon and Francine Berman and Andrew Chien and Dave Angulo and Ian Foster and Dennis Gannon and Lennart Johnsson and Carl Kesselman and Jack Dongarra and Sathish Vadhiyar} } @article {icl:103, title = {Truss Structural Optimization Using NetSolve System}, journal = {Meeting of the Japan Society of Mechanical Engineers}, year = {2002}, month = {2002-10}, address = {Kyoto University, Kyoto, Japan}, keywords = {netsolve}, author = {Tomoyuki Hiroyasu and Mitsunori Miki and Hisashi Shimosaka and Masaki Sano and Yusuke Tanimura and Yasunari Mimura and Shinobu Yoshimura and Jack Dongarra} } @article {icl:125, title = {An Updated Set of Basic Linear Algebra Subprograms (BLAS)}, journal = {ACM Transactions on Mathematical Software}, volume = {28}, number = {2}, year = {2002}, month = {2002-12}, pages = {135-151}, doi = {10.1145/567806.567807}, author = {Susan Blackford and James Demmel and Jack Dongarra and Iain Duff and Sven Hammarling and Greg Henry and Michael Heroux and Linda Kaufman and Andrew Lumsdaine and Antoine Petitet and Roldan Pozo and Karin Remington and Clint Whaley} } @techreport {icl:96, title = {Users{\textquoteright} Guide to NetSolve v1.4.1}, journal = {ICL Technical Report}, number = {ICL-UT-02-05}, year = {2002}, month = {2002-06}, keywords = {netsolve}, author = {Sudesh Agrawal and Dorian Arnold and Susan Blackford and Jack Dongarra and Michelle Miller and Kiran Sagi and Zhiao Shi and Keith Seymour and Sathish Vadhiyar} } @article {icl:95, title = {The Virtual Instrument: Support for Grid-enabled Scientific Simulations}, journal = {Journal of Parallel and Distributed Computing (submitted)}, year = {2002}, month = {2002-10}, author = {Henri Casanova and Thomas Bartol and Francine Berman and Adam Birnbaum and Jack Dongarra and Mark Ellisman and Marcio Faerman and Erhan Gockay and Michelle Miller and Graziano Obertelli and Stuart Pomerantz and Terry Sejnowski and Joel Stiles and Rich Wolski} } @article {icl:85, title = {Automated Empirical Optimization of Software and the ATLAS Project}, journal = {Parallel Computing}, volume = {27}, number = {1-2}, year = {2001}, month = {2001-01}, pages = {3-25}, keywords = {atlas}, author = {Clint Whaley and Antoine Petitet and Jack Dongarra} } @inproceedings {icl:22, title = {Automatic Translation of Fortran to JVM Bytecode}, journal = {Joint ACM Java Grande - ISCOPE 2001 Conference (submitted)}, year = {2001}, month = {2001-06}, address = {Stanford University, California}, keywords = {f2j}, author = {Keith Seymour and Jack Dongarra} } @article {icl:6, title = {Basic Linear Algebra Subprograms (BLAS)}, journal = {submitted to ACM TOMS (an update)}, year = {2001}, month = {2001-02}, author = {Susan Blackford and James Demmel and Jack Dongarra and Iain Duff and Sven Hammarling and Greg Henry and Michael Heroux and Linda Kaufman and Andrew Lumsdaine and Antoine Petitet and Roldan Pozo and Karin Remington and Clint Whaley} } @article {icl:87, title = {On the Convergence of Computational and Data Grids}, journal = {Parallel Processing Letters}, volume = {11}, number = {2-3}, year = {2001}, month = {2001-01}, pages = {187-202}, keywords = 
{netsolve}, author = {Dorian Arnold and Sathish Vadhiyar and Jack Dongarra} } @conference {icl:15, title = {End-user Tools for Application Performance Analysis Using Hardware Counters}, booktitle = {International Conference on Parallel and Distributed Computing Systems}, year = {2001}, month = {2001-08}, address = {Dallas, TX}, abstract = {One purpose of the end-user tools described in this paper is to give users a graphical representation of performance information that has been gathered by instrumenting an application with the PAPI library. PAPI is a project that specifies a standard API for accessing hardware performance counters available on most modern microprocessors. These counters exist as a small set of registers that count {\textquotedblleft}events{\textquotedblright}, which are occurrences of specific signals and states related to a processor{\textquoteright}s function. Monitoring these events facilitates correlation between the structure of source/object code and the efficiency of the mapping of that code to the underlying architecture. The perfometer tool developed by the PAPI project provides a graphical view of this information, allowing users to quickly see where performance bottlenecks are in their application. Only one function call has to be added by the user to their program to take advantage of perfometer. This makes it quick and simple to add and remove instrumentation from a program. Also, perfometer allows users to change the {\textquotedblleft}event{\textquotedblright} they are monitoring. With the added ability to monitor parallel applications, to set alarms, and to run the Java front-end anywhere, the user has a powerful tool for quickly discovering where and why a bottleneck exists. A number of third-party tools for analyzing performance of message-passing and/or threaded programs have also incorporated support for PAPI so as to be able to display and analyze hardware counter data from their interfaces.}, keywords = {papi}, author = {Kevin London and Jack Dongarra and Shirley Moore and Phil Mucci and Keith Seymour and T. Spencer} } @inproceedings {icl:203, title = {Fault Tolerant MPI for the HARNESS Meta-Computing System}, journal = {Proceedings of the International Conference on Computational Science - ICCS 2001, Lecture Notes in Computer Science}, volume = {2073}, year = {2001}, month = {2001-00}, pages = {355-366}, publisher = {Springer Verlag}, address = {Berlin}, keywords = {ftmpi, harness}, doi = {10.1007/3-540-45545-0_44}, author = {Graham Fagg and Antonin Bukovsky and Jack Dongarra}, editor = {Benjoe A. Juliano and R. Renner and K. 
Tan} } @article {icl:90, title = {The GrADS Project: Software Support for High-Level Grid Application Development}, journal = {International Journal of High Performance Applications and Supercomputing}, volume = {15}, number = {4}, year = {2001}, month = {2001-01}, pages = {327-344}, keywords = {grads}, author = {Francine Berman and Andrew Chien and Keith Cooper and Jack Dongarra and Ian Foster and Dennis Gannon and Lennart Johnsson and Ken Kennedy and Carl Kesselman and John Mellor-Crummey and Dan Reed and Linda Torczon and Rich Wolski} } @inproceedings {icl:111, title = {Grid-Enabling Problem Solving Environments: A Case Study of SCIRUN and NetSolve}, journal = {Proceedings of the High Performance Computing Symposium (HPC 2001) in 2001 Advanced Simulation Technologies Conference}, year = {2001}, month = {2001-04}, publisher = {Society for Modeling and Simulation International}, address = {Seattle, Washington}, keywords = {netsolve}, author = {Michelle Miller and Christopher Moulding and Jack Dongarra and Christopher Johnson} } @article {icl:86, title = {HARNESS and Fault Tolerant MPI}, journal = {Parallel Computing}, volume = {27}, number = {11}, year = {2001}, month = {2001-01}, pages = {1479-1496}, author = {Graham Fagg and Antonin Bukovsky and Jack Dongarra} } @article {icl:222, title = {High Performance Computing Trends}, journal = {HERMIS}, volume = {2}, year = {2001}, month = {2001-11}, pages = {155-163}, author = {Jack Dongarra and Hans Meuer and Horst D. Simon and Erich Strohmaier} } @article {icl:223, title = {Iterative Solver Benchmark (LAPACK Working Note 152)}, journal = {Scientific Programming}, volume = {9}, number = {4}, year = {2001}, month = {2001-00}, pages = {223-231}, author = {Jack Dongarra and Victor Eijkhout and Henk van der Vorst} } @article {icl:4, title = {Logistical Computing and Internetworking: Middleware for the Use of Storage in Communication}, journal = {submitted to SC2001}, year = {2001}, month = {2001-11}, address = {Denver, Colorado}, keywords = {netsolve}, author = {Micah Beck and Dorian Arnold and Alessandro Bassi and Francine Berman and Henri Casanova and Jack Dongarra and Terry Moore and Graziano Obertelli and James Plank and Martin Swany and Sathish Vadhiyar and Rich Wolski} } @article {icl:221, title = {Measuring Computer Performance: A Practitioner{\textquoteright}s Guide}, journal = {SIAM Review (book review)}, volume = {43}, number = {2}, year = {2001}, month = {2001-00}, pages = {383-384}, author = {Jack Dongarra} } @techreport {icl:18, title = {NetBuild}, journal = {University of Tennessee Computer Science Technical Report}, number = {UT-CS-01-461}, year = {2001}, month = {2001-01}, keywords = {netbuild}, author = {Keith Moore and Jack Dongarra} } @inproceedings {icl:7, title = {Network-Enabled Server Systems: Deploying Scientific Simulations on the Grid}, journal = {2001 High Performance Computing Symposium (HPC{\textquoteright}01), part of the Advanced Simulation Technologies Conference}, year = {2001}, month = {2001-04}, address = {Seattle, Washington}, author = {Henri Casanova and Satoshi Matsuoka and Jack Dongarra} } @article {icl:224, title = {Network-Enabled Solvers: A Step Toward Grid-Based Computing}, journal = {SIAM News}, volume = {34}, number = {10}, year = {2001}, month = {2001-12}, author = {Jack Dongarra} } @article {icl:89, title = {Numerical Libraries and The Grid}, journal = {International Journal of High Performance Applications and Supercomputing}, volume = {15}, number = {4}, year = {2001}, month = {2001-01}, pages = 
{359-374}, keywords = {grads}, author = {Antoine Petitet and Susan Blackford and Jack Dongarra and Brett Ellis and Graham Fagg and Kenneth Roche and Sathish Vadhiyar} } @techreport {icl:21, title = {Numerical Libraries and The Grid: The GrADS Experiments with ScaLAPACK}, journal = {University of Tennessee Computer Science Technical Report}, number = {UT-CS-01-460}, year = {2001}, month = {2001-01}, keywords = {grads, scalapack}, author = {Antoine Petitet and Susan Blackford and Jack Dongarra and Brett Ellis and Graham Fagg and Kenneth Roche and Sathish Vadhiyar} } @article {icl:88, title = {Numerical Libraries and Tools for Scalable Parallel Cluster Computing}, journal = {International Journal of High Performance Applications and Supercomputing}, volume = {15}, number = {2}, year = {2001}, month = {2001-01}, pages = {175-180}, author = {Jack Dongarra and Shirley Moore and Anne Trefethen} } @article {icl:23, title = {Overview of High Performance Computers}, journal = {Handbook of Massive Data Sets}, year = {2001}, month = {2001-01}, pages = {791-852}, publisher = {Kluwer Academic Publishers}, author = {Aad J. van der Steen and Jack Dongarra}, editor = {James Abello and Panos Pardalos and Mauricio Resende} } @inproceedings {icl:78, title = {Performance Modeling for Self Adapting Collective Communications for MPI}, journal = {LACSI Symposium 2001}, year = {2001}, month = {2001-10}, address = {Santa Fe, NM}, keywords = {ftmpi}, author = {Sathish Vadhiyar and Graham Fagg and Jack Dongarra} } @techreport {icl:9, title = {Performance of Various Computers Using Standard Linear Equations Software (Linpack Benchmark Report)}, journal = {University of Tennessee Computer Science Technical Report}, number = {CS-89-85}, year = {2001}, month = {2001-01}, author = {Jack Dongarra} } @article {icl:12, title = {The Quest for Petascale Computing}, journal = {Computing in Science and Engineering}, volume = {3}, number = {3}, year = {2001}, month = {2001-05}, pages = {32-39}, author = {Jack Dongarra and David W. Walker} } @article {icl:81, title = {Recursive Approach in Sparse Matrix LU Factorization}, journal = {Scientific Programming}, volume = {9}, number = {1}, year = {2001}, month = {2001-00}, pages = {51-60}, author = {Jack Dongarra and Victor Eijkhout and Piotr Luszczek} } @article {icl:20, title = {Review of Performance Analysis Tools for MPI Parallel Programs}, journal = {European Parallel Virtual Machine / Message Passing Interface Users{\textquoteright} Group Meeting, Lecture Notes in Computer Science 2131}, year = {2001}, month = {2001-09}, pages = {241-248}, publisher = {Springer Verlag, Berlin}, address = {Greece}, abstract = {In order to produce MPI applications that perform well on today{\textquoteright}s parallel architectures, programmers need effective tools for collecting and analyzing performance data. A variety of such tools, both commercial and research, are becoming available. 
This paper reviews and evaluates the available cross-platform MPI performance analysis tools.}, keywords = {papi}, doi = {https://doi.org/10.1007/3-540-45417-9_34}, author = {Shirley Moore and David Cronk and Kevin London and Jack Dongarra} } @techreport {icl:46, title = {RIBAPI - Repository in a Box Application Programmer{\textquoteright}s Interface}, journal = {University of Tennessee Computer Science Technical Report}, number = {UT-CS-00-438}, year = {2001}, month = {2001-00}, keywords = {rib}, author = {Jeremy Millar and Paul McMahan and Jack Dongarra} } @article {icl:82, title = {Telescoping Languages: A Strategy for Automatic Generation of Scientific Problem-Solving Systems from Annotated Libraries}, journal = {Journal of Parallel and Distributed Computing}, volume = {61}, number = {12}, year = {2001}, month = {2001-12}, pages = {1803-1826}, author = {Ken Kennedy and Bradley Broom and Keith Cooper and Jack Dongarra and Rob Fowler and Dennis Gannon and Lennart Johnsson and John Mellor-Crummey and Linda Torczon} } @conference {icl:11, title = {Using PAPI for Hardware Performance Monitoring on Linux Systems}, booktitle = {Conference on Linux Clusters: The HPC Revolution}, year = {2001}, month = {2001-06}, publisher = {Linux Clusters Institute}, organization = {Linux Clusters Institute}, address = {Urbana, Illinois}, abstract = {PAPI is a specification of a cross-platform interface to hardware performance counters on modern microprocessors. These counters exist as a small set of registers that count events, which are occurrences of specific signals related to a processor{\textquoteright}s function. Monitoring these events has a variety of uses in application performance analysis and tuning. The PAPI specification consists of both a standard set of events deemed most relevant for application performance tuning, as well as both high-level and low-level sets of routines for accessing the counters. The high-level interface simply provides the ability to start, stop, and read sets of events, and is intended for the acquisition of simple but accurate measurement by application engineers. The fully programmable low-level interface provides sophisticated options for controlling the counters, such as setting thresholds for interrupt on overflow, as well as access to all native counting modes and events, and is intended for third-party tool writers or users with more sophisticated needs. PAPI has been implemented on a number of platforms, including Linux/x86 and Linux/IA-64. The Linux/x86 implementation requires a kernel patch that provides a driver for the hardware counters. The driver memory maps the counter registers into user space and allows virtualizing the counters on a per-process or per-thread basis. The kernel patch is being proposed for inclusion in the main Linux tree. The PAPI library provides access on Linux platforms not only to the standard set of events mentioned above but also to all the Linux/x86 and Linux/IA-64 native events. PAPI has been installed and is in use, either directly or through incorporation into third-party end-user performance analysis tools, on a number of Linux clusters, including the New Mexico LosLobos cluster and Linux clusters at NCSA and the University of Tennessee being used for the GrADS (Grid Application Development Software) project. 
}, keywords = {papi}, author = {Jack Dongarra and Kevin London and Shirley Moore and Phil Mucci and Dan Terpstra} } @techreport {icl:49, title = {Automated Empirical Optimization of Software and the ATLAS Project (LAPACK Working Note 147)}, journal = {University of Tennessee Computer Science Department Technical Report}, number = {UT-CS-00-448}, year = {2000}, month = {2000-09}, keywords = {atlas}, author = {Clint Whaley and Antoine Petitet and Jack Dongarra} } @inproceedings {icl:48, title = {Automatically Tuned Collective Communications}, journal = {Proceedings of SuperComputing 2000 (SC{\textquoteright}2000)}, year = {2000}, month = {2000-11}, address = {Dallas, TX}, keywords = {ftmpi}, author = {Sathish Vadhiyar and Graham Fagg and Jack Dongarra} } @techreport {icl:47, title = {Design and Implementation of NetSolve using DCOM as the Remoting Layer}, journal = {University of Tennessee Computer Science Department Technical Report}, number = {UT-CS-00-440}, year = {2000}, month = {2000-05}, keywords = {netsolve}, author = {Ganapathy Raman and Jack Dongarra} } @article {icl:35, title = {The Design and Implementation of the Parallel Out of Core ScaLAPACK LU, QR, and Cholesky Factorization Routines}, journal = {Concurrency: Practice and Experience}, volume = {12}, number = {15}, year = {2000}, month = {2000-01}, pages = {1481-1493}, author = {Eduardo D{\textquoteright}Azevedo and Jack Dongarra} } @inproceedings {icl:27, title = {Developing an Architecture to Support the Implementation and Development of Scientific Computing Applications}, journal = {to appear in Proceedings of Working Conference 8: Software Architecture for Scientific Computing Applications}, year = {2000}, month = {2000-10}, address = {Ottawa, Canada}, keywords = {netsolve}, author = {Dorian Arnold and Jack Dongarra} } @inproceedings {icl:43, title = {FT-MPI: Fault Tolerant MPI, Supporting Dynamic Applications in a Dynamic World}, journal = {Lecture Notes in Computer Science: Proceedings of EuroPVM-MPI 2000}, volume = {1908}, year = {2000}, month = {2000-01}, pages = {346-353}, publisher = {Springer Verlag}, address = {Hungary}, keywords = {ftmpi}, author = {Graham Fagg and Jack Dongarra} } @techreport {icl:30, title = {The GrADS Project: Software Support for High-Level Grid Application Development}, journal = {Technical Report}, year = {2000}, month = {2000-02}, keywords = {grads}, author = {Francine Berman and Andrew Chien and Keith Cooper and Jack Dongarra and Ian Foster and Dennis Gannon and Lennart Johnsson and Ken Kennedy and Carl Kesselman and Dan Reed and Linda Torczon and Rich Wolski} } @inproceedings {icl:41, title = {High Performance Computing Today}, journal = {FOMMS 2000: Foundations of Molecular Modeling and Simulation Conference (to appear)}, year = {2000}, month = {2000-01}, author = {Jack Dongarra and Hans Meuer and Horst D. Simon and Erich Strohmaier} } @article {icl:118, title = {Message Passing Software Systems}, journal = {Encyclopedia of Electrical and Electronics Engineering, Supplement 1}, year = {2000}, month = {2000-00}, publisher = {John Wiley \& Sons, Inc.}, keywords = {ftmpi}, author = {Jack Dongarra and Graham Fagg and Rolf Hempel and David W. Walker}, editor = {J. 
Webster} } @inproceedings {icl:28, title = {The NetSolve Environment: Progressing Towards the Seamless Grid}, journal = {2000 International Conference on Parallel Processing (ICPP-2000)}, year = {2000}, month = {2000-08}, address = {Toronto, Canada}, keywords = {netsolve}, author = {Dorian Arnold and Jack Dongarra} } @inproceedings {icl:42, title = {A New Recursive Implementation of Sparse Cholesky Factorization}, journal = {Proceedings of 16th IMACS World Congress 2000 on Scientific Computation, Applied Mathematics and Simulation}, year = {2000}, month = {2000-08}, address = {Lausanne, Switzerland}, author = {Jack Dongarra and Padma Raghavan} } @techreport {icl:36, title = {Performance of Various Computers Using Standard Linear Equations Software (Linpack Benchmark Report)}, journal = {University of Tennessee Computer Science Department Technical Report}, number = {CS-89-85}, year = {2000}, month = {2000-01}, author = {Jack Dongarra} } @article {icl:31, title = {A Portable Programming Interface for Performance Evaluation on Modern Processors}, journal = {The International Journal of High Performance Computing Applications}, volume = {14}, number = {3}, year = {2000}, month = {2000-09}, pages = {189-204}, keywords = {papi}, doi = {https://doi.org/10.1177/109434200001400303}, author = {Shirley Browne and Jack Dongarra and Nathan Garner and George Ho and Phil Mucci} } @techreport {icl:226, title = {A Portable Programming Interface for Performance Evaluation on Modern Processors}, journal = {University of Tennessee Computer Science Technical Report, UT-CS-00-444}, year = {2000}, month = {2000-07}, author = {Shirley Browne and Jack Dongarra and Nathan Garner and Kevin London and Phil Mucci} } @article {icl:227, title = {Providing Infrastructure and Interface to High Performance Applications in a Distributed Setting}, journal = {ASTC-HPC 2000}, year = {2000}, month = {2000-04}, address = {Washington, DC}, author = {Dorian Arnold and Wonsuck Lee and Jack Dongarra and Mary Wheeler} } @inproceedings {icl:39, title = {Recent Advances in Parallel Virtual Machine and Message Passing Interface}, journal = {Lecture Notes in Computer Science: Proceedings of 7th European PVM/MPI Users{\textquoteright} Group Meeting 2000}, volume = {1908}, year = {2000}, month = {2000-01}, publisher = {Springer Verlag}, address = {Hungary}, keywords = {ftmpi}, author = {Jack Dongarra and Peter Kacsuk and N. 
Podhorszki} } @inproceedings {icl:38, title = {Recursive Approach in Sparse Matrix LU Factorization}, journal = {Proceedings of 1st SGI Users Conference}, year = {2000}, month = {2000-01}, pages = {409-418}, address = {Cracow, Poland (ACC Cyfronet UMM, 2000)}, author = {Jack Dongarra and Victor Eijkhout and Piotr Luszczek} } @inproceedings {icl:24, title = {Request Sequencing: Optimizing Communication for the Grid}, journal = {Lecture Notes in Computer Science: Proceedings of 6th International Euro-Par Conference 2000, Parallel Processing}, volume = {1900}, year = {2000}, month = {2000-01}, pages = {1213-1222}, publisher = {Springer Verlag}, address = {Germany}, keywords = {netsolve}, author = {Dorian Arnold and Dieter Bachmann and Jack Dongarra} } @inproceedings {icl:32, title = {A Scalable Cross-Platform Infrastructure for Application Performance Tuning Using Hardware Counters}, journal = {Proceedings of SuperComputing 2000 (SC{\textquoteright}00)}, year = {2000}, month = {2000-11}, address = {Dallas, TX}, keywords = {papi}, author = {Shirley Browne and Jack Dongarra and Nathan Garner and Kevin London and Phil Mucci} } @inproceedings {icl:25, title = {Seamless Access to Adaptive Solver Algorithms}, journal = {Proceedings of 16th IMACS World Congress 2000 on Scientific Computation, Applied Mathematics and Simulation}, year = {2000}, month = {2000-08}, address = {Lausanne, Switzerland}, keywords = {netsolve}, author = {Dorian Arnold and Susan Blackford and Jack Dongarra and Victor Eijkhout and Tinghua Xu} } @techreport {icl:228, title = {Secure Remote Access to Numerical Software and Computational Hardware}, journal = {University of Tennessee Computer Science Technical Report, UT-CS-00-446}, year = {2000}, month = {2000-07}, author = {Dorian Arnold and Shirley Browne and Jack Dongarra and Graham Fagg and Keith Moore} } @inproceedings {icl:26, title = {Secure Remote Access to Numerical Software and Computational Hardware}, journal = {Proceedings of the DoD HPC Users Group Conference (HPCUG) 2000}, year = {2000}, month = {2000-06}, address = {Albuquerque, NM}, keywords = {netsolve}, author = {Dorian Arnold and Shirley Browne and Jack Dongarra and Graham Fagg and Keith Moore} } @techreport {icl:40, title = {Top500 Supercomputer Sites (15th edition)}, journal = {University of Tennessee Computer Science Department Technical Report}, number = {UT-CS-00-442}, year = {2000}, month = {2000-06}, keywords = {top500}, author = {Jack Dongarra and Hans Meuer and Erich Strohmaier} } @article {icl:57, title = {Algorithmic Issues on Heterogeneous Computing Platforms}, journal = {Parallel Processing Letters}, volume = {9}, number = {2}, year = {1999}, month = {1999-01}, pages = {197-213}, author = {Pierre Boulet and Jack Dongarra and Fabrice Rastello and Yves Robert and Frederic Vivien} } @article {icl:56, title = {Atlanta Organizers Put Mathematics to Work For the Math Sciences Community}, journal = {SIAM News}, volume = {32}, number = {6}, year = {1999}, month = {1999-01}, author = {Michael Berry and Jack Dongarra} } @techreport {icl:51, title = {A Comparison of Parallel Solvers for Diagonally Dominant and General Narrow Banded Linear Systems II (LAPACK Working Note 143)}, journal = {University of Tennessee Computer Science Department Technical Report}, number = {UT-CS-99-415}, year = {1999}, month = {1999-01}, author = {Peter Arbenz and Andrew Cleary and Jack Dongarra and Markus Hegland} } @techreport {icl:52, title = {A Comparison of Parallel Solvers for General Narrow Banded Linear Systems (LAPACK Working Note 142)}, 
journal = {University of Tennessee Computer Science Technical Report}, number = {UT-CS-99-414}, year = {1999}, month = {1999-01}, author = {Peter Arbenz and Andrew Cleary and Jack Dongarra and Markus Hegland} } @article {icl:33, title = {Deploying Fault-tolerance and Task Migration with NetSolve}, journal = {Future Generation Computer Systems}, volume = {15}, number = {5-6}, year = {1999}, month = {1999-10}, pages = {745-755}, publisher = {Elsevier}, keywords = {netsolve}, author = {Henri Casanova and James Plank and Micah Beck and Jack Dongarra} } @article {icl:74, title = {Experiences with Windows 95/NT as a Cluster Computing Platform for Parallel Computing}, journal = {Parallel and Distributed Computing Practices, Special Issue: Cluster Computing}, volume = {2}, number = {2}, year = {1999}, month = {1999-02}, pages = {119-128}, publisher = {Nova Science Publishers, USA}, author = {Markus Fischer and Jack Dongarra} } @article {icl:55, title = {HARNESS: A Next Generation Distributed Virtual Machine}, journal = {International Journal on Future Generation Computer Systems}, volume = {15}, number = {5-6}, year = {1999}, month = {1999-01}, pages = {571-582}, keywords = {harness}, author = {Micah Beck and Jack Dongarra and Graham Fagg and Al Geist and Paul Gray and James Kohl and Mauro Migliardi and Keith Moore and Terry Moore and Philip Papadopoulos and Stephen L. Scott and Vaidy Sunderam} } @article {icl:50, title = {LAPACK Users{\textquoteright} Guide, 3rd ed.}, journal = {Philadelphia: Society for Industrial and Applied Mathematics}, year = {1999}, month = {1999-01}, author = {Ed Anderson and Zhaojun Bai and Christian Bischof and Susan Blackford and James Demmel and Jack Dongarra and Jeremy Du Croz and Anne Greenbaum and Sven Hammarling and Alan McKenney and Danny Sorensen} } @article {icl:53, title = {Logistical Quality of Service in NetSolve}, journal = {Computer Communications}, volume = {22}, number = {11}, year = {1999}, month = {1999-01}, pages = {1034-1044}, keywords = {netsolve}, author = {Micah Beck and Henri Casanova and Jack Dongarra and Terry Moore and James Plank and Francine Berman and Rich Wolski} } @article {icl:60, title = {Numerical Libraries and Tools for Scalable Parallel Cluster Computing}, journal = {IEEE Cluster Computing BOF at SC99}, year = {1999}, month = {1999-01}, address = {Portland, Oregon}, author = {Shirley Browne and Jack Dongarra and Anne Trefethen} } @article {icl:66, title = {Numerical Linear Algebra}, journal = {Encyclopedia of Computer Science and Technology}, 
volume = {41}, year = {1999}, month = {1999-08}, pages = {207-233}, publisher = {Marcel Dekker}, author = {Jack Dongarra and Victor Eijkhout}, editor = {Allen Kent and James G. Williams} } @article {icl:65, title = {Numerical Linear Algebra Algorithms and Software}, journal = {Journal of Computational and Applied Mathematics}, volume = {123}, number = {1-2}, year = {1999}, month = {1999-10}, pages = {489-514}, author = {Jack Dongarra and Victor Eijkhout} } @article {icl:229, title = {A Numerical Linear Algebra Problem Solving Environment Designer{\textquoteright}s Perspective (LAPACK Working Note 139)}, journal = {SIAM Annual Meeting}, year = {1999}, month = {1999-05}, address = {Atlanta, GA}, author = {Antoine Petitet and Henri Casanova and Clint Whaley and Jack Dongarra and Yves Robert} } @article {icl:75, title = {Parallel and Distributed Scientific Computing: A Numerical Linear Algebra Problem Solving Environment Designer{\textquoteright}s Perspective}, journal = {Handbook on Parallel and Distributed Processing}, year = {1999}, month = {1999-01}, author = {Antoine Petitet and Henri Casanova and Jack Dongarra and Yves Robert and Clint Whaley} } @article {icl:73, title = {Scalable Networked Information Processing Environment (SNIPE)}, journal = {Journal on Future Generation Computer Systems}, volume = {15}, number = {5/6}, year = {1999}, month = {1999-01}, pages = {595-605}, keywords = {harness}, author = {Graham Fagg and Keith Moore and Jack Dongarra} } @article {icl:58, title = {Static Tiling for Heterogeneous Computing Platforms}, journal = {Parallel Computing}, volume = {25}, number = {5}, year = {1999}, month = {1999-01}, pages = {547-568}, author = {Pierre Boulet and Jack Dongarra and Yves Robert and Frederic Vivien} } @article {icl:63, title = {Stochastic Performance Prediction for Iterative Algorithms in Distributed Environments}, journal = {Journal of Parallel and Distributed Computing}, volume = {98}, number = {1}, year = {1999}, month = {1999-01}, pages = {68-91}, author = {Henri Casanova and Myung Ho Kim and James Plank and Jack Dongarra} } @article {icl:62, title = {Tiling on Systems with Communication/Computation Overlap}, journal = {Concurrency: Practice and Experience}, volume = {11}, number = {3}, year = {1999}, month = {1999-01}, pages = {139-153}, author = {Pierre-Yves Calland and Jack Dongarra and Yves Robert} } @techreport {icl:67, title = {Top500 Supercomputer Sites (13th edition)}, journal = {University of Tennessee Computer Science Department Technical Report}, number = {UT-CS-99-425}, year = {1999}, month = {1999-06}, keywords = {top500}, author = {Jack Dongarra and Hans Meuer and Erich Strohmaier} } @techreport {icl:68, title = {Top500 Supercomputer Sites (14th edition)}, journal = {University of Tennessee Computer Science Department Technical Report}, number = {UT-CS-99-434}, year = {1999}, month = {1999-11}, keywords = {top500}, author = {Jack Dongarra and Hans Meuer and Erich Strohmaier} } @conference {967, title = {Automatically Tuned Linear Algebra Software}, booktitle = {1998 ACM/IEEE conference on Supercomputing (SC{\textquoteright}98)}, year = {1998}, month = {1998-11}, publisher = {IEEE Computer Society}, organization = {IEEE Computer Society}, address = {Orlando, FL}, abstract = {This paper describes an approach for the automatic generation and optimization of numerical software for processors with deep memory hierarchies and pipelined functional units. 
The production of such software for machines ranging from desktop workstations to embedded processors can be a tedious and time-consuming process. The work described here can help in automating much of this process. We will concentrate our efforts on the widely used linear algebra kernels called the Basic Linear Algebra Subprograms (BLAS). In particular, the work presented here is for general matrix multiply, DGEMM. However, much of the technology and approach developed here can be applied to the other Level 3 BLAS and the general strategy can have an impact on basic linear algebra operations in general and may be extended to other important kernel operations.}, keywords = {BLAS, code generation, high performance, linear algebra, optimization, Tuning}, isbn = {0-89791-984-X}, author = {Clint Whaley and Jack Dongarra} } @book {1469, title = {MPI - The Complete Reference, Volume 1: The MPI Core}, year = {1998}, month = {1998-08}, pages = {426}, publisher = {MIT Press}, organization = {MIT Press}, edition = {Second}, address = {Cambridge, MA, USA}, abstract = {Since its release in summer 1994, the Message Passing Interface (MPI) specification has become a standard for message-passing libraries for parallel computations. There exist more than a dozen implementations on a variety of computing platforms, from the IBM SP-2 supercomputer to PCs running Windows NT. The initial MPI Standard, known as MPI-1, has been modified over the last two years. This volume, the definitive reference manual for the latest version of MPI-1, contains a complete specification of the MPI Standard. It is annotated with comments that clarify complicated issues, including why certain design choices were made, how users are intended to use the interface, and how they should construct their version of MPI. The volume also provides many detailed, illustrative programming examples.}, isbn = {978-0-262-69215-1}, author = {Marc Snir and Steve Otto and Steven Huss-Lederman and David Walker and Jack Dongarra} } @article {icl:256, title = {National HPCC Software Exchange (NHSE): Uniting the High Performance Computing and Communications Community}, journal = {D-Lib Magazine}, year = {1998}, month = {1998-01}, keywords = {rib}, author = {Shirley Browne and Jack Dongarra and Jeff Horner and Paul McMahan and Scott Wells} } @book {1468, title = {Numerical Linear Algebra for High-Performance Computers}, series = {Software, Environments and Tools}, year = {1998}, publisher = {SIAM}, organization = {SIAM}, abstract = {This book presents a unified treatment of recently developed techniques and current understanding about solving systems of linear equations and large scale eigenvalue problems on high-performance computers. It provides a rapid introduction to the world of vector and parallel processing for these linear algebra applications. Topics include major elements of advanced-architecture computers and their performance, recent algorithmic development, and software for direct solution of dense matrix problems, direct solution of sparse systems of equations, iterative solution of sparse systems of equations, and solution of large sparse eigenvalue problems. This book supersedes the SIAM publication Solving Linear Systems on Vector and Shared Memory Computers, which appeared in 1990. 
The new book includes a considerable amount of new material in addition to incorporating a substantial revision of existing text.}, doi = {https://doi.org/10.1137/1.9780898719611}, author = {Jack Dongarra and Iain Duff and Danny Sorensen and Henk van der Vorst} } @article {1467, title = {ScaLAPACK: A Portable Linear Algebra Library for Distributed Memory Computers - Design Issues and Performance}, journal = {Computer Physics Communications}, volume = {97}, year = {1996}, month = {1996-08}, pages = {1-15}, abstract = {This paper outlines the content and performance of ScaLAPACK, a collection of mathematical software for linear algebra computations on distributed memory computers. The importance of developing standards for computational and message passing interfaces is discussed. We present the different components and building blocks of ScaLAPACK. This paper outlines the difficulties inherent in producing correct codes for networks of heterogeneous processors. We define a theoretical model of parallel computers dedicated to linear algebra applications: the Distributed Linear Algebra Machine (DLAM). This model provides a convenient framework for developing parallel algorithms and investigating their scalability, performance and programmability. Extensive performance results on various platforms are presented and analyzed with the help of the DLAM. Finally, this paper briefly describes future directions for the ScaLAPACK library and concludes by suggesting alternative approaches to mathematical libraries, explaining how ScaLAPACK could be integrated into efficient and user-friendly distributed systems.}, doi = {https://doi.org/10.1016/0010-4655(96)00017-3}, author = {Jaeyoung Choi and Jim Demmel and Inderjit Dhillon and Jack Dongarra and Susan Ostrouchov and Antoine Petitet and Kendall Stanley and David Walker and Clint Whaley} }
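A note on the PAPI entries above (icl:31, icl:32, icl:15, icl:11): their abstracts describe a counter interface in which a set of hardware events is started, stopped, and read around a region of code. As a minimal illustration of that usage model, the C sketch below counts total cycles and total instructions around a placeholder loop. It assumes a standard PAPI installation (papi.h, linked with -lpapi) and uses only documented preset events; it is an illustrative sketch written for this bibliography, not code taken from any of the cited papers, and the file name papi_sketch.c is hypothetical.

#include <stdio.h>
#include <stdlib.h>
#include <papi.h>

int main(void) {
    int eventset = PAPI_NULL;
    long long values[2];

    /* Initialize the library; the return value must match the header version. */
    if (PAPI_library_init(PAPI_VER_CURRENT) != PAPI_VER_CURRENT) {
        fprintf(stderr, "PAPI_library_init failed\n");
        return EXIT_FAILURE;
    }

    /* Build an event set holding two portable preset events. */
    if (PAPI_create_eventset(&eventset) != PAPI_OK ||
        PAPI_add_event(eventset, PAPI_TOT_CYC) != PAPI_OK ||  /* total cycles */
        PAPI_add_event(eventset, PAPI_TOT_INS) != PAPI_OK) {  /* total instructions */
        fprintf(stderr, "could not set up the event set\n");
        return EXIT_FAILURE;
    }

    PAPI_start(eventset);

    /* Region to be measured: a placeholder workload. */
    volatile double x = 0.0;
    for (long i = 0; i < 1000000; i++)
        x += (double)i * 0.5;

    PAPI_stop(eventset, values);  /* stops counting and reads both counters */

    printf("cycles: %lld  instructions: %lld\n", values[0], values[1]);
    return EXIT_SUCCESS;
}

Compiled with something like cc papi_sketch.c -lpapi and run on a machine with accessible counters, this should print the two event totals for the loop; the same start/stop pattern underlies the higher-level tools (perfometer and the third-party tool integrations) that those abstracts describe.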