@techreport {1461, title = {FFT-ECP API and High-Performance Library Prototype for 2-D and 3-D FFTs on Large-Scale Heterogeneous Systems with GPUs}, journal = {ECP Milestone Report}, number = {FFT-ECP STML13-27}, year = {2020}, note = {revision 01-2020}, month = {2020-01}, publisher = {Innovative Computing Laboratory, University of Tennessee}, type = {ECP WBS 2.3.3.13 Milestone Report}, author = {Stanimire Tomov and Alan Ayala and Azzam Haidar and Jack Dongarra} } @conference {1481, title = {heFFTe: Highly Efficient FFT for Exascale}, booktitle = {International Conference on Computational Science (ICCS 2020)}, year = {2020}, month = {2020-06}, address = {Amsterdam, Netherlands}, abstract = {Exascale computing aspires to meet the increasing demands from large scientific applications. Software targeting exascale is typically designed for heterogeneous architectures; hence, it is important not only to develop well-designed software, but also to make it aware of the hardware architecture and efficiently exploit its power. Currently, several diverse applications, such as those that are part of the Exascale Computing Project (ECP) in the United States, rely on efficient computation of the Fast Fourier Transform (FFT). In this context, we present the design and implementation of the heFFTe (Highly Efficient FFT for Exascale) library, which targets the upcoming exascale supercomputers. We provide highly (linearly) scalable GPU kernels that achieve more than 40{\texttimes} speedup with respect to local kernels from state-of-the-art CPU libraries, and over 2{\texttimes} speedup for the whole FFT computation. A communication model for parallel FFTs is also provided to analyze the bottleneck for large-scale problems. We show experiments obtained on the Summit supercomputer at Oak Ridge National Laboratory, using up to 24,576 IBM Power9 cores and 6,144 NVIDIA V100 GPUs.}, keywords = {exascale, FFT, gpu, scalable algorithm}, doi = {https://doi.org/10.1007/978-3-030-50371-0_19}, author = {Alan Ayala and Stanimire Tomov and Azzam Haidar and Jack Dongarra} } @article {, title = {heFFTe: Highly Efficient FFT for Exascale (Poster)}, year = {2020}, month = {2020-10}, publisher = {NVIDIA GPU Technology Conference (GTC2020)}, author = {Alan Ayala and Stanimire Tomov and Azzam Haidar and Jack Dongarra} } @article {, title = {heFFTe: Highly Efficient FFT for Exascale (Poster)}, year = {2020}, month = {2020-02}, publisher = {SIAM Conference on Parallel Processing for Scientific Computing (SIAM PP20)}, address = {Seattle, WA}, abstract = {Considered one of the top 10 algorithms of the 20th century, the Fast Fourier Transform (FFT) is widely used by applications in science and engineering. Large-scale parallel applications targeting exascale, such as those that are part of the DOE Exascale Computing Project (ECP), are designed for heterogeneous architectures and, currently, more than a dozen ECP applications use FFTs in their codes. To address these applications{\textquoteright} needs, we developed the highly efficient FFTs for exascale (heFFTe) library. The heFFTe library release features very good weak and strong scalability and performance that is close to 90\% of the roofline peak performance. We present these performance results on the Summit supercomputer. heFFTe is also integrated in a number of applications, and we present how their overall performance is improved by using heFFTe. 
Performance model, limitations, and challenges are discussed for current and upcoming computer architectures.}, author = {Alan Ayala and Stanimire Tomov and Azzam Haidar and Jack Dongarra} } @article {, title = {heFFTe: Highly Efficient FFT for Exascale (Poster)}, year = {2020}, month = {2020-02}, publisher = {2020 Exascale Computing Project Annual Meeting}, address = {Houston, TX}, author = {Alan Ayala and Stanimire Tomov and Jack Dongarra and Azzam Haidar} } @article {, title = {MAGMA Templates for Scalable Linear Algebra on Emerging Architectures}, journal = {The International Journal of High Performance Computing Applications}, volume = {34}, year = {2020}, month = {2020-11}, pages = {645-658}, abstract = {With the acquisition and widespread use of more resources that rely on accelerator/wide vector{\textendash}based computing, there has been a strong demand for science and engineering applications to take advantage of these latest assets. This, however, has been extremely challenging due to the diversity of systems to support their extreme concurrency, complex memory hierarchies, costly data movement, and heterogeneous node architectures. To address these challenges, we design a programming model and describe its ease of use in the development of a new MAGMA Templates library that delivers high-performance scalable linear algebra portable on current and emerging architectures. MAGMA Templates derives its performance and portability by (1) building on existing state-of-the-art linear algebra libraries, like MAGMA, SLATE, Trilinos, and vendor-optimized math libraries, and (2) providing access (seamlessly to the users) to the latest algorithms and architecture-specific optimizations through a single, easy-to-use C++-based API.}, issn = {1094-3420}, doi = {https://doi.org/10.1177/1094342020938421}, author = {Mohammed Al Farhan and Ahmad Abdelfattah and Stanimire Tomov and Mark Gates and Dalal Sukkari and Azzam Haidar and Robert Rosenberg and Jack Dongarra} } @article {, title = {Mixed-Precision Iterative Refinement using Tensor Cores on GPUs to Accelerate Solution of Linear Systems}, journal = {Proceedings of the Royal Society A}, volume = {476}, year = {2020}, month = {2020-11}, abstract = {Double-precision floating-point arithmetic (FP64) has been the de facto standard for engineering and scientific simulations for several decades. Problem complexity and the sheer volume of data coming from various instruments and sensors motivate researchers to mix and match various approaches to optimize compute resources, including different levels of floating-point precision. In recent years, machine learning has motivated hardware support for half-precision floating-point arithmetic. A primary challenge in high-performance computing is to leverage reduced-precision and mixed-precision hardware. We show how the FP16/FP32 Tensor Cores on NVIDIA GPUs can be exploited to accelerate the solution of linear systems of equations Ax = b without sacrificing numerical stability. The techniques we employ include multiprecision LU factorization, the preconditioned generalized minimal residual algorithm (GMRES), and scaling and auto-adaptive rounding to avoid overflow. We also show how to efficiently handle systems with multiple right-hand sides. 
On the NVIDIA Quadro GV100 (Volta) GPU, we achieve a 4{\texttimes}-5{\texttimes} performance increase and 5{\texttimes} better energy efficiency versus the standard FP64 implementation while maintaining an FP64 level of numerical stability.}, keywords = {GMRES, LU factorization, GPU computing, half precision arithmetic, iterative refinement, mixed precision solvers}, issn = {1471-2946}, doi = {https://doi.org/10.1098/rspa.2020.0110}, author = {Azzam Haidar and Harun Bayraktar and Stanimire Tomov and Jack Dongarra and Nicholas J. Higham} } @techreport {1471, title = {Mixed-Precision Solution of Linear Systems Using Accelerator-Based Computing}, journal = {Innovative Computing Laboratory Technical Report}, number = {ICL-UT-20-05}, year = {2020}, month = {2020-05}, publisher = {University of Tennessee}, abstract = {Double-precision floating-point arithmetic (FP64) has been the de facto standard for engineering and scientific simulations for several decades. Problem complexity and the sheer volume of data coming from various instruments and sensors motivate researchers to mix and match various approaches to optimize compute resources, including different levels of floating-point precision. In recent years, machine learning has motivated hardware support for half-precision floating-point arithmetic. A primary challenge in high-performance computing is to leverage reduced- and mixed-precision hardware. We show how the FP16/FP32 Tensor Cores on NVIDIA GPUs can be exploited to accelerate the solution of linear systems of equations Ax = b without sacrificing numerical stability. We achieve a 4{\texttimes}{\textendash}5{\texttimes} performance increase and 5{\texttimes} better energy efficiency versus the standard FP64 implementation while maintaining an FP64 level of numerical stability.}, author = {Azzam Haidar and Harun Bayraktar and Stanimire Tomov and Jack Dongarra and Nicholas J. Higham} } @article {, title = {A Set of Batched Basic Linear Algebra Subprograms}, journal = {ACM Transactions on Mathematical Software}, year = {2020}, month = {2020-10}, abstract = {This paper describes a standard API for a set of Batched Basic Linear Algebra Subprograms (Batched BLAS or BBLAS). The focus is on many independent BLAS operations on small matrices that are grouped together and processed by a single routine, called a Batched BLAS routine. The matrices are grouped together in uniformly sized groups, with just one group if all the matrices are of equal size. The aim is to provide more efficient, but portable, implementations of algorithms on high-performance many-core platforms. These include multicore and many-core CPU processors, GPUs and coprocessors, and other hardware accelerators with floating-point compute facility. As well as the standard types of single and double precision, we also include half and quadruple precision in the standard. In particular half precision is used in many very large scale applications, such as those associated with machine learning.}, author = {Ahmad Abdelfattah and Timothy Costa and Jack Dongarra and Mark Gates and Azzam Haidar and Sven Hammarling and Nicholas J. 
Higham and Jakub Kurzak and Piotr Luszczek and Stanimire Tomov and Mawussi Zounon} } @article {1262, title = {Algorithms and Optimization Techniques for High-Performance Matrix-Matrix Multiplications of Very Small Matrices}, journal = {Parallel Computing}, volume = {81}, year = {2019}, month = {2019-01}, pages = {1{\textendash}21}, abstract = {Expressing scientific computations in terms of BLAS, and in particular the general dense matrix-matrix multiplication (GEMM), is of fundamental importance for obtaining high performance portability across architectures. However, GEMMs for small matrices of sizes smaller than 32 are not sufficiently optimized in existing libraries. We consider the computation of many small GEMMs and its performance portability for a wide range of computer architectures, including Intel CPUs, ARM, IBM, Intel Xeon Phi, and GPUs. These computations often occur in applications like big data analytics, machine learning, high-order finite element methods (FEM), and others. The GEMMs are grouped together in a single batched routine. For these cases, we present algorithms and their optimization techniques that are specialized for the matrix sizes and architectures of interest. We derive a performance model and show that the new developments can be tuned to obtain performance that is within 90\% of the optimal for any of the architectures of interest. For example, on a V100 GPU for square matrices of size 32, we achieve an execution rate of about 1600 gigaFLOP/s in double-precision arithmetic, which is 95\% of the theoretically derived peak for this computation on a V100 GPU. We also show that these results outperform currently available state-of-the-art implementations such as vendor-tuned math libraries, including Intel MKL and NVIDIA CUBLAS, as well as open-source libraries like OpenBLAS and Eigen.}, keywords = {Autotuning, Batched GEMM, HPC, Matrix-matrix product, optimization, Small matrices}, doi = {https://doi.org/10.1016/j.parco.2018.10.003}, author = {Ian Masliah and Ahmad Abdelfattah and Azzam Haidar and Stanimire Tomov and Marc Baboulin and Jo{\"e}l Falcou and Jack Dongarra} } @techreport {1322, title = {Design and Implementation for FFT-ECP on Distributed Accelerated Systems}, journal = {Innovative Computing Laboratory Technical Report}, number = {ICL-UT-19-05}, year = {2019}, month = {2019-04}, publisher = {University of Tennessee}, type = {ECP WBS 2.3.3.09 Milestone Report}, author = {Stanimire Tomov and Azzam Haidar and Alan Ayala and Daniel Schultz and Jack Dongarra} } @article {1269, title = {Evaluation of Directive-Based Performance Portable Programming Models}, journal = {International Journal of High Performance Computing and Networking}, volume = {14}, year = {2019}, month = {2019{\textendash}07}, pages = {165-182}, abstract = {We present an extended exploration of the performance portability of directives provided by OpenMP 4 and OpenACC to program various types of node architecture with attached accelerators, both self-hosted multicore and offload multicore/GPU. Our goal is to examine how successful OpenACC and the newer offload features of OpenMP 4.5 are for moving codes between architectures, and we document how much tuning might be required and what lessons we can learn from these experiences. To do this, we use examples of algorithms with varying computational intensities for our evaluation, as both compute and data access efficiency are important considerations for overall application performance. To better understand fundamental compute vs. 
bandwidth bound characteristics, we add the compute-bound Level 3 BLAS GEMM kernel to our linear algebra evaluation. We implement the kernels of interest using various methods provided by newer OpenACC and OpenMP implementations, and we evaluate their performance on various platforms including both x86_64 and Power8 with attached NVIDIA GPUs, x86_64 multicores, self-hosted Intel Xeon Phi KNL, as well as an x86_64 host system with Intel Xeon Phi coprocessors. We update these evaluations with the newest version of the NVIDIA Pascal architecture (P100), Intel KNL 7230, Power8+, and the newest supporting compiler implementations. Furthermore, we present in detail what factors affected the performance portability, including how to pick the right programming model, its programming style, its availability on different platforms, and how well compilers can optimise and target multiple platforms.}, keywords = {OpenACC, OpenMP 4, performance portability, Programming models}, doi = {http://dx.doi.org/10.1504/IJHPCN.2017.10009064}, author = {M. Graham Lopez and Wayne Joubert and Ver{\'o}nica Larrea and Oscar Hernandez and Azzam Haidar and Stanimire Tomov and Jack Dongarra} } @article {1329, title = {FFT-ECP Fast Fourier Transform}, year = {2019}, month = {2019-01}, publisher = {2019 ECP Annual Meeting (Research Poster)}, address = {Houston, TX}, author = {Stanimire Tomov and Azzam Haidar and Alan Ayala and Daniel Schultz and Jack Dongarra} } @techreport {1401, title = {FFT-ECP Implementation Optimizations and Features Phase}, journal = {Innovative Computing Laboratory Technical Report}, number = {ICL-UT-19-12}, year = {2019}, month = {2019-10}, publisher = {University of Tennessee}, author = {Stanimire Tomov and Azzam Haidar and Alan Ayala and Hejer Shaiek and Jack Dongarra} } @article {1385, title = {GPUDirect MPI Communications and Optimizations to Accelerate FFTs on Exascale Systems}, journal = {EuroMPI{\textquoteright}19 Posters, Zurich, Switzerland}, number = {icl-ut-19-06}, year = {2019}, month = {2019-09}, publisher = {ICL}, type = {Extended Abstract}, abstract = {Fast Fourier transforms (FFTs) are used in applications ranging from molecular dynamics and spectrum estimation to machine learning, fast convolution and correlation, signal modulation, wireless multimedia applications, and others. However, FFTs are memory bound, and therefore, to accelerate them, it is crucial to avoid and optimize the FFTs{\textquoteright} communications. To this end, we present a 3-D FFT design for distributed graphics processing unit (GPU) systems that: (1) efficiently uses GPUs{\textquoteright} high bandwidth, (2) reduces global communications algorithmically, when possible, and (3) employs GPUDirect technologies as well as MPI optimizations in the development of high-performance FFTs for large-scale GPU-accelerated systems. 
We show that these developments and optimizations lead to very good strong scalability and a performance that is close to 90\% of the theoretical peak.}, keywords = {CUDA-Aware MPI, ECP, FFT, FFT-ECP, gpu, GPUDirect}, author = {Hejer Shaiek and Stanimire Tomov and Alan Ayala and Azzam Haidar and Jack Dongarra} } @conference {1403, title = {Impacts of Multi-GPU MPI Collective Communications on Large FFT Computation}, booktitle = {Workshop on Exascale MPI (ExaMPI) at SC19}, year = {2019}, month = {2019-11}, address = {Denver, CO}, keywords = {Collective MPI, Exascale applications, FFT, Heterogeneous systems, scalable}, author = {Alan Ayala and Stanimire Tomov and Xi Luo and Hejer Shaiek and Azzam Haidar and George Bosilca and Jack Dongarra} } @article {1366, title = {MagmaDNN 0.2 High-Performance Data Analytics for Manycore GPUs and CPUs}, year = {2019}, month = {2019-01}, publisher = {University of Tennessee}, doi = {10.13140/RG.2.2.14906.64961}, author = {Lucien Ng and Sihan Chen and Alex Gessinger and Daniel Nichols and Sophia Cheng and Anu Meenasorna and Kwai Wong and Stanimire Tomov and Azzam Haidar and Eduardo D{\textquoteright}Azevedo and Jack Dongarra} } @article {1270, title = {PLASMA: Parallel Linear Algebra Software for Multicore Using OpenMP}, journal = {ACM Transactions on Mathematical Software}, volume = {45}, year = {2019}, month = {2019-06}, doi = {https://doi.org/10.1145/3264491}, author = {Jack Dongarra and Mark Gates and Azzam Haidar and Jakub Kurzak and Piotr Luszczek and Panruo Wu and Ichitaro Yamazaki and Asim YarKhan and Maksims Abalenkovs and Negin Bagherpour and Sven Hammarling and Jakub Sistek} } @article {1336, title = {Accelerating Linear Algebra with MAGMA}, year = {2018}, month = {2018-02}, publisher = {ECP Annual Meeting 2018, Tutorial}, address = {Knoxville, TN}, author = {Stanimire Tomov and Mark Gates and Azzam Haidar} } @article {1266, title = {Accelerating the SVD Bi-Diagonalization of a Batch of Small Matrices using GPUs}, journal = {Journal of Computational Science}, volume = {26}, year = {2018}, month = {2018-05}, pages = {237{\textendash}245}, abstract = {The acceleration of many small-sized linear algebra problems has become extremely challenging for current many-core architectures, and in particular GPUs. Standard interfaces have been proposed for some of these problems, called batched problems, so that they get targeted for optimization and used in a standard way in applications, calling them directly from highly optimized, standard numerical libraries, like (batched) BLAS and LAPACK. While most of the developments have been for one-sided factorizations and solvers, many important applications {\textendash} from big data analytics to information retrieval, low-rank approximations for solvers and preconditioners {\textendash} require two-sided factorizations, and most notably the SVD factorization. To address these needs and the parallelization challenges related to them, we developed a number of new batched computing techniques and designed batched Basic Linear Algebra Subroutines (BLAS) routines, and in particular the Level-2 BLAS GEMV and the Level-3 BLAS GEMM routines, to solve them. We propose a device functions-based methodology and big-tile setting techniques in our batched BLAS design. The different optimization techniques result in many software versions that must be tuned, for which we adopt an auto-tuning strategy to automatically derive the optimized instances of the routines. 
We illustrate our batched BLAS approach to optimize batched SVD bi-diagonalization progressively on GPUs. The progression is illustrated on an NVIDIA K40c GPU, and also ported to and presented on an AMD Fiji Nano GPU, using AMD{\textquoteright}s Heterogeneous-Compute Interface for Portability (HIP) C++ runtime API. We demonstrate achieving 80\% of the theoretically achievable peak performance for the overall algorithm, and significant acceleration of the Level-2 BLAS GEMV and Level-3 BLAS GEMM needed compared to vendor-optimized libraries on GPUs and multicore CPUs. The optimization techniques in this paper are applicable to the other two-sided factorizations as well.}, keywords = {Batched, Eigenvalue and singular value problems, hardware accelerators, numerical linear algebra, Two-sided factorization algorithms}, doi = {https://doi.org/10.1016/j.jocs.2018.01.007}, author = {Tingxing Dong and Azzam Haidar and Stanimire Tomov and Jack Dongarra} } @techreport {1229, title = {Algorithms and Optimization Techniques for High-Performance Matrix-Matrix Multiplications of Very Small Matrices}, journal = {Innovative Computing Laboratory Technical Report}, number = {ICL-UT-18-09}, year = {2018}, month = {2018-09}, publisher = {Innovative Computing Laboratory, University of Tennessee}, abstract = {Expressing scientific computations in terms of BLAS, and in particular the general dense matrix-matrix multiplication (GEMM), is of fundamental importance for obtaining high performance portability across architectures. However, GEMMs for small matrices of sizes smaller than 32 are not sufficiently optimized in existing libraries. We consider the computation of many small GEMMs and its performance portability for a wide range of computer architectures, including Intel CPUs, ARM, IBM, Intel Xeon Phi, and GPUs. These computations often occur in applications like big data analytics, machine learning, high-order finite element methods (FEM), and others. The GEMMs are grouped together in a single batched routine. For these cases, we present algorithms and their optimization techniques that are specialized for the matrix sizes and architectures of interest. We derive a performance model and show that the new developments can be tuned to obtain performance that is within 90\% of the optimal for any of the architectures of interest. For example, on a V100 GPU for square matrices of size 32, we achieve an execution rate of about 1,600 gigaFLOP/s in double-precision arithmetic, which is 95\% of the theoretically derived peak for this computation on a V100 GPU. We also show that these results outperform currently available state-of-the-art implementations such as vendor-tuned math libraries, including Intel MKL and NVIDIA CUBLAS, as well as open-source libraries like OpenBLAS and Eigen.}, author = {Ian Masliah and Ahmad Abdelfattah and Azzam Haidar and Stanimire Tomov and Marc Baboulin and Jo{\"e}l Falcou and Jack Dongarra} } @article {1260, title = {Analysis and Design Techniques towards High-Performance and Energy-Efficient Dense Linear Solvers on GPUs}, journal = {IEEE Transactions on Parallel and Distributed Systems}, volume = {29}, year = {2018}, month = {2018-12}, pages = {2700{\textendash}2712}, abstract = {Graphics Processing Units (GPUs) are widely used in accelerating dense linear solvers. The matrix factorizations, which dominate the runtime for these solvers, are often designed using a hybrid scheme, where GPUs perform trailing matrix updates, while the CPUs perform the panel factorizations. 
Consequently, hybrid solutions require high-end CPUs and optimized CPU software in order to deliver high performance. Furthermore, they lack the energy efficiency inherent to GPUs, due to the use of less energy-efficient CPUs, as well as CPU-GPU communications. This paper presents analysis and design techniques that overcome the shortcomings of the hybrid algorithms, and allow the design of high-performance and energy-efficient dense LU and Cholesky factorizations that use GPUs only. The full GPU solution eliminates the need for a high-end CPU and optimized CPU software, which leads to a better energy efficiency. We discuss different design choices, and introduce optimized GPU kernels for panel factorizations. The developed solutions achieve 90+ percent of the performance of optimized hybrid solutions, while improving the energy efficiency by 50 percent. They outperform the vendor library by 30-50 percent in single precision, and 15-50 percent in double precision. We also show that hybrid designs trail the proposed solutions in performance when optimized CPU software is not available.}, keywords = {Dense linear solvers, energy efficiency, GPU computing}, doi = {10.1109/TPDS.2018.2842785}, author = {Ahmad Abdelfattah and Azzam Haidar and Stanimire Tomov and Jack Dongarra} } @article {1300, title = {Batched BLAS (Basic Linear Algebra Subprograms) 2018 Specification}, year = {2018}, month = {2018-07}, abstract = {This document describes an API for Batch Basic Linear Algebra Subprograms (Batched BLAS or BBLAS). We focus on many independent BLAS operations on small matrices that are grouped together and processed by a single routine, called a Batched BLAS routine. The extensions beyond the original BLAS standard are considered that specify a programming interface not only for routines with uniformly-sized matrices and/or vectors but also for the situation where the sizes vary. The aim is to provide more efficient, but portable, implementations of algorithms on high-performance manycore platforms. These include multicore and many-core CPU processors; GPUs and coprocessors; as well as other hardware accelerators with floating-point compute facility.}, author = {Jack Dongarra and Iain Duff and Mark Gates and Azzam Haidar and Sven Hammarling and Nicholas J. Higham and Jonathan Hogg and Pedro Valero Lara and Piotr Luszczek and Mawussi Zounon and Samuel D. Relton and Stanimire Tomov and Timothy Costa and Sarah Knepper} } @article {1209, title = {Batched One-Sided Factorizations of Tiny Matrices Using GPUs: Challenges and Countermeasures}, journal = {Journal of Computational Science}, volume = {26}, year = {2018}, month = {2018-05}, pages = {226{\textendash}236}, abstract = {The use of batched matrix computations recently gained a lot of interest for applications where the same operation is applied to many small independent matrices. The batched computational pattern is frequently encountered in applications of data analytics, direct/iterative solvers and preconditioners, computer vision, astrophysics, and more, and often requires specific designs for vectorization and extreme parallelism to map well on today{\textquoteright}s high-end many-core architectures. This has led to the development of optimized software for batch computations, and to an ongoing community effort to develop standard interfaces for batched linear algebra software. 
Furthering these developments, we present GPU design and optimization techniques for high-performance batched one-sided factorizations of millions of tiny matrices (of size 32 and less). We quantify the effects and relevance of different techniques in order to select the best-performing LU, QR, and Cholesky factorization designs. While we adapt common optimization techniques, such as optimal memory traffic, register blocking, and concurrency control, we also show that a different mindset and techniques are needed when matrices are tiny, and in particular, sub-vector/warp in size. The proposed routines are part of the MAGMA library and deliver significant speedups compared to their counterparts in currently available vendor-optimized libraries. Notably, we tune the developments for the newest V100 GPU from NVIDIA to show speedups of up to 11.8{\texttimes}.}, keywords = {batch computation, GPU computing, matrix factorization}, doi = {https://doi.org/10.1016/j.jocs.2018.01.005}, author = {Ahmad Abdelfattah and Azzam Haidar and Stanimire Tomov and Jack Dongarra} } @article {1263, title = {Computational Benefit of GPU Optimization for Atmospheric Chemistry Modeling}, journal = {Journal of Advances in Modeling Earth Systems}, volume = {10}, year = {2018}, month = {2018-08}, pages = {1952{\textendash}1969}, abstract = {Global chemistry-climate models are computationally burdened as the chemical mechanisms become more complex and realistic. Optimization for graphics processing units (GPU) may make longer global simulation with regional detail possible, but limited study has been done to explore the potential benefit for the atmospheric chemistry modeling. Hence, in this study, the second-order Rosenbrock solver of the chemistry module of CAM4-Chem is ported to the GPU to gauge potential speed-up. We find that on the CPU, the fastest performance is achieved using the Intel compiler with a block interleaved memory layout. Different combinations of compiler and memory layout lead to ~11.02{\texttimes} difference in the computational time. In contrast, the GPU version performs the best when using a combination of fully interleaved memory layout with block size equal to the warp size, CUDA streams for independent kernels, and constant memory. Moreover, the most efficient data transfer between CPU and GPU is gained by allocating the memory contiguously during the data initialization on the GPU. Compared to one CPU core, the speed-up of using one GPU alone reaches a factor of ~11.7{\texttimes} for the computation alone and ~3.82{\texttimes} when the data transfer between CPU and GPU is considered. Using one GPU alone is also generally faster than the multithreaded implementation for 16 CPU cores in a compute node and the single-source solution (OpenACC). 
The best performance is achieved by the implementation of the hybrid CPU/GPU version, but rescheduling the workload among the CPU cores is required before the practical CAM4-Chem simulation.}, keywords = {compiler, CUDA, data transfer, gpu, hybrid, memory layout}, doi = {https://doi.org/10.1029/2018MS001276}, author = {Jian Sun and Joshua Fu and John Drake and Qingzhao Zhu and Azzam Haidar and Mark Gates and Stanimire Tomov and Jack Dongarra} } @inproceedings {1259, title = {The Design of Fast and Energy-Efficient Linear Solvers: On the Potential of Half-Precision Arithmetic and Iterative Refinement Techniques}, journal = {International Conference on Computational Science (ICCS 2018)}, volume = {10860}, year = {2018}, month = {2018-06}, pages = {586{\textendash}600}, publisher = {Springer}, address = {Wuxi, China}, abstract = {As parallel computers approach exascale, power efficiency in high-performance computing (HPC) systems is of increasing concern. Exploiting both the hardware features and algorithms is an effective solution to achieve power efficiency, and to address the energy constraints in modern and future HPC systems. In this work, we present a novel design and implementation of an energy-efficient solution for dense linear systems of equations, which are at the heart of large-scale HPC applications. The proposed energy-efficient linear system solvers are based on two main components: (1) iterative refinement techniques, and (2) reduced-precision computing features in modern accelerators and coprocessors. While most of the energy efficiency approaches aim to reduce the consumption with a minimal performance penalty, our method improves both the performance and the energy efficiency. Compared to highly-optimized linear system solvers, our kernels deliver the same accuracy solution up to 2{\texttimes} faster and reduce the energy consumption up to half on Intel Knights Landing (KNL) architectures. By efficiently using the Tensor Cores available in the NVIDIA V100 PCIe GPUs, the speedups can be up to 4{\texttimes} , with more than 80\% reduction in the energy consumption.}, doi = {https://doi.org/10.1007/978-3-319-93698-7_45}, url = {https://rdcu.be/bcKSC}, author = {Azzam Haidar and Ahmad Abdelfattah and Mawussi Zounon and Panruo Wu and Srikara Pranesh and Stanimire Tomov and Jack Dongarra} } @techreport {1232, title = {Evaluation and Design of FFT for Distributed Accelerated Systems}, journal = {ECP WBS 2.3.3.09 Milestone Report}, number = {FFT-ECP ST-MS-10-1216}, year = {2018}, month = {2018-10}, publisher = {Innovative Computing Laboratory, University of Tennessee}, author = {Stanimire Tomov and Azzam Haidar and Daniel Schultz and Jack Dongarra} } @article {1208, title = {A Guide for Achieving High Performance with Very Small Matrices on GPUs: A Case Study of Batched LU and Cholesky Factorizations}, journal = {IEEE Transactions on Parallel and Distributed Systems}, volume = {29}, year = {2018}, month = {2018-05}, pages = {973{\textendash}984}, abstract = {We present a high-performance GPU kernel with a substantial speedup over vendor libraries for very small matrix computations. In addition, we discuss most of the challenges that hinder the design of efficient GPU kernels for small matrix algorithms. We propose relevant algorithm analysis to harness the full power of a GPU, and strategies for predicting the performance, before introducing a proper implementation. We develop a theoretical analysis and a methodology for high-performance linear solvers for very small matrices. 
As test cases, we take the Cholesky and LU factorizations and show how the proposed methodology enables us to achieve a performance close to the theoretical upper bound of the hardware. This work investigates and proposes novel algorithms for designing highly optimized GPU kernels for solving batches of hundreds of thousands of small-size Cholesky and LU factorizations. Our focus on efficient batched Cholesky and batched LU kernels is motivated by the increasing need for these kernels in scientific simulations (e.g., astrophysics applications). Techniques for optimal memory traffic, register blocking, and tunable concurrency are incorporated in our proposed design. The proposed GPU kernels achieve performance speedups versus CUBLAS of up to 6x for the factorizations, using double precision arithmetic on an NVIDIA Pascal P100 GPU.}, doi = {10.1109/TPDS.2017.2783929}, author = {Azzam Haidar and Ahmad Abdelfattah and Mawussi Zounon and Stanimire Tomov and Jack Dongarra} } @conference {1264, title = {Harnessing GPU Tensor Cores for Fast FP16 Arithmetic to Speed up Mixed-Precision Iterative Refinement Solvers}, booktitle = {The International Conference for High Performance Computing, Networking, Storage, and Analysis (SC18)}, year = {2018}, month = {2018-11}, publisher = {IEEE}, organization = {IEEE}, address = {Dallas, TX}, abstract = {Low-precision floating-point arithmetic is a powerful tool for accelerating scientific computing applications, especially those in artificial intelligence. Here, we present an investigation showing that other high-performance computing (HPC) applications can also harness this power. Specifically, we use the general HPC problem, Ax = b, where A is a large dense matrix, and a double precision (FP64) solution is needed for accuracy. Our approach is based on mixed-precision (FP16-FP64) iterative refinement, and we generalize and extend prior advances into a framework, for which we develop architecture-specific algorithms and highly tuned implementations. These new methods show how using half-precision Tensor Cores (FP16-TC) for the arithmetic can provide up to 4{\texttimes} speedup. This is due to the performance boost that the FP16-TC provide as well as to the improved accuracy over the classical FP16 arithmetic that is obtained because the GEMM accumulation occurs in FP32 arithmetic.}, doi = {https://doi.org/10.1109/SC.2018.00050}, author = {Azzam Haidar and Stanimire Tomov and Jack Dongarra and Nicholas J. Higham} } @article {1335, title = {Harnessing GPU{\textquoteright}s Tensor Cores Fast FP16 Arithmetic to Speedup Mixed-Precision Iterative Refinement Solvers and Achieve 74 Gflops/Watt on Nvidia V100}, year = {2018}, month = {2018-03}, publisher = {GPU Technology Conference (GTC), Poster}, address = {San Jose, CA}, author = {Azzam Haidar and Ahmad Abdelfattah and Stanimire Tomov and Jack Dongarra} } @article {1199, title = {Investigating Power Capping toward Energy-Efficient Scientific Applications}, journal = {Concurrency Computation: Practice and Experience}, volume = {2018}, year = {2018}, month = {2018-04}, pages = {1-14}, abstract = {The emergence of power efficiency as a primary constraint in processor and system design poses new challenges concerning power and energy awareness for numerical libraries and scientific applications. Power consumption also plays a major role in the design of data centers, which may house petascale or exascale-level computing systems. 
At these extreme scales, understanding and improving the energy efficiency of numerical libraries and their related applications becomes a crucial part of the successful implementation and operation of the computing system. In this paper, we study and investigate the practice of controlling a compute system{\textquoteright}s power usage, and we explore how different power caps affect the performance of numerical algorithms with different computational intensities. Further, we determine the impact, in terms of performance and energy usage, that these caps have on a system running scientific applications. This analysis will enable us to characterize the types of algorithms that benefit most from these power management schemes. Our experiments are performed using a set of representative kernels and several popular scientific benchmarks. We quantify a number of power and performance measurements and draw observations and conclusions that can be viewed as a roadmap to achieving energy efficiency in the design and execution of scientific algorithms.}, keywords = {energy efficiency, High Performance Computing, Intel Xeon Phi, Knights landing, papi, performance analysis, Performance Counters, power efficiency}, doi = {https://doi.org/10.1002/cpe.4485}, url = {https://onlinelibrary.wiley.com/doi/abs/10.1002/cpe.4485}, author = {Azzam Haidar and Heike Jagode and Phil Vaccaro and Asim YarKhan and Stanimire Tomov and Jack Dongarra} } @article {1330, title = {MATEDOR: MAtrix, TEnsor, and Deep-learning Optimized Routines}, year = {2018}, month = {2018-11}, publisher = {The International Conference for High Performance Computing, Networking, Storage, and Analysis (SC18), Research Poster}, address = {Dallas, TX}, author = {Ahmad Abdelfattah and Jack Dongarra and Azzam Haidar and Stanimire Tomov and Ichitaro Yamazaki} } @article {1333, title = {MAtrix, TEnsor, and Deep-learning Optimized Routines (MATEDOR)}, year = {2018}, month = {2018-04}, publisher = {NSF PI Meeting, Poster}, address = {Washington, DC}, doi = {https://doi.org/10.6084/m9.figshare.6174143.v3}, author = {Azzam Haidar and Stanimire Tomov and Ahmad Abdelfattah and Ichitaro Yamazaki and Jack Dongarra} } @conference {1210, title = {Optimizing GPU Kernels for Irregular Batch Workloads: A Case Study for Cholesky Factorization}, booktitle = {IEEE High Performance Extreme Computing Conference (HPEC{\textquoteright}18)}, year = {2018}, month = {2018-09}, publisher = {IEEE}, organization = {IEEE}, address = {Waltham, MA}, abstract = {This paper introduces several frameworks for the design and implementation of high performance GPU kernels that target batch workloads with irregular sizes. Such workloads are ubiquitous in many scientific applications, including sparse direct solvers, astrophysics, and quantum chemistry. The paper addresses two main categories of frameworks, taking the Cholesky factorization as a case study. The first uses hostside kernel launches, and the second uses device-side launches. Within each category, different design options are introduced, with an emphasis on the advantages and the disadvantages of each approach. 
Our best performing design outperforms the state-of-the-art CPU implementation, scoring up to 4.7{\texttimes} speedup in double precision on a Pascal P100 GPU.}, author = {Ahmad Abdelfattah and Azzam Haidar and Stanimire Tomov and Jack Dongarra} } @article {1258, title = {The Singular Value Decomposition: Anatomy of Optimizing an Algorithm for Extreme Scale}, journal = {SIAM Review}, volume = {60}, year = {2018}, month = {2018-11}, pages = {808{\textendash}865}, abstract = {The computation of the singular value decomposition, or SVD, has a long history with many improvements over the years, both in its implementations and algorithmically. Here, we survey the evolution of SVD algorithms for dense matrices, discussing the motivation and performance impacts of changes. There are two main branches of dense SVD methods: bidiagonalization and Jacobi. Bidiagonalization methods started with the implementation by Golub and Reinsch in Algol60, which was subsequently ported to Fortran in the EISPACK library, and was later more efficiently implemented in the LINPACK library, targeting contemporary vector machines. To address cache-based memory hierarchies, the SVD algorithm was reformulated to use Level 3 BLAS in the LAPACK library. To address new architectures, ScaLAPACK was introduced to take advantage of distributed computing, and MAGMA was developed for accelerators such as GPUs. Algorithmically, the divide and conquer and MRRR algorithms were developed to reduce the number of operations. Still, these methods remained memory bound, so two-stage algorithms were developed to reduce memory operations and increase the computational intensity, with efficient implementations in PLASMA, DPLASMA, and MAGMA. Jacobi methods started with the two-sided method of Kogbetliantz and the one-sided method of Hestenes. They have likewise had many developments, including parallel and block versions and preconditioning to improve convergence. In this paper, we investigate the impact of these changes by testing various historical and current implementations on a common, modern multicore machine and a distributed computing platform. 
We show that algorithmic and implementation improvements have increased the speed of the SVD by several orders of magnitude, while using up to 40 times less energy.}, keywords = {bidiagonal matrix, bisection, Divide and conquer, Hestenes method, Jacobi method, Kogbetliantz method, MRRR, QR iteration, Singular value decomposition, SVD}, issn = {0036-1445}, doi = {10.1137/17M1117732}, url = {https://epubs.siam.org/doi/10.1137/17M1117732}, author = {Jack Dongarra and Mark Gates and Azzam Haidar and Jakub Kurzak and Piotr Luszczek and Stanimire Tomov and Ichitaro Yamazaki} } @article {1334, title = {Tensor Contractions using Optimized Batch GEMM Routines}, year = {2018}, month = {2018-03}, publisher = {GPU Technology Conference (GTC), Poster}, address = {San Jose, CA}, author = {Ahmad Abdelfattah and Azzam Haidar and Stanimire Tomov and Jack Dongarra} } @article {1332, title = {Using GPU FP16 Tensor Cores Arithmetic to Accelerate Mixed-Precision Iterative Refinement Solvers and Reduce Energy Consumption}, year = {2018}, month = {2018-06}, publisher = {ISC High Performance (ISC18), Best Poster Award}, address = {Frankfurt, Germany}, author = {Azzam Haidar and Stanimire Tomov and Ahmad Abdelfattah and Mawussi Zounon and Jack Dongarra} } @conference {1265, title = {Using GPU FP16 Tensor Cores Arithmetic to Accelerate Mixed-Precision Iterative Refinement Solvers and Reduce Energy Consumption}, booktitle = {ISC High Performance (ISC{\textquoteright}18), Best Poster}, year = {2018}, month = {2018-06}, address = {Frankfurt, Germany}, author = {Azzam Haidar and Stanimire Tomov and Ahmad Abdelfattah and Mawussi Zounon and Jack Dongarra} } @article {1341, title = {Accelerating Tensor Contractions in High-Order FEM with MAGMA Batched}, year = {2017}, month = {2017-03}, publisher = {SIAM Conference on Computer Science and Engineering (SIAM CSE17), Presentation}, address = {Atlanta, GA}, author = {Ahmad Abdelfattah and Marc Baboulin and Veselin Dobrev and Jack Dongarra and Christopher Earl and Jo{\"e}l Falcou and Azzam Haidar and Ian Karlin and Tzanio Kolev and Ian Masliah and Stanimire Tomov} } @techreport {1175, title = {C++ API for Batch BLAS}, journal = {SLATE Working Notes}, number = {04, ICL-UT-17-12}, year = {2017}, month = {2017-12}, publisher = {University of Tennessee}, author = {Ahmad Abdelfattah and Konstantin Arturov and Cris Cecka and Jack Dongarra and Chip Freitag and Mark Gates and Azzam Haidar and Jakub Kurzak and Piotr Luszczek and Stanimire Tomov and Panruo Wu} } @article {1103, title = {Factorization and Inversion of a Million Matrices using GPUs: Challenges and Countermeasures}, journal = {Procedia Computer Science}, volume = {108}, year = {2017}, month = {2017-06}, pages = {606{\textendash}615}, abstract = {This paper presents new algorithmic approaches and optimization techniques for LU factorization and matrix inversion of millions of very small matrices using GPUs. These problems appear in many scientific applications including astrophysics and generation of block-Jacobi preconditioners. We show that, for very small problem sizes, design and optimization of GPU kernels require a mindset different from the one usually used when designing LAPACK algorithms for GPUs. Techniques for optimal memory traffic, register blocking, and tunable concurrency are incorporated in our proposed design. We also take advantage of the small matrix sizes to eliminate the intermediate row interchanges in both the factorization and inversion kernels. 
The proposed GPU kernels achieve performance speedups vs. CUBLAS of up to 6{\texttimes} for the factorization, and 14{\texttimes} for the inversion, using double precision arithmetic on a Pascal P100 GPU.}, doi = {https://doi.org/10.1016/j.procs.2017.05.250}, author = {Ahmad Abdelfattah and Azzam Haidar and Stanimire Tomov and Jack Dongarra} } @article {1102, title = {Fast Cholesky Factorization on GPUs for Batch and Native Modes in MAGMA}, journal = {Journal of Computational Science}, volume = {20}, year = {2017}, month = {2017-05}, pages = {85{\textendash}93}, abstract = {This paper presents a GPU-accelerated Cholesky factorization for two different modes of operation. The first one is the batch mode, where many independent factorizations on small matrices can be performed concurrently. This mode supports fixed size and variable size problems, and is found in many scientific applications. The second mode is the native mode, where one factorization is performed on a large matrix without any CPU involvement, which allows the CPU to do other useful work. We show that, despite the different workloads, both modes of operation share a common code-base that uses the GPU only. We also show that the developed routines achieve significant speedups against a multicore CPU using the MKL library, and against a GPU implementation by cuSOLVER. This work is part of the MAGMA library.}, keywords = {GPU computing, Cholesky factorization, Batched execution}, doi = {https://doi.org/10.1016/j.jocs.2016.12.009}, author = {Ahmad Abdelfattah and Azzam Haidar and Stanimire Tomov and Jack Dongarra} } @article {1083, title = {A Framework for Out of Memory SVD Algorithms}, journal = {ISC High Performance 2017}, year = {2017}, month = {2017-06}, pages = {158{\textendash}178}, abstract = {Many important applications {\textendash} from big data analytics to information retrieval, gene expression analysis, and numerical weather prediction {\textendash} require the solution of large dense singular value decompositions (SVD). In many cases the problems are too large to fit into the computer{\textquoteright}s main memory, and thus require specialized out-of-core algorithms that use disk storage. In this paper, we analyze the SVD communications, as related to hierarchical memories, and design a class of algorithms that minimizes them. This class includes out-of-core SVDs but can also be applied between other consecutive levels of the memory hierarchy, e.g., GPU SVD using the CPU memory for large problems. We call these out-of-memory (OOM) algorithms. To design OOM SVDs, we first study the communications for both classical one-stage blocked SVD and two-stage tiled SVD. We present the theoretical analysis and strategies to design, as well as implement, these communication avoiding OOM SVD algorithms. 
We show performance results for a multicore architecture that illustrate our theoretical findings and match our performance models.}, doi = {https://doi.org/10.1007/978-3-319-58667-0_9}, author = {Khairul Kabir and Azzam Haidar and Stanimire Tomov and Aurelien Bouteiller and Jack Dongarra} } @conference {1142, title = {High-performance Cholesky Factorization for GPU-only Execution}, booktitle = {Proceedings of the General Purpose GPUs (GPGPU-10)}, year = {2017}, month = {2017-02}, publisher = {ACM}, organization = {ACM}, address = {Austin, TX}, abstract = {We present our performance analysis, algorithm designs, and the optimizations needed for the development of high-performance GPU-only algorithms, and in particular, for the dense Cholesky factorization. In contrast to currently promoted designs that solve parallelism challenges on multicore architectures by representing algorithms as Directed Acyclic Graphs (DAGs), where nodes are tasks of fine granularity and edges are the dependencies between the tasks, our designs explicitly target manycore architectures like GPUs and feature coarse granularity tasks (that can be hierarchically split into fine grain data-parallel subtasks). Furthermore, in contrast to hybrid algorithms that schedule difficult to parallelize tasks on CPUs, we develop highly-efficient code for entirely GPU execution. GPU-only codes remove the expensive CPU-to-GPU communications and the tuning challenges related to slow CPU and/or low CPU-to-GPU bandwidth. We show that on the latest GPUs, like the P100, this becomes so important that the GPU-only code even outperforms the hybrid MAGMA algorithms when the CPU tasks and communications cannot be entirely overlapped with GPU computations. We achieve up to 4,300 GFlop/s in double precision on a P100 GPU, which is about 7-8{\texttimes} faster than high-end multicore CPUs, e.g., two 10-core Intel Xeon E5-2650 v3 Haswell CPUs, where MKL runs up to about 500-600 Gflop/s. The new algorithm also significantly outperforms the GPU-only implementation currently available in the NVIDIA cuSOLVER library.}, doi = {https://doi.org/10.1145/3038228.3038237}, author = {Azzam Haidar and Ahmad Abdelfattah and Stanimire Tomov and Jack Dongarra} } @conference {1140, title = {Investigating Half Precision Arithmetic to Accelerate Dense Linear System Solvers}, booktitle = {ScalA17: 8th Workshop on Latest Advances in Scalable Algorithms for Large-Scale Systems}, year = {2017}, month = {2017-11}, publisher = {ACM}, organization = {ACM}, address = {Denver, CO}, abstract = {The use of low-precision arithmetic in mixed-precision computing methods has been a powerful tool to accelerate numerous scientific computing applications. Artificial intelligence (AI) in particular has pushed this to current extremes, making use of half-precision floating-point arithmetic (FP16) in approaches based on neural networks. The appeal of FP16 is in the high performance that can be achieved using it on today{\textquoteright}s powerful manycore GPU accelerators, e.g., like the NVIDIA V100, that can provide 120 TeraFLOPS alone in FP16. We present an investigation showing that other HPC applications can harness this power too, and in particular, the general HPC problem of solving Ax = b, where A is a large dense matrix, and the solution is needed in FP32 or FP64 accuracy. 
Our approach is based on the mixed-precision iterative refinement technique {\textendash} we generalize and extend prior advances into a framework, for which we develop architecture-specific algorithms and highly-tuned implementations that resolve the main computational challenges of efficiently parallelizing, scaling, and using FP16 arithmetic in the approach on high-end GPUs. Subsequently, we show for the first time how the use of FP16 arithmetic can significantly accelerate, as well as make more energy efficient, FP32 or FP64-precision Ax = b solvers. Our results are reproducible, and the developments will be made available through the MAGMA library. We quantify in practice the performance and limitations of the approach.}, author = {Azzam Haidar and Panruo Wu and Stanimire Tomov and Jack Dongarra} } @article {1340, title = {MAGMA Tensors and Batched Computing for Accelerating Applications on GPUs}, year = {2017}, month = {2017-05}, publisher = {GPU Technology Conference (GTC17), Presentation in Session S7728}, address = {San Jose, CA}, author = {Stanimire Tomov and Azzam Haidar} } @article {1337, title = {MagmaDNN {\textendash} High-Performance Data Analytics for Manycore GPUs and CPUs}, year = {2017}, month = {2017-12}, publisher = {2017 Summer Research Experiences for Undergraduate (REU), Presentation}, address = {Knoxville, TN}, author = {Lucien Ng and Kwai Wong and Azzam Haidar and Stanimire Tomov and Jack Dongarra} } @conference {1084, title = {Novel HPC Techniques to Batch Execution of Many Variable Size BLAS Computations on GPUs}, booktitle = {International Conference on Supercomputing (ICS {\textquoteright}17)}, year = {2017}, month = {2017-06}, publisher = {ACM}, organization = {ACM}, address = {Chicago, Illinois}, doi = {10.1145/3079079.3079103}, url = {http://dl.acm.org/citation.cfm?id=3079103}, author = {Ahmad Abdelfattah and Azzam Haidar and Stanimire Tomov and Jack Dongarra} } @conference {1085, title = {Optimizing the SVD Bidiagonalization Process for a Batch of Small Matrices}, booktitle = {International Conference on Computational Science (ICCS 2017)}, year = {2017}, month = {2017-06}, publisher = {Procedia Computer Science}, organization = {Procedia Computer Science}, address = {Zurich, Switzerland}, abstract = {A challenging class of problems arising in many GPU applications, called batched problems, involves linear algebra operations on many small-sized matrices. We designed batched BLAS (Basic Linear Algebra Subroutines) routines, and in particular the Level-2 BLAS GEMV and the Level-3 BLAS GEMM routines, to solve them. We proposed device functions and big-tile settings in our batched BLAS design. We adopted auto-tuning to optimize different instances of GEMV routines. We illustrated our batched BLAS approach to optimize batched bi-diagonalization progressively on a K40c GPU. 
The optimization techniques in this paper are applicable to the other two-sided factorizations as well.}, doi = {https://doi.org/10.1016/j.procs.2017.05.237}, url = {http://www.sciencedirect.com/science/article/pii/S1877050917308645}, author = {Tingxing Dong and Azzam Haidar and Stanimire Tomov and Jack Dongarra} } @conference {1141, title = {Out of Memory SVD Solver for Big Data}, booktitle = {2017 IEEE High Performance Extreme Computing Conference (HPEC{\textquoteright}17)}, year = {2017}, month = {2017-09}, publisher = {IEEE}, organization = {IEEE}, address = {Waltham, MA}, abstract = {Many applications {\textendash} from data compression to numerical weather prediction and information retrieval {\textendash} need to compute large dense singular value decompositions (SVD). When the problems are too large to fit into the computer{\textquoteright}s main memory, specialized out-of-core algorithms that use disk storage are required. A typical example is when trying to analyze a large data set through tools like MATLAB or Octave, but the data is just too large to be loaded. To overcome this, we designed a class of out-of-memory (OOM) algorithms to reduce communication, as well as overlap it with computation. Of particular interest are OOM algorithms for matrices of size m{\texttimes}n, where m >> n or m << n, e.g., corresponding to cases of too many variables or too many observations. To design OOM SVDs, we first study the communications cost for the SVD techniques as well as for the QR/LQ factorization followed by SVD. We present the theoretical analysis of the data movement cost and strategies to design OOM SVD algorithms. We show performance results for a multicore architecture that illustrate our theoretical findings and match our performance models. Moreover, our experimental results show the feasibility and superiority of the OOM SVD.}, author = {Azzam Haidar and Khairul Kabir and Diana Fayad and Stanimire Tomov and Jack Dongarra} } @techreport {1173, title = {PLASMA 17 Performance Report}, journal = {Innovative Computing Laboratory Technical Report}, number = {ICL-UT-17-11}, year = {2017}, month = {2017-06}, publisher = {University of Tennessee}, abstract = {PLASMA (Parallel Linear Algebra for Multicore Architectures) is a dense linear algebra package at the forefront of multicore computing. PLASMA is designed to deliver the highest possible performance from a system with multiple sockets of multicore processors. PLASMA achieves this objective by combining state of the art solutions in parallel algorithms, scheduling, and software engineering. PLASMA currently offers a collection of routines for solving linear systems of equations and least square problems.}, author = {Maksims Abalenkovs and Negin Bagherpour and Jack Dongarra and Mark Gates and Azzam Haidar and Jakub Kurzak and Piotr Luszczek and Samuel Relton and Jakub Sistek and David Stevens and Panruo Wu and Ichitaro Yamazaki and Asim YarKhan and Mawussi Zounon} } @techreport {1172, title = {PLASMA 17.1 Functionality Report}, journal = {Innovative Computing Laboratory Technical Report}, number = {ICL-UT-17-10}, year = {2017}, month = {2017-06}, publisher = {University of Tennessee}, abstract = {PLASMA (Parallel Linear Algebra for Multicore Architectures) is a dense linear algebra package at the forefront of multicore computing. PLASMA is designed to deliver the highest possible performance from a system with multiple sockets of multicore processors. 
PLASMA achieves this objective by combining state of the art solutions in parallel algorithms, scheduling, and software engineering. PLASMA currently offers a collection of routines for solving linear systems of equations and least square problems.}, author = {Maksims Abalenkovs and Negin Bagherpour and Jack Dongarra and Mark Gates and Azzam Haidar and Jakub Kurzak and Piotr Luszczek and Samuel Relton and Jakub Sistek and David Stevens and Panruo Wu and Ichitaro Yamazaki and Asim YarKhan and Mawussi Zounon} } @techreport {1171, title = {POMPEI: Programming with OpenMP4 for Exascale Investigations}, journal = {Innovative Computing Laboratory Technical Report}, number = {ICL-UT-17-09}, year = {2017}, month = {2017-12}, publisher = {University of Tennessee}, abstract = {The objective of the Programming with OpenMP4 for Exascale Investigations (POMPEI) project is to explore new task-based programming techniques together with data structure centric programming for scientific applications to harness the potential of extreme-scale systems. Tasking is by now a well-established approach on such systems, as it has been used successfully to handle their large-scale parallelism and heterogeneity, which are leading challenges on the way to exascale computing. The approach is to harness the latest features of OpenMP4.5 and OpenACC2.5 to design abstractions shared among tasks and mapped efficiently to data-structure driven programming paradigms. This technical report describes the approach, along with its reference implementation and results for dense linear algebra algorithms.}, author = {Jack Dongarra and Azzam Haidar and Oscar Hernandez and Stanimire Tomov and Manjunath Gorentla Venkata} } @conference {1134, title = {Power-aware Computing: Measurement, Control, and Performance Analysis for Intel Xeon Phi}, booktitle = {2017 IEEE High Performance Extreme Computing Conference (HPEC{\textquoteright}17), Best Paper Finalist}, year = {2017}, month = {2017-09}, publisher = {IEEE}, organization = {IEEE}, address = {Waltham, MA}, abstract = {The emergence of power efficiency as a primary constraint in processor and system designs poses new challenges concerning power and energy awareness for numerical libraries and scientific applications. Power consumption also plays a major role in the design of data centers, in particular for peta- and exascale systems. Understanding and improving the energy efficiency of numerical simulation becomes crucial. We present a detailed study and investigation toward controlling power usage and exploring how different power caps affect the performance of numerical algorithms with different computational intensities, and determine the impact and correlation with performance of scientific applications. Our analysis is performed using a set of representative kernels, as well as many highly used scientific benchmarks.
We quantify a number of power and performance measurements, and draw observations and conclusions that can be viewed as a roadmap toward achieving energy-efficient computing algorithms.}, doi = {https://doi.org/10.1109/HPEC.2017.8091085}, author = {Azzam Haidar and Heike Jagode and Asim YarKhan and Phil Vaccaro and Stanimire Tomov and Jack Dongarra} } @article {1338, title = {Power-Aware HPC on Intel Xeon Phi KNL Processors}, year = {2017}, month = {2017-06}, publisher = {ISC High Performance (ISC17), Intel Booth Presentation}, address = {Frankfurt, Germany}, author = {Azzam Haidar and Heike Jagode and Asim YarKhan and Phil Vaccaro and Stanimire Tomov and Jack Dongarra} } @techreport {1080, title = {Roadmap for the Development of a Linear Algebra Library for Exascale Computing: SLATE: Software for Linear Algebra Targeting Exascale}, journal = {SLATE Working Notes}, number = {01, ICL-UT-17-02}, year = {2017}, month = {2017-06}, publisher = {Innovative Computing Laboratory, University of Tennessee}, type = {SLATE Working Notes}, author = {Ahmad Abdelfattah and Hartwig Anzt and Aurelien Bouteiller and Anthony Danalis and Jack Dongarra and Mark Gates and Azzam Haidar and Jakub Kurzak and Piotr Luszczek and Stanimire Tomov and Stephen Wood and Panruo Wu and Ichitaro Yamazaki and Asim YarKhan} } @techreport {1082, title = {Small Tensor Operations on Advanced Architectures for High-Order Applications}, journal = {University of Tennessee Computer Science Technical Report}, number = {UT-EECS-17-749}, year = {2017}, month = {2017-04}, publisher = {Innovative Computing Laboratory, University of Tennessee}, author = {Ahmad Abdelfattah and Marc Baboulin and Veselin Dobrev and Jack Dongarra and Azzam Haidar and Ian Karlin and Tzanio Kolev and Ian Masliah and Stanimire Tomov} } @article {, title = {With Extreme Computing, the Rules Have Changed}, journal = {Computing in Science \& Engineering}, volume = {19}, year = {2017}, month = {2017-05}, pages = {52-62}, abstract = {On the eve of exascale computing, traditional wisdom no longer applies. High-performance computing is gone as we know it.
This article discusses a range of new algorithmic techniques emerging in the context of exascale computing, many of which defy the common wisdom of high-performance computing and are considered unorthodox, but could turn out to be a necessity in the near future.}, doi = {https://doi.org/10.1109/MCSE.2017.48}, author = {Jack Dongarra and Stanimire Tomov and Piotr Luszczek and Jakub Kurzak and Mark Gates and Ichitaro Yamazaki and Hartwig Anzt and Azzam Haidar and Ahmad Abdelfattah} } @article {1342, title = {Accelerating Tensor Contractions for High-Order FEM on CPUs, GPUs, and KNLs}, year = {2016}, month = {2016-09}, publisher = {Smoky Mountains Computational Sciences and Engineering Conference (SMC16), Poster}, address = {Gatlinburg, TN}, author = {Azzam Haidar and Ahmad Abdelfattah and Veselin Dobrev and Ian Karlin and Tzanio Kolev and Stanimire Tomov and Jack Dongarra} } @article {1344, title = {Cholesky Factorization on Batches of Matrices with Fixed and Variable Sizes}, year = {2016}, month = {2016-04}, publisher = {GPU Technology Conference (GTC16), Poster}, address = {San Jose, CA}, author = {Ahmad Abdelfattah and Azzam Haidar and Stanimire Tomov and Jack Dongarra} } @conference {940, title = {On the Development of Variable Size Batched Computation for Heterogeneous Parallel Architectures}, booktitle = {The 17th IEEE International Workshop on Parallel and Distributed Scientific and Engineering Computing (PDSEC 2016), IPDPS 2016}, year = {2016}, month = {2016-05}, publisher = {IEEE}, organization = {IEEE}, address = {Chicago, IL}, abstract = {

Many scientific applications, ranging from national security to medical advances, require solving a number of relatively small-size independent problems. As the size of each individual problem does not provide sufficient parallelism for the underlying hardware, especially accelerators, these problems must be solved concurrently as a batch in order to saturate the hardware with enough work, hence the name batched computation. A possible simplification is to assume a uniform size for all problems. However, real applications do not necessarily satisfy such an assumption. Consequently, an efficient solution for variable-size batched computations is required.

This paper proposes a foundation for high performance variable-size batched matrix computation based on Graphics Processing Units (GPUs). Being throughput-oriented processors, GPUs favor regular computation and less divergence among threads, in order to achieve high performance. Therefore, the development of high performance numerical software for this kind of problem is challenging. As a case study, we developed efficient batched Cholesky factorization algorithms for relatively small matrices of different sizes. However, most of the strategies and the software developed, and in particular a set of variable size batched BLAS kernels, can be used in many other dense matrix factorizations, large scale sparse direct multifrontal solvers, and applications. We propose new interfaces and mechanisms to handle the irregular computation pattern on the GPU. To the authors{\textquoteright} knowledge, this is the first attempt to develop high performance software for this class of problems. Using a K40c GPU, our performance tests show speedups of up to 2.5{\texttimes} against two Sandy Bridge CPUs (8-core each) running the Intel MKL library.

}, keywords = {batched computation, GPUs, variable small sizes}, author = {Ahmad Abdelfattah and Azzam Haidar and Stanimire Tomov and Jack Dongarra} } @conference {939, title = {Heterogeneous Streaming}, booktitle = {The Sixth International Workshop on Accelerators and Hybrid Exascale Systems (AsHES), IPDPS 2016}, year = {2016}, month = {2016-05}, publisher = {IEEE}, organization = {IEEE}, address = {Chicago, IL}, abstract = {This paper introduces a new heterogeneous streaming library called hetero Streams (hStreams). We show how a simple FIFO streaming model can be applied to heterogeneous systems that include manycore coprocessors and multicore CPUs. This model supports concurrency across nodes, among tasks within a node, and between data transfers and computation. We give examples for different approaches, show how the implementation can be layered, analyze overheads among layers, and apply those models to parallelize applications using simple, intuitive interfaces. We compare the features and versatility of hStreams, OpenMP, CUDA Streams, and OmpSs. We show how the use of hStreams makes it easier for scientists to identify tasks and easily expose concurrency among them, and how it enables tuning experts and runtime systems to tailor execution for different heterogeneous targets. Practical application examples are taken from the field of numerical linear algebra, commercial structural simulation software, and a seismic processing application.}, keywords = {plasma}, author = {Chris J. Newburn and Gaurav Bansal and Michael Wood and Luis Crivelli and Judit Planas and Alejandro Duran and Paulo Souza and Leonardo Borges and Piotr Luszczek and Stanimire Tomov and Jack Dongarra and Hartwig Anzt and Mark Gates and Azzam Haidar and Yulu Jia and Khairul Kabir and Ichitaro Yamazaki and Jesus Labarta} } @conference {964, title = {High-performance Matrix-matrix Multiplications of Very Small Matrices}, booktitle = {22nd International European Conference on Parallel and Distributed Computing (Euro-Par{\textquoteright}16)}, year = {2016}, month = {2016-08}, publisher = {Springer International Publishing}, organization = {Springer International Publishing}, address = {Grenoble, France}, abstract = {The use of the general dense matrix-matrix multiplication (GEMM) is fundamental for obtaining high performance in many scientific computing applications. GEMMs for small matrices (of sizes less than 32), however, are not sufficiently optimized in existing libraries. In this paper we consider the case of many small GEMMs on either CPU or GPU architectures. This is a case that often occurs in applications like big data analytics, machine learning, high-order FEM, and others. The GEMMs are grouped together in a single batched routine. We present algorithms and optimization techniques specialized for these cases to obtain performance that is within 90\% of the optimal. We show that these results outperform currently available state-of-the-art implementations and vendor-tuned math libraries.}, author = {Ian Masliah and Ahmad Abdelfattah and Azzam Haidar and Stanimire Tomov and Jo{\"e}l Falcou and Jack Dongarra} } @conference {942, title = {High-Performance Tensor Contractions for GPUs}, booktitle = {International Conference on Computational Science (ICCS{\textquoteright}16)}, year = {2016}, month = {2016-06}, address = {San Diego, CA}, abstract = {We present a computational framework for high-performance tensor contractions on GPUs.
High-performance is difficult to obtain using existing libraries, especially for many independent contractions where each contraction is very small, e.g., sub-vector/warp in size. However, using our framework to batch contractions plus application-specifics, we demonstrate close to peak performance results. In particular, to accelerate large scale tensor-formulated high-order finite element method (FEM) simulations, which is the main focus and motivation for this work, we represent contractions as tensor index reordering plus matrix-matrix multiplications (GEMMs). This is a key factor to achieve algorithmically many-fold acceleration (vs. not using it) due to possible reuse of data loaded in fast memory. In addition to using this context knowledge, we design tensor data-structures, tensor algebra interfaces, and new tensor contraction algorithms and implementations to achieve 90+\% of a theoretically derived peak on GPUs. On a K40c GPU for contractions resulting in GEMMs on square matrices of size 8 for example, we are 2.8{\texttimes} faster than CUBLAS, and 8.5{\texttimes} faster than MKL on 16 cores of Intel Xeon E5-2670 (Sandy Bridge) 2.60GHz CPUs. Finally, we apply autotuning and code generation techniques to simplify tuning and provide an architecture-aware, user-friendly interface.}, keywords = {Applications, Batched linear algebra, FEM, gpu, Tensor contractions, Tensor HPC}, author = {Ahmad Abdelfattah and Marc Baboulin and Veselin Dobrev and Jack Dongarra and Christopher Earl and Jo{\"e}l Falcou and Azzam Haidar and Ian Karlin and Tzanio Kolev and Ian Masliah and Stanimire Tomov} } @techreport {929, title = {High-Performance Tensor Contractions for GPUs}, journal = {University of Tennessee Computer Science Technical Report}, number = {UT-EECS-16-738}, year = {2016}, month = {2016-01}, publisher = {University of Tennessee}, abstract = {We present a computational framework for high-performance tensor contractions on GPUs. High-performance is difficult to obtain using existing libraries, especially for many independent contractions where each contraction is very small, e.g., sub-vector/warp in size. However, using our framework to batch contractions plus application-specifics, we demonstrate close to peak performance results. In particular, to accelerate large scale tensor-formulated high-order finite element method (FEM) simulations, which is the main focus and motivation for this work, we represent contractions as tensor index reordering plus matrix-matrix multiplications (GEMMs). This is a key factor to achieve algorithmically many-fold acceleration (vs. not using it) due to possible reuse of data loaded in fast memory. In addition to using this context knowledge, we design tensor data-structures, tensor algebra interfaces, and new tensor contraction algorithms and implementations to achieve 90+\% of a theoretically derived peak on GPUs. On a K40c GPU for contractions resulting in GEMMs on square matrices of size 8 for example, we are 2.8{\texttimes} faster than CUBLAS, and 8.5{\texttimes} faster than MKL on 16 cores of Intel Xeon ES-2670 (Sandy Bridge) 2.60GHz CPUs. 
Finally, we apply autotuning and code generation techniques to simplify tuning and provide an architecture-aware, user-friendly interface.}, author = {Ahmad Abdelfattah and Marc Baboulin and Veselin Dobrev and Jack Dongarra and Christopher Earl and Jo{\"e}l Falcou and Azzam Haidar and Ian Karlin and Tzanio Kolev and Ian Masliah and Stanimire Tomov} } @article {1472, title = {Linear Algebra Software for Large-Scale Accelerated Multicore Computing}, journal = {Acta Numerica}, volume = {25}, year = {2016}, month = {2016-05}, pages = {1-160}, abstract = {Many crucial scientific computing applications, ranging from national security to medical advances, rely on high-performance linear algebra algorithms and technologies, underscoring their importance and broad impact. Here we present the state-of-the-art design and implementation practices for the acceleration of the predominant linear algebra algorithms on large-scale accelerated multicore systems. Examples are given with fundamental dense linear algebra algorithms {\textendash} from the LU, QR, Cholesky, and LDLT factorizations needed for solving linear systems of equations, to eigenvalue and singular value decomposition (SVD) problems. The implementations presented are readily available via the open-source PLASMA and MAGMA libraries, which represent the next generation modernization of the popular LAPACK library for accelerated multicore systems. To generate the extreme level of parallelism needed for the efficient use of these systems, algorithms of interest are redesigned and then split into well-chosen computational tasks. The task execution is scheduled over the computational components of a hybrid system of multicore CPUs with GPU accelerators and/or Xeon Phi coprocessors, using either static scheduling or light-weight runtime systems. The use of light-weight runtime systems keeps scheduling overheads low, similar to static scheduling, while enabling the expression of parallelism through sequential-like code. This simplifies the development effort and allows exploration of the unique strengths of the various hardware components. Finally, we emphasize the development of innovative linear algebra algorithms using three technologies {\textendash} mixed precision arithmetic, batched operations, and asynchronous iterations {\textendash} that are currently of high interest for accelerated multicore systems.}, doi = {10.1017/S0962492916000015}, author = {Ahmad Abdelfattah and Hartwig Anzt and Jack Dongarra and Mark Gates and Azzam Haidar and Jakub Kurzak and Piotr Luszczek and Stanimire Tomov and Asim YarKhan} } @conference {963, title = {LU, QR, and Cholesky Factorizations: Programming Model, Performance Analysis and Optimization Techniques for the Intel Knights Landing Xeon Phi}, booktitle = {IEEE High Performance Extreme Computing Conference (HPEC{\textquoteright}16)}, year = {2016}, month = {2016-09}, publisher = {IEEE}, organization = {IEEE}, address = {Waltham, MA}, abstract = {A wide variety of heterogeneous compute resources, ranging from multicore CPUs to GPUs and coprocessors, are available to modern computers, making it challenging to design unified numerical libraries that efficiently and productively use all these varied resources. For example, in order to efficiently use Intel{\textquoteright}s Knights Landing (KNL) processor, the next generation of Xeon Phi architectures, one must design and schedule an application in multiple degrees of parallelism and task grain sizes in order to obtain efficient performance.
We propose a productive and portable programming model that allows us to write serial-looking code, which, however, achieves parallelism and scalability by using a lightweight runtime environment to manage the resource-specific workload, and to control the dataflow and the parallel execution. This is done through multiple techniques ranging from multi-level data partitioning to adaptive task grain sizes, and dynamic task scheduling. In addition, our task abstractions enable unified algorithmic development across all the heterogeneous resources. Finally, we outline the strengths and the effectiveness of this approach {\textendash} especially in regards to hardware trends and ease of programming high-performance numerical software that current applications need {\textendash} in order to motivate current work and future directions for the next generation of parallel programming models for high-performance linear algebra libraries on heterogeneous systems.}, author = {Azzam Haidar and Stanimire Tomov and Konstantin Arturov and Murat Guney and Shane Story and Jack Dongarra} } @techreport {, title = {MAGMA Batched: A Batched BLAS Approach for Small Matrix Factorizations and Applications on GPUs}, journal = {Innovative Computing Laboratory Technical Report}, number = {ICL-UT-16-02}, year = {2016}, month = {2016-08}, publisher = {University of Tennessee}, abstract = {A particularly challenging class of problems arising in many applications, called batched problems, involves linear algebra operations on many small-sized matrices. We proposed and designed batched BLAS (Basic Linear Algebra Subroutines), Level-2 GEMV and Level-3 GEMM, to solve them. We illustrate how batched GEMV and GEMM can assist batched advanced factorizations (e.g., bi-diagonalization) and other BLAS routines (e.g., triangular solve) in achieving optimal performance on GPUs. Our solutions achieved up to 2.8-3{\texttimes} speedups compared to CUBLAS and MKL solutions, wherever possible. We illustrated the batched methodology on a real-world Hydrodynamic application by reformulating the tensor operations into batched BLAS GEMV and GEMM operations. A 2.5{\texttimes} speedup and a 1.4{\texttimes} greenup are obtained by changing 10\% of the code. We accelerated and scaled it on the Titan supercomputer to 4096 nodes.}, author = {Tingxing Dong and Azzam Haidar and Piotr Luszczek and Stanimire Tomov and Ahmad Abdelfattah and Jack Dongarra} } @conference {968, title = {Performance Analysis and Acceleration of Explicit Integration for Large Kinetic Networks using Batched GPU Computations}, booktitle = {2016 IEEE High Performance Extreme Computing Conference (HPEC {\textquoteleft}16)}, year = {2016}, month = {2016-09}, publisher = {IEEE}, organization = {IEEE}, address = {Waltham, MA}, abstract = {We demonstrate the systematic implementation of recently-developed fast explicit kinetic integration algorithms that solve efficiently N coupled ordinary differential equations (subject to initial conditions) on modern GPUs. We take representative test cases (Type Ia supernova explosions) and demonstrate two or more orders of magnitude increase in efficiency for solving such systems (of realistic thermonuclear networks coupled to fluid dynamics). This implies that important coupled, multiphysics problems in various scientific and technical disciplines that were intractable, or could be simulated only with highly schematic kinetic networks, are now computationally feasible.
As examples of such applications we present the computational techniques developed for our ongoing deployment of these new methods on modern GPU accelerators. We show that similarly to many other scientific applications, ranging from national security to medical advances, the computation can be split into many independent computational tasks, each of relatively small size. As the size of each individual task does not provide sufficient parallelism for the underlying hardware, especially for accelerators, these tasks must be computed concurrently as a single routine, which we call a batched routine, in order to saturate the hardware with enough work.}, author = {Azzam Haidar and Benjamin Brock and Stanimire Tomov and Michael Guidry and Jay Jay Billings and Daniel Shyles and Jack Dongarra} } @techreport {934, title = {Performance, Design, and Autotuning of Batched GEMM for GPUs}, journal = {University of Tennessee Computer Science Technical Report}, number = {UT-EECS-16-739}, year = {2016}, month = {2016-02}, publisher = {University of Tennessee}, abstract = {The general matrix-matrix multiplication (GEMM) is the most important numerical kernel in dense linear algebra. It is the key component for obtaining high performance in most LAPACK routines. As batched computations on relatively small problems continue to gain interest in many scientific applications, there is a need for a high performance GEMM kernel for a batch of small matrices. Such a kernel should be well designed and tuned to handle small sizes, and to maintain high performance for realistic test cases found in the higher level LAPACK routines, and scientific computing applications in general. This paper presents a high performance batched GEMM kernel on Graphics Processing Units (GPUs). We address batched problems with both fixed and variable sizes, and show that specialized GEMM designs and a comprehensive autotuning process are needed to handle problems of small sizes. For most performance tests reported in this paper, the proposed kernels outperform state-of-the-art approaches using a K40c GPU.}, keywords = {Autotuning, Batched GEMM, GEMM, GPU computing, HPC}, author = {Ahmad Abdelfattah and Azzam Haidar and Stanimire Tomov and Jack Dongarra} } @conference {944, title = {Performance, Design, and Autotuning of Batched GEMM for GPUs}, booktitle = {The International Supercomputing Conference (ISC High Performance 2016)}, year = {2016}, month = {2016-06}, address = {Frankfurt, Germany}, abstract = {The general matrix-matrix multiplication (GEMM) is the most important numerical kernel in dense linear algebra, and is the key component for obtaining high performance in most LAPACK routines. As batched computations on relatively small problems continue to gain interest in many scientific applications, a need arises for a high performance GEMM kernel for batches of small matrices. Such a kernel should be well designed and tuned to handle small sizes, and to maintain high performance for realistic test cases found in the higher level LAPACK routines, and scientific computing applications in general. This paper presents a high performance batched GEMM kernel on Graphics Processing Units (GPUs). We address batched problems with both fixed and variable sizes, and show that specialized GEMM designs and a comprehensive autotuning process are needed to handle problems of small sizes.
For most performance tests reported in this paper, the proposed kernels outperform state-of-the-art approaches using a K40c GPU.}, keywords = {Autotuning, Batched GEMM, GEMM, GPU computing, HPC}, author = {Ahmad Abdelfattah and Azzam Haidar and Stanimire Tomov and Jack Dongarra} } @inbook {997, title = {Performance, Design, and Autotuning of Batched GEMM for GPUs}, booktitle = {High Performance Computing: 31st International Conference, ISC High Performance 2016, Frankfurt, Germany, June 19-23, 2016, Proceedings}, number = {9697}, year = {2016}, pages = {21{\textendash}38}, publisher = {Springer International Publishing}, organization = {Springer International Publishing}, abstract = {The general matrix-matrix multiplication (GEMM) is the most important numerical kernel in dense linear algebra, and is the key component for obtaining high performance in most LAPACK routines. As batched computations on relatively small problems continue to gain interest in many scientific applications, a need arises for a high performance GEMM kernel for batches of small matrices. Such a kernel should be well designed and tuned to handle small sizes, and to maintain high performance for realistic test cases found in the higher level LAPACK routines, and scientific computing applications in general. This paper presents a high performance batched GEMM kernel on Graphics Processing Units (GPUs). We address batched problems with both fixed and variable sizes, and show that specialized GEMM designs and a comprehensive autotuning process are needed to handle problems of small sizes. For most performance tests reported in this paper, the proposed kernels outperform state-of-the-art approaches using a K40c GPU.}, isbn = {978-3-319-41321-1}, doi = {10.1007/978-3-319-41321-1_2}, url = {http://dx.doi.org/10.1007/978-3-319-41321-1_2}, author = {Ahmad Abdelfattah and Azzam Haidar and Stanimire Tomov and Jack Dongarra}, editor = {Julian M. Kunkel and Pavan Balaji and Jack Dongarra} } @conference {943, title = {Performance Tuning and Optimization Techniques of Fixed and Variable Size Batched Cholesky Factorization on GPUs}, booktitle = {International Conference on Computational Science (ICCS{\textquoteright}16)}, year = {2016}, month = {2016-06}, address = {San Diego, CA}, abstract = {

Solving a large number of relatively small linear systems has recently drawn more attention in the HPC community, due to the importance of such computational workloads in many scientific applications, including sparse multifrontal solvers. Modern hardware accelerators and their architecture require a set of optimization techniques that are very different from the ones used in solving one relatively large matrix. In order to impose concurrency on such throughput-oriented architectures, a common practice is to batch the solution of these matrices as one task offloaded to the underlying hardware, rather than solving them individually.

This paper presents a high performance batched Cholesky factorization on large sets of relatively small matrices using Graphics Processing Units (GPUs), and addresses both fixed and variable size batched problems. We investigate various algorithm designs and optimization techniques, and show that it is essential to combine kernel design with performance tuning in order to achieve the best possible performance. We compare our approaches against state-of-the-art CPU solutions as well as GPU-based solutions using existing libraries, and show that, on a K40c GPU for example, our kernels are more than 2{\texttimes} faster.

}, keywords = {batched computation, Cholesky Factorization, GPUs, Tuning}, author = {Ahmad Abdelfattah and Azzam Haidar and Stanimire Tomov and Jack Dongarra} } @article {1343, title = {A Standard for Batched BLAS Routines}, year = {2016}, month = {2016-04}, publisher = {17th SIAM Conference on Parallel Processing for Scientific Computing (SIAM PP16)}, address = {Paris, France}, author = {Pedro Valero-Lara and Jack Dongarra and Azzam Haidar and Samuel D. Relton and Stanimire Tomov and Mawussi Zounon} } @conference {977, title = {Towards Achieving Performance Portability Using Directives for Accelerators}, booktitle = {The International Conference for High Performance Computing, Networking, Storage and Analysis (SC{\textquoteright}16), Third Workshop on Accelerator Programming Using Directives (WACCPD)}, year = {2016}, month = {2016-11}, publisher = {Innovative Computing Laboratory, University of Tennessee}, organization = {Innovative Computing Laboratory, University of Tennessee}, address = {Salt Lake City, Utah}, abstract = {In this paper we explore the performance portability of directives provided by OpenMP 4 and OpenACC to program various types of node architectures with attached accelerators, both self-hosted multicore and offload multicore/GPU. Our goal is to examine how successful OpenACC and the newer of- fload features of OpenMP 4.5 are for moving codes between architectures, how much tuning might be required and what lessons we can learn from this experience. To do this, we use examples of algorithms with varying computational intensities for our evaluation, as both compute and data access efficiency are important considerations for overall application performance. We implement these kernels using various methods provided by newer OpenACC and OpenMP implementations, and we evaluate their performance on various platforms including both X86 64 with attached NVIDIA GPUs, self-hosted Intel Xeon Phi KNL, as well as an X86 64 host system with Intel Xeon Phi coprocessors. In this paper, we explain what factors affected the performance portability such as how to pick the right programming model, its programming style, its availability on different platforms, and how well compilers can optimize and target to multiple platforms.}, author = {M. Graham Lopez and Larrea, V and Joubert, W and Hernandez, O and Azzam Haidar and Stanimire Tomov and Jack Dongarra} } @conference {843, title = {Batched Matrix Computations on Hardware Accelerators}, booktitle = {EuroMPI/Asia 2015 Workshop}, year = {2015}, month = {2015-09}, address = {Bordeaux, France}, abstract = {Scientific applications require solvers that work on many small size problems that are independent from each other. At the same time, the high-end hardware evolves rapidly and becomes ever more throughput-oriented and thus there is an increasing need for effective approach to develop energy efficient, high-performance codes for these small matrix problems that we call batched factorizations. The many applications that need this functionality could especially benefit from the use of GPUs, which currently are four to five times more energy efficient than multicore CPUs on important scientific workloads. This paper, consequently, describes the development of the most common, one-sided factorizations: Cholesky, LU, and QR for a set of small dense matrices. The algorithms we present together with their implementations are, by design, inherently parallel. 
In particular, our approach is based on representing the process as a sequence of batched BLAS routines that are executed entirely on a GPU. Importantly, this is unlike the LAPACK and the hybrid MAGMA factorization algorithms that work under drastically different assumptions of hardware design and efficiency of execution of the various computational kernels involved in the implementation. Thus, our approach is more efficient than what works for a combination of multicore CPUs and GPUs for the problem sizes of interest in the application use cases. The paradigm whereupon a single chip (a GPU or a CPU) factorizes a single problem at a time is not at all efficient in our applications{\textquoteright} context. We illustrate all these claims through a detailed performance analysis. With the help of profiling and tracing tools, we guide our development of batched factorizations to achieve up to two-fold speedup and three-fold better energy efficiency as compared against our highly optimized batched CPU implementations based on the MKL library. The tested system featured two sockets of Intel Sandy Bridge CPUs. Compared to the batched LU factorization featured in the CUBLAS library for GPUs, we achieve as high as a 2.5x speedup on the NVIDIA K40 GPU.}, author = {Azzam Haidar and Piotr Luszczek and Stanimire Tomov and Jack Dongarra} } @conference {895, title = {Batched Matrix Computations on Hardware Accelerators Based on GPUs}, booktitle = {2015 SIAM Conference on Applied Linear Algebra (SIAM LA)}, year = {2015}, month = {2015-10}, publisher = {SIAM}, organization = {SIAM}, address = {Atlanta, GA}, abstract = {We will present techniques for small matrix computations on GPUs and their use for energy efficient, high-performance solvers. Work on small problems delivers high performance through improved data reuse. Many numerical libraries and applications need this functionality further developed. We describe the main factorizations LU, QR, and Cholesky for a set of small dense matrices in parallel. We achieve significant acceleration and reduced energy consumption against other solutions. Our techniques are of interest to GPU application developers in general.}, author = {Azzam Haidar and Ahmad Abdelfattah and Stanimire Tomov and Jack Dongarra} } @article {858, title = {Batched matrix computations on hardware accelerators based on GPUs}, journal = {International Journal of High Performance Computing Applications}, year = {2015}, month = {2015-02}, abstract = {Scientific applications require solvers that work on many small size problems that are independent from each other. At the same time, the high-end hardware evolves rapidly and becomes ever more throughput-oriented and thus there is an increasing need for an effective approach to develop energy-efficient, high-performance codes for these small matrix problems that we call batched factorizations. The many applications that need this functionality could especially benefit from the use of GPUs, which currently are four to five times more energy efficient than multicore CPUs on important scientific workloads. This paper, consequently, describes the development of the most common, one-sided factorizations, Cholesky, LU, and QR, for a set of small dense matrices. The algorithms we present together with their implementations are, by design, inherently parallel. In particular, our approach is based on representing the process as a sequence of batched BLAS routines that are executed entirely on a GPU.
Importantly, this is unlike the LAPACK and the hybrid MAGMA factorization algorithms that work under drastically different assumptions of hardware design and efficiency of execution of the various computational kernels involved in the implementation. Thus, our approach is more efficient than what works for a combination of multicore CPUs and GPUs for the problem sizes of interest in the application use cases. The paradigm whereupon a single chip (a GPU or a CPU) factorizes a single problem at a time is not at all efficient in our applications{\textquoteright} context. We illustrate all of these claims through a detailed performance analysis. With the help of profiling and tracing tools, we guide our development of batched factorizations to achieve up to two-fold speedup and three-fold better energy efficiency as compared against our highly optimized batched CPU implementations based on the MKL library. The tested system featured two sockets of Intel Sandy Bridge CPUs. Compared with the batched LU factorization featured in the CUBLAS library for GPUs, we achieve as high as a 2.5{\texttimes} speedup on the NVIDIA K40 GPU.}, keywords = {batched factorization, hardware accelerators, numerical linear algebra, numerical software libraries, one-sided factorization algorithms}, doi = {10.1177/1094342014567546}, author = {Azzam Haidar and Tingxing Dong and Piotr Luszczek and Stanimire Tomov and Jack Dongarra} } @conference {928, title = {Cholesky Across Accelerators}, booktitle = {17th IEEE International Conference on High Performance Computing and Communications (HPCC 2015)}, year = {2015}, month = {2015-08}, publisher = {IEEE}, organization = {IEEE}, address = {Elizabeth, NJ}, author = {Asim YarKhan and Azzam Haidar and Chongxiao Cao and Piotr Luszczek and Stanimire Tomov and Jack Dongarra} } @conference {896, title = {Comparing Hybrid CPU-GPU and Native GPU-only Acceleration for Linear Algebra}, booktitle = {2015 SIAM Conference on Applied Linear Algebra}, year = {2015}, month = {2015-10}, publisher = {SIAM}, organization = {SIAM}, address = {Atlanta, GA}, abstract = {Accelerating dense linear algebra using GPUs admits two models: hybrid CPU-GPU and GPU-only. The hybrid model factors the panel on the CPU while updating the trailing matrix on the GPU, concentrating the GPU on high-performance matrix multiplies. The GPU-only model performs the entire computation on the GPU, avoiding costly data transfers to the CPU. We compare these two approaches for three QR-based algorithms: QR factorization, rank revealing QR, and reduction to Hessenberg.}, author = {Mark Gates and Stanimire Tomov and Azzam Haidar} } @conference {842, title = {A Data Flow Divide and Conquer Algorithm for Multicore Architecture}, booktitle = {29th IEEE International Parallel \& Distributed Processing Symposium (IPDPS)}, year = {2015}, month = {2015-05}, publisher = {IEEE}, organization = {IEEE}, address = {Hyderabad, India}, abstract = {Computing eigenpairs of a symmetric matrix is a problem arising in many industrial applications, including quantum physics and finite-elements computation for automobiles. A classical approach is to reduce the matrix to tridiagonal form before computing eigenpairs of the tridiagonal matrix. Then, a back-transformation allows one to obtain the final solution. Parallelism issues of the reduction stage have already been tackled in different shared-memory libraries.
In this article, we focus on solving the tridiagonal eigenproblem, and we describe a novel implementation of the Divide and Conquer algorithm. The algorithm is expressed as a sequential task-flow, scheduled in an out-of-order fashion by a dynamic runtime which allows the programmer to play with tasks granularity. The resulting implementation is between two and five times faster than the equivalent routine from the Intel MKL library, and outperforms the best MRRR implementation for many matrices.}, keywords = {Eigensolver, lapack, Multicore, plasma, task-based programming}, author = {Azzam Haidar and Jakub Kurzak and Gregoire Pichon and Mathieu Faverge} } @article {1345, title = {On the Design, Autotuning, and Optimization of GPU Kernels for Kinetic Network Simulations Using Fast Explicit Integration and GPU Batched Computation}, year = {2015}, month = {2015-09}, publisher = {Joint Institute for Computational Sciences Seminar Series, Presentation}, address = {Oak Ridge, TN}, author = {Michael Guidry and Azzam Haidar} } @conference {862, title = {On the Design, Development, and Analysis of Optimized Matrix-Vector Multiplication Routines for Coprocessors}, booktitle = {ISC High Performance 2015}, year = {2015}, month = {2015-07}, address = {Frankfurt, Germany}, abstract = {The dramatic change in computer architecture due to the manycore paradigm shift, made the development of numerical routines that are optimal extremely challenging. In this work, we target the development of numerical algorithms and implementations for Xeon Phi coprocessor architecture designs. In particular, we examine and optimize the general and symmetric matrix-vector multiplication routines (gemv/symv), which are some of the most heavily used linear algebra kernels in many important engineering and physics applications. We describe a successful approach on how to address the challenges for this problem, starting from our algorithm design, performance analysis and programing model, to kernel optimization. Our goal, by targeting low-level, easy to understand fundamental kernels, is to develop new optimization strategies that can be effective elsewhere for the use on manycore coprocessors, and to show significant performance improvements compared to the existing state-of-the-art implementations. Therefore, in addition to the new optimization strategies, analysis, and optimal performance results, we finally present the significance of using these routines/strategies to accelerate higher-level numerical algorithms for the eigenvalue problem (EVP) and the singular value decomposition (SVD) that by themselves are foundational for many important applications.}, author = {Khairul Kabir and Azzam Haidar and Stanimire Tomov and Jack Dongarra} } @conference {894, title = {Efficient Eigensolver Algorithms on Accelerator Based Architectures}, booktitle = {2015 SIAM Conference on Applied Linear Algebra (SIAM LA)}, year = {2015}, month = {2015-10}, publisher = {SIAM}, organization = {SIAM}, address = {Atlanta, GA}, abstract = {The enormous gap between the high-performance capabilities of GPUs and the slow interconnect between them has made the development of numerical software that is scalable across multiple GPUs extremely challenging. 
We describe a successful methodology on how to address the challenges -starting from our algorithm design, kernel optimization and tuning, to our programming model- in the development of a scalable high-performance symmetric eigenvalue and singular value solver.}, author = {Azzam Haidar and Piotr Luszczek and Stanimire Tomov and Jack Dongarra} } @conference {886, title = {Efficient Implementation Of Quantum Materials Simulations On Distributed CPU-GPU Systems}, booktitle = {The International Conference for High Performance Computing, Networking, Storage and Analysis (SC15)}, year = {2015}, month = {2015-11}, publisher = {ACM}, organization = {ACM}, address = {Austin, TX}, abstract = {We present a scalable implementation of the Linearized Augmented Plane Wave method for distributed memory systems, which relies on an efficient distributed, block-cyclic setup of the Hamiltonian and overlap matrices and allows us to turn around highly accurate 1000+ atom all-electron quantum materials simulations on clusters with a few hundred nodes. The implementation runs efficiently on standard multicore CPU nodes, as well as hybrid CPU-GPU nodes. The key for the latter is a novel algorithm to solve the generalized eigenvalue problem for dense, complex Hermitian matrices on distributed hybrid CPU-GPU systems. Performance tests for Li-intercalated CoO2 supercells containing 1501 atoms demonstrate that high-accuracy, transferable quantum simulations can now be used in throughput materials search problems. While our application can benefit and get scalable performance through CPU-only libraries like ScaLAPACK or ELPA2, our new hybrid solver enables the efficient use of GPUs and shows that a hybrid CPU-GPU architecture scales to a desired performance using substantially fewer cluster nodes, and notably, is considerably more energy efficient than the traditional multicore CPU only systems for such complex applications.}, author = {Raffaele Solc{\`a} and Anton Kozhevnikov and Azzam Haidar and Stanimire Tomov and Thomas C. Schulthess and Jack Dongarra} } @conference {887, title = {Flexible Linear Algebra Development and Scheduling with Cholesky Factorization}, booktitle = {17th IEEE International Conference on High Performance Computing and Communications}, year = {2015}, month = {2015-08}, address = {Newark, NJ}, abstract = {Modern high performance computing environments are composed of networks of compute nodes that often contain a variety of heterogeneous compute resources, such as multicore-CPUs, GPUs, and coprocessors. One challenge faced by domain scientists is how to efficiently use all these distributed, heterogeneous resources. In order to use the GPUs effectively, the workload parallelism needs to be much greater than the parallelism for a multicore-CPU. On the other hand, a Xeon Phi coprocessor will work most effectively with degree of parallelism between GPUs and multicore-CPUs. Additionally, effectively using distributed memory nodes brings out another level of complexity where the workload must be carefully partitioned over the nodes. In this work we are using a lightweight runtime environment to handle many of the complexities in such distributed, heterogeneous systems. The runtime environment uses task-superscalar concepts to enable the developer to write serial code while providing parallel execution. The task-programming model allows the developer to write resource-specialization code, so that each resource gets the appropriate sized workload-grain. 
Our task programming abstraction enables the developer to write a single algorithm that will execute efficiently across the distributed heterogeneous machine. We demonstrate the effectiveness of our approach with performance results for dense linear algebra applications, specifically the Cholesky factorization.}, author = {Azzam Haidar and Asim YarKhan and Chongxiao Cao and Piotr Luszczek and Stanimire Tomov and Jack Dongarra} } @conference {864, title = {Framework for Batched and GPU-resident Factorization Algorithms to Block Householder Transformations}, booktitle = {ISC High Performance}, year = {2015}, month = {2015-07}, publisher = {Springer}, organization = {Springer}, address = {Frankfurt, Germany}, author = {Azzam Haidar and Tingxing Dong and Stanimire Tomov and Piotr Luszczek and Jack Dongarra} } @article {829, title = {HPC Programming on Intel Many-Integrated-Core Hardware with MAGMA Port to Xeon Phi}, journal = {Scientific Programming}, volume = {23}, year = {2015}, month = {2015-01}, abstract = {This paper presents the design and implementation of several fundamental dense linear algebra (DLA) algorithms for multicore with Intel Xeon Phi Coprocessors. In particular, we consider algorithms for solving linear systems. Further, we give an overview of the MAGMA MIC library, an open source, high performance library that incorporates the developments presented, and in general provides to heterogeneous architectures of multicore with coprocessors the DLA functionality of the popular LAPACK library. The LAPACK-compliance simplifies the use of the MAGMA MIC library in applications, while providing them with portably performant DLA. High performance is obtained through use of the high-performance BLAS, hardware-specific tuning, and a hybridization methodology where we split the algorithm into computational tasks of various granularities. Execution of those tasks is properly scheduled over the heterogeneous hardware components by minimizing data movements and mapping algorithmic requirements to the architectural strengths of the various heterogeneous hardware components. Our methodology and programming techniques are incorporated into the MAGMA MIC API, which abstracts the application developer from the specifics of the Xeon Phi architecture and is therefore applicable to algorithms beyond the scope of DLA.}, keywords = {communication and computation overlap, dynamic runtime scheduling using dataflow dependences, hardware accelerators and coprocessors, Intel Xeon Phi processor, Many Integrated Cores, numerical linear algebra}, issn = {1058-9244}, doi = {10.3233/SPR-140404}, author = {Azzam Haidar and Jack Dongarra and Khairul Kabir and Mark Gates and Piotr Luszczek and Stanimire Tomov and Yulu Jia} } @conference {888, title = {MAGMA Embedded: Towards a Dense Linear Algebra Library for Energy Efficient Extreme Computing}, booktitle = { 2015 IEEE High Performance Extreme Computing Conference (HPEC {\textquoteright}15), (Best Paper Award)}, year = {2015}, month = {2015-09}, publisher = {IEEE}, organization = {IEEE}, address = {Waltham, MA}, abstract = {Embedded computing, not only in large systems like drones and hybrid vehicles, but also in small portable devices like smart phones and watches, gets more extreme to meet ever increasing demands for extended and improved functionalities. This, combined with the typical constrains for low power consumption and small sizes, makes the design of numerical libraries for embedded systems challenging. 
In this paper, we present the design and implementation of embedded system-aware algorithms that target these challenges in the area of dense linear algebra. We consider the fundamental problems of solving linear systems of equations and least squares problems, using the LU, QR, and Cholesky factorizations, and illustrate our results, both in terms of performance and energy efficiency, on the Jetson TK1 development kit. We developed performance optimizations for both small and large problems. In contrast to the corresponding LAPACK algorithms, the new designs target the use of many-cores, readily available now even in mobile devices like the Jetson TK1, e.g., featuring 192 CUDA cores. The implementations presented will form the core of a MAGMA Embedded library, to be released as part of the MAGMA libraries.}, author = {Azzam Haidar and Stanimire Tomov and Piotr Luszczek and Jack Dongarra} } @article {1347, title = {MAGMA MIC: Optimizing Linear Algebra for Intel Xeon Phi}, year = {2015}, month = {2015-06}, publisher = {ISC High Performance (ISC15), Intel Booth Presentation}, address = {Frankfurt, Germany}, author = {Hartwig Anzt and Jack Dongarra and Mark Gates and Azzam Haidar and Khairul Kabir and Piotr Luszczek and Stanimire Tomov and Ichitaro Yamazaki} } @conference {916, title = {Optimization for Performance and Energy for Batched Matrix Computations on GPUs}, booktitle = {8th Workshop on General Purpose Processing Using GPUs (GPGPU 8)}, year = {2015}, month = {2015-02}, publisher = {ACM}, organization = {ACM}, address = {San Francisco, CA}, abstract = {As modern hardware keeps evolving, an increasingly effective approach to develop energy efficient and high-performance solvers is to design them to work on many small size independent problems. Many applications already need this functionality, especially for GPUs, which are known to be currently about four to five times more energy efficient than multicore CPUs. We describe the development of the main one-sided factorizations that work for a set of small dense matrices in parallel, and we illustrate our techniques on the LU and Cholesky factorizations. We refer to this mode of operation as a batched factorization. Our approach is based on representing the algorithms as a sequence of batched BLAS routines for GPU-only execution. The goal of avoiding multicore CPU use, e.g., as in the hybrid CPU-GPU algorithms, is to exclusively benefit from the GPU{\textquoteright}s significantly higher energy efficiency, as well as from the removal of the costly CPU-to-GPU communications. Furthermore, we do not use a single symmetric multiprocessor (on the GPU) to factorize a single problem at a time. We illustrate how our performance analysis and the use of profiling and tracing tools guided the development and optimization of batched factorizations to achieve up to 2-fold speedup and 3-fold better energy efficiency compared to our highly optimized batched CPU implementations based on the MKL library (when using two sockets of Intel Sandy Bridge CPUs).
Compared to a batched LU factorization featured in the CUBLAS library for GPUs, we achieved up to a 2.5{\texttimes} speedup on the K40 GPU.}, keywords = {batched factorization, hardware accelerators, numerical linear algebra, numerical software libraries, one-sided factorization algorithms}, doi = {10.1145/2716282.2716288}, author = {Azzam Haidar and Tingxing Dong and Piotr Luszczek and Stanimire Tomov and Jack Dongarra} } @article {936, title = {Parallel Programming Models for Dense Linear Algebra on Heterogeneous Systems}, journal = {Supercomputing Frontiers and Innovations}, volume = {2}, number = {4}, year = {2015}, month = {2015-10}, abstract = {We present a review of the current best practices in parallel programming models for dense linear algebra (DLA) on heterogeneous architectures. We consider multicore CPUs, stand-alone manycore coprocessors, GPUs, and combinations of these. Of interest is the evolution of the programming models for DLA libraries {\textendash} in particular, the evolution from the popular LAPACK and ScaLAPACK libraries to their modernized counterparts PLASMA (for multicore CPUs) and MAGMA (for heterogeneous architectures), as well as other programming models and libraries. Besides providing insights into the programming techniques of the libraries considered, we outline our view of the current strengths and weaknesses of their programming models {\textendash} especially in regards to hardware trends and ease of programming high-performance numerical software that current applications need {\textendash} in order to motivate work and future directions for the next generation of parallel programming models for high-performance linear algebra libraries on heterogeneous systems.}, keywords = {dense linear algebra, gpu, HPC, Multicore, plasma, Programming models, runtime}, doi = {10.14529/jsfi1504}, author = {Maksims Abalenkovs and Ahmad Abdelfattah and Jack Dongarra and Mark Gates and Azzam Haidar and Jakub Kurzak and Piotr Luszczek and Stanimire Tomov and Ichitaro Yamazaki and Asim YarKhan} } @conference {860, title = {Performance Analysis and Design of a Hessenberg Reduction using Stabilized Blocked Elementary Transformations for New Architectures}, booktitle = {The Spring Simulation Multi-Conference 2015 (SpringSim{\textquoteright}15), Best Paper Award}, year = {2015}, month = {2015-04}, address = {Alexandria, VA}, abstract = {The solution of nonsymmetric eigenvalue problems, Ax = λx, can be accelerated substantially by first reducing A to an upper Hessenberg matrix H that has the same eigenvalues as A. This can be done using Householder orthogonal transformations, which is a well established standard, or stabilized elementary transformations. The latter approach, although having half the flops of the former, has been used less in practice, e.g., on computer architectures with well developed hierarchical memories, because of its memory-bound operations and the complexity in stabilizing it. In this paper we revisit the stabilized elementary transformations approach in the context of new architectures {\textendash} both multicore CPUs and Xeon Phi coprocessors. We derive for the first time a blocking version of the algorithm. The blocked version reduces the memory-bound operations and we analyze its performance. A performance model is developed that shows the limitations of both approaches.
The competitiveness of using stabilized elementary transformations has been quantified, highlighting that it can be 20 to 30\% faster on current high-end multicore CPUs and Xeon Phi coprocessors.}, keywords = {Eigenvalues problem, Hessenberg reduction, Multi/Many-core, Stabilized Elementary Transformations}, author = {Khairul Kabir and Azzam Haidar and Stanimire Tomov and Jack Dongarra} } @conference {861, title = {Performance Analysis and Optimization of Two-Sided Factorization Algorithms for Heterogeneous Platform}, booktitle = {International Conference on Computational Science (ICCS 2015)}, year = {2015}, month = {2015-06}, address = {Reykjav{\'\i}k, Iceland}, author = {Khairul Kabir and Azzam Haidar and Stanimire Tomov and Jack Dongarra} } @article {1346, title = {Towards a High-Performance Tensor Algebra Package for Accelerators}, year = {2015}, month = {2015-09}, publisher = {Smoky Mountains Computational Sciences and Engineering Conference (SMC15)}, address = {Gatlinburg, TN}, author = {Marc Baboulin and Veselin Dobrev and Jack Dongarra and Christopher Earl and Jo{\"e}l Falcou and Azzam Haidar and Ian Karlin and Tzanio Kolev and Ian Masliah and Stanimire Tomov} } @conference {844, title = {Towards Batched Linear Solvers on Accelerated Hardware Platforms}, booktitle = {8th Workshop on General Purpose Processing Using GPUs (GPGPU 8) co-located with PPOPP 2015}, year = {2015}, month = {2015-02}, publisher = {ACM}, organization = {ACM}, address = {San Francisco, CA}, abstract = {As hardware evolves, an increasingly effective approach to develop energy efficient, high-performance solvers is to design them to work on many small and independent problems. Indeed, many applications already need this functionality, especially for GPUs, which are known to be currently about four to five times more energy efficient than multicore CPUs for every floating-point operation. In this paper, we describe the development of the main one-sided factorizations {\textendash} LU, QR, and Cholesky {\textendash} that are needed for a set of small dense matrices to work in parallel. We refer to such algorithms as batched factorizations. Our approach is based on representing the algorithms as a sequence of batched BLAS routines for GPU-contained execution. Note that this is similar in functionality to the LAPACK and the hybrid MAGMA algorithms for large-matrix factorizations. But it is different from a straightforward approach, whereby each of the GPU{\textquoteright}s symmetric multiprocessors factorizes a single problem at a time. We illustrate how our performance analysis together with the profiling and tracing tools guided the development of batched factorizations to achieve up to 2-fold speedup and 3-fold better energy efficiency compared to our highly optimized batched CPU implementations based on the MKL library on a two-socket Intel Sandy Bridge server.
Compared to a batched LU factorization featured in NVIDIA{\textquoteright}s CUBLAS library for GPUs, we achieve up to a 2.5-fold speedup on the K40 GPU.}, keywords = {batched factorization, hardware accelerators, numerical linear algebra, numerical software libraries, one-sided factorization algorithms}, author = {Azzam Haidar and Piotr Luszczek and Stanimire Tomov and Jack Dongarra} } @inproceedings {959, title = {Weighted Dynamic Scheduling with Many Parallelism Grains for Offloading of Numerical Workloads to Multiple Varied Accelerators}, journal = {Proceedings of the 6th Workshop on Latest Advances in Scalable Algorithms for Large-Scale Systems (ScalA{\textquoteright}15)}, volume = {No. 5}, year = {2015}, month = {2015-11}, publisher = {ACM}, address = {Austin, TX}, abstract = {A wide variety of heterogeneous compute resources are available to modern computers, including multiple sockets containing multicore CPUs, one or more GPUs of varying power, and coprocessors such as the Intel Xeon Phi. The challenge faced by domain scientists is how to efficiently and productively use these varied resources. For example, in order to use GPUs effectively, the workload must have a greater degree of parallelism than a workload designed for a multicore CPU. The domain scientist would have to design and schedule an application in multiple degrees of parallelism and task grain sizes in order to obtain efficient performance from the resources. We propose a productive programming model starting from serial code, which achieves parallelism and scalability by using a task-superscalar runtime environment to adapt the computation to the available resources. The adaptation is done at multiple points, including multi-level data partitioning, adaptive task grain sizes, and dynamic task scheduling. The effectiveness of this approach for utilizing multi-way heterogeneous hardware resources is demonstrated by implementing dense linear algebra applications.}, keywords = {dataflow scheduling, hardware accelerators, multi-grain parallelism}, author = {Azzam Haidar and Yulu Jia and Piotr Luszczek and Stanimire Tomov and Asim YarKhan and Jack Dongarra} } @conference {766, title = {Accelerating Eigenvector Computation in the Nonsymmetric Eigenvalue Problem}, booktitle = {VECPAR 2014}, year = {2014}, month = {2014-06}, address = {Eugene, OR}, abstract = {In the nonsymmetric eigenvalue problem, work has focused on the Hessenberg reduction and QR iteration, using efficient algorithms and fast, Level 3 BLAS routines. Comparatively, computation of eigenvectors performs poorly, limited to slow, Level 2 BLAS performance with little speedup on multi-core systems. It has thus become a dominant cost in the eigenvalue problem. 
To address this, we present improvements for the eigenvector computation to use Level 3 BLAS where applicable and parallelize the remaining triangular solves, achieving good parallel scaling and accelerating the overall eigenvalue problem more than three-fold.}, author = {Mark Gates and Azzam Haidar and Jack Dongarra} } @inbook {780, title = {Accelerating Numerical Dense Linear Algebra Calculations with GPUs}, booktitle = {Numerical Computations with GPUs}, year = {2014}, pages = {3-28}, publisher = {Springer International Publishing}, organization = {Springer International Publishing}, chapter = {1}, isbn = {978-3-319-06547-2}, doi = {10.1007/978-3-319-06548-9_1}, author = {Jack Dongarra and Mark Gates and Azzam Haidar and Jakub Kurzak and Piotr Luszczek and Stanimire Tomov and Ichitaro Yamazaki} } @conference {715, title = {A Fast Batched Cholesky Factorization on a GPU}, booktitle = {International Conference on Parallel Processing (ICPP-2014)}, year = {2014}, month = {2014-09}, address = {Minneapolis, MN}, abstract = {Currently, state-of-the-art libraries, like MAGMA, focus on very large linear algebra problems, while solving many small independent problems, usually referred to as batched problems, is not given adequate attention. In this paper, we propose a batched Cholesky factorization on a GPU. Three algorithms {\textendash} nonblocked, blocked, and recursive blocked {\textendash} were examined. The left-looking version of the Cholesky factorization is used to factorize the panel, and the right-looking Cholesky version is used to update the trailing matrix in the recursive blocked algorithm. Our batched Cholesky achieves up to a 1.8-fold speedup compared to the optimized parallel implementation in the MKL library on two sockets of Intel Sandy Bridge CPUs. Further, we use the new routines to develop a single Cholesky factorization solver which targets large matrix sizes. Our approach differs from MAGMA by having an entirely GPU implementation where both the panel factorization and the trailing matrix updates are on the GPU. Such an implementation does not depend on the speed of the CPU. Compared to the MAGMA library, our full GPU solution achieves 85\% of the performance of hybrid MAGMA, which uses 16 Sandy Bridge cores in addition to a K40 NVIDIA GPU. Moreover, we achieve 80\% of the practical dgemm peak of the machine, while MAGMA achieves only 75\%, and, finally, in terms of energy consumption, we outperform MAGMA by a factor of 1.5 in performance per watt for large matrices.}, author = {Tingxing Dong and Azzam Haidar and Stanimire Tomov and Jack Dongarra} } @conference {765, title = {Heterogeneous Acceleration for Linear Algebra in Multi-Coprocessor Environments}, booktitle = {VECPAR 2014}, year = {2014}, month = {2014-06}, address = {Eugene, OR}, abstract = {We present an efficient and scalable programming model for the development of linear algebra in heterogeneous multi-coprocessor environments. The model incorporates some of the current best design and implementation practices for the heterogeneous acceleration of dense linear algebra (DLA). Examples are given for the basic linear system solvers {\textendash} the LU, QR, and Cholesky factorizations. To generate the extreme level of parallelism needed for the efficient use of coprocessors, algorithms of interest are redesigned and then split into well-chosen computational tasks. 
The execution of these tasks is scheduled over the computational components of a hybrid system of multi-core CPUs and coprocessors using a light-weight runtime system. The use of light-weight runtime systems keeps scheduling overhead low, while enabling the expression of parallelism through otherwise sequential code. This simplifies the development efforts and allows the exploration of the unique strengths of the various hardware components.}, keywords = {Computer science, factorization, Heterogeneous systems, Intel Xeon Phi, linear algebra}, author = {Azzam Haidar and Piotr Luszczek and Stanimire Tomov and Jack Dongarra} } @conference {859, title = {LU Factorization of Small Matrices: Accelerating Batched DGETRF on the GPU}, booktitle = {16th IEEE International Conference on High Performance Computing and Communications (HPCC)}, year = {2014}, month = {2014-08}, publisher = {IEEE}, organization = {IEEE}, address = {Paris, France}, abstract = {Gaussian Elimination is commonly used to solve dense linear systems in scientific models. In a large number of applications, a need arises to solve many small-size problems, instead of a few large linear systems. The size of each of these small linear systems depends, for example, on the number of ordinary differential equations (ODEs) used in the model, and can be on the order of hundreds of unknowns. To efficiently exploit the computing power of modern accelerator hardware, these linear systems are processed in batches. To improve the numerical stability of the Gaussian Elimination, at least partial pivoting is required, most often accomplished with row pivoting. However, row pivoting can result in a severe performance penalty on GPUs because it brings in thread divergence and non-coalesced memory accesses. The state-of-the-art libraries for linear algebra that target GPUs, such as MAGMA, focus on large matrix sizes. They change the data layout by transposing the matrix to avoid these divergence and non-coalescing penalties. However, the data movement associated with transposition is very expensive for small matrices. In this paper, we propose a batched LU factorization for GPUs by using a multi-level blocked right-looking algorithm that preserves the data layout but minimizes the penalty of partial pivoting. Our batched LU achieves up to a 2.5-fold speedup when compared to the alternative CUBLAS solutions on a K40c GPU and a 3.6-fold speedup over MKL on a node of the Titan supercomputer at ORNL in a nuclear reaction network simulation.}, author = {Tingxing Dong and Azzam Haidar and Piotr Luszczek and James Harris and Stanimire Tomov and Jack Dongarra} } @article {831, title = {Model-Driven One-Sided Factorizations on Multicore, Accelerated Systems}, journal = {Supercomputing Frontiers and Innovations}, volume = {1}, year = {2014}, abstract = {Hardware heterogeneity of HPC platforms is no longer considered unusual but instead has become the most viable way forward towards Exascale. In fact, the multitude of heterogeneous resources available to modern computers are designed for different workloads, and their efficient use is closely aligned with the specialized role envisaged by their design. Commonly, in order to efficiently use such GPU resources, the workload in question must have a much greater degree of parallelism than workloads often associated with multicore processors (CPUs). 
Available GPU variants differ in their internal architecture and, as a result, are capable of handling workloads of varying degrees of complexity and a range of computational patterns. This vast array of applicable workloads will likely lead to an ever-accelerating mixing of multicore CPUs and GPUs in multi-user environments, with the ultimate goal of offering adequate computing facilities for a wide range of scientific and technical workloads. In the following paper, we present a research prototype that uses a lightweight runtime environment to manage the resource-specific workloads, and to control the dataflow and parallel execution in hybrid systems. Our lightweight runtime environment uses task superscalar concepts to enable the developer to write serial code while providing parallel execution. This concept is reminiscent of dataflow and systolic architectures in its conceptualization of a workload as a set of side-effect-free tasks that pass data items whenever the associated work assignment has been completed. Additionally, our task abstractions and their parametrization enable uniformity in the algorithmic development across all the heterogeneous resources without sacrificing precious compute cycles. We include performance results for dense linear algebra functions, which demonstrate the practicality and effectiveness of our approach and its ability to fully utilize a wide range of accelerator hardware.}, keywords = {dense linear algebra, hardware accelerators, task superscalar scheduling}, doi = {http://dx.doi.org/10.14529/jsfi1401}, author = {Jack Dongarra and Azzam Haidar and Jakub Kurzak and Piotr Luszczek and Stanimire Tomov and Asim YarKhan} } @conference {839, title = {New Algorithm for Computing Eigenvectors of the Symmetric Eigenvalue Problem}, booktitle = {Workshop on Parallel and Distributed Scientific and Engineering Computing, IPDPS 2014 (Best Paper)}, year = {2014}, month = {2014-05}, publisher = {IEEE}, organization = {IEEE}, address = {Phoenix, AZ}, abstract = {We describe the design and implementation of a multi-stage algorithm for computing eigenvectors of a dense symmetric matrix. We show that reformulating the existing algorithms is beneficial in terms of performance, even if that doubles the computational complexity. Through detailed analysis, we show that the effect of the increase in the asymptotic operation count may be compensated by a much improved performance rate. Our performance results indicate that using our approach achieves very good speedup and scalability even when directly compared with the existing state-of-the-art software.}, doi = {10.1109/IPDPSW.2014.130}, author = {Azzam Haidar and Piotr Luszczek and Jack Dongarra} } @article {758, title = {A Novel Hybrid CPU-GPU Generalized Eigensolver for Electronic Structure Calculations Based on Fine Grained Memory Aware Tasks}, journal = {International Journal of High Performance Computing Applications}, volume = {28}, year = {2014}, month = {2014-05}, pages = {196-209}, chapter = {196}, abstract = {The adoption of hybrid CPU{\textendash}GPU nodes in traditional supercomputing platforms such as the Cray-XK6 opens acceleration opportunities for electronic structure calculations in materials science and chemistry applications, where medium-sized generalized eigenvalue problems must be solved many times. These eigenvalue problems are too small to effectively solve on distributed systems, but can benefit from the massive computing power concentrated on a single-node, hybrid CPU{\textendash}GPU system. 
However, hybrid systems call for the development of new algorithms that efficiently exploit the heterogeneity and massive parallelism of not just GPUs, but of multicore/manycore CPUs as well. Addressing these demands, we developed a generalized eigensolver featuring novel algorithms of increased computational intensity (compared with the standard algorithms), decomposition of the computation into fine-grained, memory-aware tasks, and their hybrid execution. The resulting eigensolvers are state-of-the-art in high-performance computing, significantly outperforming existing libraries. We describe the algorithm and analyze its performance impact on applications of interest when different fractions of eigenvectors are needed by the host electronic structure code.}, keywords = {Eigensolver, electronic structure calculations, generalized eigensolver, gpu, high performance, hybrid, Multicore, two-stage}, doi = {10.1177/1094342013502097}, author = {Azzam Haidar and Raffaele Solc{\`a} and Mark Gates and Stanimire Tomov and Thomas C. Schulthess and Jack Dongarra} } @conference {828, title = {Performance and Portability with OpenCL for Throughput-Oriented HPC Workloads Across Accelerators, Coprocessors, and Multicore Processors}, booktitle = {5th Workshop on Latest Advances in Scalable Algorithms for Large-Scale Systems (ScalA {\textquoteright}14)}, year = {2014}, month = {2014-11}, publisher = {IEEE}, organization = {IEEE}, address = {New Orleans, LA}, abstract = {Ever since accelerators and coprocessors became the mainstream hardware for throughput-oriented HPC workloads, various programming techniques have been proposed to increase productivity in terms of both performance and ease of use. We evaluate these aspects of OpenCL on a number of hardware platforms for an important subset of dense linear algebra operations that are relevant to a wide range of scientific applications. Our findings indicate that OpenCL portability has improved since our previous publication, and many new and surprising usage scenarios are possible that rival those available after decades of software development on the CPUs. The combined performance-portability metric, even though not promised by the OpenCL standard, reflects the need for tuning performance-critical operations during the porting process, and we show how a large portion of the available efficiency is lost if the tuning is not done correctly.}, doi = {10.1109/ScalA.2014.8}, author = {Azzam Haidar and Chongxiao Cao and Ichitaro Yamazaki and Jack Dongarra and Mark Gates and Piotr Luszczek and Stanimire Tomov} } @conference {809, title = {Unified Development for Mixed Multi-GPU and Multi-Coprocessor Environments using a Lightweight Runtime Environment}, booktitle = {IPDPS 2014}, year = {2014}, month = {2014-05}, publisher = {IEEE}, organization = {IEEE}, address = {Phoenix, AZ}, abstract = {Many of the heterogeneous resources available to modern computers are designed for different workloads. In order to efficiently use GPU resources, the workload must have a greater degree of parallelism than a workload designed for multicore CPUs. Conceptually, the Intel Xeon Phi coprocessors are capable of handling workloads somewhere in between the two. This multitude of applicable workloads will likely lead to mixing multicore CPUs, GPUs, and Intel coprocessors in multi-user environments that must offer adequate computing facilities for a wide range of workloads. 
In this work, we use a lightweight runtime environment to manage the resource-specific workload, and to control the dataflow and parallel execution in two-way hybrid systems. The lightweight runtime environment uses task superscalar concepts to enable the developer to write serial code while providing parallel execution. In addition, our task abstractions enable unified algorithmic development across all the heterogeneous resources. We provide performance results for dense linear algebra applications, demonstrating the effectiveness of our approach and full utilization of a wide variety of accelerator hardware.}, keywords = {algorithms, Computer science, CUDA, Heterogeneous systems, Intel Xeon Phi, linear algebra, nVidia, Tesla K20, Tesla M2090}, author = {Azzam Haidar and Chongxiao Cao and Jack Dongarra and Piotr Luszczek and Stanimire Tomov} } @techreport {704, title = {An Improved Parallel Singular Value Algorithm and Its Implementation for Multicore Hardware}, journal = {University of Tennessee Computer Science Technical Report (also LAWN 283)}, number = {ut-eecs-13-720}, year = {2013}, month = {2013-10}, publisher = {University of Tennessee}, keywords = {lapack, plasma, scalapack}, author = {Azzam Haidar and Piotr Luszczek and Jakub Kurzak and Jack Dongarra} } @conference {696, title = {An Improved Parallel Singular Value Algorithm and Its Implementation for Multicore Hardware}, booktitle = {Supercomputing 2013}, year = {2013}, month = {2013-11}, address = {Denver, CO}, author = {Azzam Haidar and Piotr Luszczek and Jakub Kurzak and Jack Dongarra} } @inproceedings {757, title = {Leading Edge Hybrid Multi-GPU Algorithms for Generalized Eigenproblems in Electronic Structure Calculations}, journal = {International Supercomputing Conference (ISC)}, volume = {7905}, year = {2013}, month = {2013-06}, pages = {67-80}, publisher = {Springer Berlin Heidelberg}, edition = {Lecture Notes in Computer Science}, address = {Leipzig, Germany}, abstract = {Today{\textquoteright}s high computational demands from engineering fields and complex hardware development make it necessary to develop and optimize new algorithms toward achieving high performance and good scalability on the next generation of computers. The enormous gap between the high-performance capabilities of GPUs and the slow interconnect between them has made the development of numerical software that is scalable across multiple GPUs extremely challenging. We describe and analyze a successful methodology to address the challenges{\textemdash}starting from our algorithm design, kernel optimization and tuning, to our programming model{\textemdash}in the development of a scalable high-performance generalized eigenvalue solver in the context of electronic structure calculations in materials science applications. We developed a set of leading-edge dense linear algebra algorithms, as part of a generalized eigensolver, featuring fine-grained, memory-aware kernels, a task-based approach, and hybrid execution/scheduling. The goal of the new design is to increase the computational intensity of the major compute kernels and to reduce synchronization and data transfers between GPUs. We report the performance impact on the generalized eigensolver when different fractions of eigenvectors are needed. 
The algorithm described provides an enormous performance boost compared to current GPU-based solutions, and performance comparable to state-of-the-art distributed solutions, using a single node with multiple GPUs.}, isbn = {978-3-642-38750-0}, doi = {10.1007/978-3-642-38750-0_6}, author = {Azzam Haidar and Stanimire Tomov and Jack Dongarra and Raffaele Solc{\`a} and Thomas C. Schulthess} } @conference {753, title = {Portable HPC Programming on Intel Many-Integrated-Core Hardware with MAGMA Port to Xeon Phi}, booktitle = {PPAM 2013}, year = {2013}, month = {2013-09}, address = {Warsaw, Poland}, abstract = {This paper presents the design and implementation of several fundamental dense linear algebra (DLA) algorithms for multicore CPUs with Intel Xeon Phi coprocessors. In particular, we consider algorithms for solving linear systems. Further, we give an overview of the MAGMA MIC library, an open-source, high-performance library that incorporates the developments presented and, in general, provides the DLA functionality of the popular LAPACK library to heterogeneous architectures of multicore CPUs with coprocessors. The LAPACK compliance simplifies the use of the MAGMA MIC library in applications, while providing them with portably performant DLA. High performance is obtained through use of the high-performance BLAS, hardware-specific tuning, and a hybridization methodology where we split the algorithm into computational tasks of various granularities. Execution of those tasks is properly scheduled over the heterogeneous hardware components by minimizing data movements and mapping algorithmic requirements to the architectural strengths of the various heterogeneous hardware components. Our methodology and programming techniques are incorporated into the MAGMA MIC API, which abstracts the application developer from the specifics of the Xeon Phi architecture and is therefore applicable to algorithms beyond the scope of DLA.}, keywords = {magma, mic, xeon phi}, author = {Jack Dongarra and Mark Gates and Azzam Haidar and Yulu Jia and Khairul Kabir and Piotr Luszczek and Stanimire Tomov} } @conference {686, title = {Toward a scalable multi-GPU eigensolver via compute-intensive kernels and efficient communication}, booktitle = {Proceedings of the 27th ACM International Conference on Supercomputing (ICS {\textquoteright}13)}, year = {2013}, month = {2013-06}, publisher = {ACM Press}, organization = {ACM Press}, address = {Eugene, Oregon, USA}, abstract = {The enormous gap between the high-performance capabilities of GPUs and the slow interconnect between them has made the development of numerical software that is scalable across multiple GPUs extremely challenging. We describe a successful methodology for addressing the challenges---starting from our algorithm design, kernel optimization and tuning, to our programming model---in the development of a scalable high-performance tridiagonal reduction algorithm for the symmetric eigenvalue problem. This is a fundamental linear algebra problem with many engineering and physics applications. We use a combination of a task-based approach to parallelism and a new algorithmic design to achieve high performance. The goal of the new design is to increase the computational intensity of the major compute kernels and to reduce synchronization and data transfers between GPUs. This may increase the number of flops, but the increase is offset by the more efficient execution and reduced data transfers. 
Our performance results are the best available, providing an enormous performance boost compared to current state-of-the-art solutions. In particular, our software scales up to 1070 Gflop/s using 16 Intel E5-2670 cores and eight M2090 GPUs, compared to 45 Gflop/s achieved by the optimized Intel Math Kernel Library (MKL) using only the 16 CPU cores.}, keywords = {eigenvalue, gpu communication, gpu computation, heterogeneous programming model, performance, reduction to tridiagonal, singular value decomposition, task parallelism}, isbn = {9781450321303}, doi = {10.1145/2464996.2465438}, url = {http://dl.acm.org/citation.cfm?doid=2464996.2465438}, author = {Azzam Haidar and Mark Gates and Stanimire Tomov and Jack Dongarra}, editor = {Allen D. Malony and Mario Nemirovsky and Sam Midkiff} } @article {icl:695, title = {A Comprehensive Study of Task Coalescing for Selecting Parallelism Granularity in a Two-Stage Bidiagonal Reduction}, journal = {IPDPS 2012}, year = {2012}, month = {2012-05}, address = {Shanghai, China}, author = {Azzam Haidar and Hatem Ltaeif and Piotr Luszczek and Jack Dongarra} } @article {1355, title = {MAGMA: A Breakthrough in Solvers for Eigenvalue Problems}, year = {2012}, month = {2012-05}, publisher = {GPU Technology Conference (GTC12), Presentation}, address = {San Jose, CA}, author = {Stanimire Tomov and Jack Dongarra and Azzam Haidar and Ichitaro Yamazaki and Tingxing Dong and Thomas Schulthess and Raffaele Solc{\`a}} } @article {1349, title = {MAGMA: A New Generation of Linear Algebra Library for GPU and Multicore Architectures}, year = {2012}, month = {2012-11}, publisher = {The International Conference for High Performance Computing, Networking, Storage, and Analysis (SC12), Presentation}, address = {Salt Lake City, UT}, author = {Jack Dongarra and Tingxing Dong and Mark Gates and Azzam Haidar and Stanimire Tomov and Ichitaro Yamazaki} } @article {icl:729, title = {A Novel Hybrid CPU-GPU Generalized Eigensolver for Electronic Structure Calculations Based on Fine Grained Memory Aware Tasks}, journal = {Supercomputing {\textquoteright}12 (poster)}, year = {2012}, month = {2012-11}, address = {Salt Lake City, Utah}, author = {Raffaele Solc{\`a} and Azzam Haidar and Stanimire Tomov and Jack Dongarra and Thomas C. 
Schulthess} } @article {icl:726, title = {Toward High Performance Divide and Conquer Eigensolver for Dense Symmetric Matrices}, journal = {SIAM Journal on Scientific Computing (Accepted)}, year = {2012}, month = {2012-07}, author = {Azzam Haidar and Hatem Ltaeif and Jack Dongarra} } @inproceedings {icl:607, title = {3-D parallel frequency-domain visco-acoustic wave modelling based on a hybrid direct/iterative solver}, journal = {73rd EAGE Conference \& Exhibition incorporating SPE EUROPEC 2011, Vienna, Austria, 23-26 May}, year = {2011}, month = {2011-00}, author = {Azzam Haidar and Luc Giraud and Hafedh Ben-Hadj-Ali and Florent Sourbier and St{\'e}phane Operto and Jean Virieux} } @inproceedings {icl:605, title = {Algebraic Schwarz Preconditioning for the Schur Complement: Application to the Time-Harmonic Maxwell Equations Discretized by a Discontinuous Galerkin Method.}, journal = {The Twentieth International Conference on Domain Decomposition Methods}, year = {2011}, month = {2011-02}, address = {La Jolla, California}, url = {http://hal.inria.fr/inria-00577639}, author = {Emmanuel Agullo and Luc Giraud and Amina Guermouche and Azzam Haidar and Stephane Lanteri and Jean Roman} } @techreport {icl:631, title = {Analysis of Dynamically Scheduled Tile Algorithms for Dense Linear Algebra on Multicore Architectures}, journal = {University of Tennessee Computer Science Technical Report, UT-CS-11-666, (also Lawn 243)}, year = {2011}, month = {2011-03}, keywords = {plasma, quark}, author = {Azzam Haidar and Hatem Ltaeif and Asim YarKhan and Jack Dongarra} } @inproceedings {icl:676, title = {Flexible Development of Dense Linear Algebra Algorithms on Massively Parallel Architectures with DPLASMA}, journal = {Proceedings of the Workshops of the 25th IEEE International Symposium on Parallel and Distributed Processing (IPDPS 2011 Workshops)}, year = {2011}, month = {2011-05}, pages = {1432-1441}, publisher = {IEEE}, address = {Anchorage, Alaska, USA}, keywords = {dague, dplasma, parsec}, author = {George Bosilca and Aurelien Bouteiller and Anthony Danalis and Mathieu Faverge and Azzam Haidar and Thomas Herault and Jakub Kurzak and Julien Langou and Pierre Lemariner and Hatem Ltaeif and Piotr Luszczek and Asim YarKhan and Jack Dongarra} } @article {icl:606, title = {Parallel algebraic domain decomposition solver for the solution of augmented systems.}, journal = {Parallel, Distributed, Grid and Cloud Computing for Engineering, Ajaccio, Corsica, France, 12-15 April}, year = {2011}, month = {2011-00}, author = {Emmanuel Agullo and Luc Giraud and Amina Guermouche and Azzam Haidar and Jean Roman} } @inproceedings {icl:657, title = {Parallel Reduction to Condensed Forms for Symmetric Eigenvalue Problems using Aggregated Fine-Grained and Memory-Aware Kernels}, journal = {Proceedings of 2011 International Conference for High Performance Computing, Networking, Storage and Analysis (SC11)}, year = {2011}, month = {2011-11}, address = {Seattle, WA}, keywords = {plasma, quark}, author = {Azzam Haidar and Hatem Ltaeif and Jack Dongarra} } @techreport {icl:627, title = {Parallel Reduction to Condensed Forms for Symmetric Eigenvalue Problems using Aggregated Fine-Grained and Memory-Aware Kernels}, journal = {University of Tennessee Computer Science Technical Report, UT-CS-11-677, (also Lawn254)}, year = {2011}, month = {2011-08}, author = {Azzam Haidar and Hatem Ltaeif and Jack Dongarra} } @article {icl:603, title = {Three-dimensional parallel frequency-domain visco-acoustic wave modelling based on a hybrid 
direct/iterative solver.}, journal = {To appear in Geophysical Prospecting.}, year = {2011}, month = {2011-00}, author = {Florent Sourbier and Azzam Haidar and Luc Giraud and Hafedh Ben-Hadj-Ali and St{\'e}phane Operto and Jean Virieux} } @article {icl:604, title = {Toward High Performance Divide and Conquer Eigensolver for Dense Symmetric Matrices}, journal = {Submitted to SIAM Journal on Scientific Computing (SISC)}, year = {2011}, month = {2011-00}, author = {Azzam Haidar and Hatem Ltaeif and Jack Dongarra} } @article {icl:533, title = {Analysis of Dynamically Scheduled Tile Algorithms for Dense Linear Algebra on Multicore Architectures}, journal = {Submitted to Concurrency and Computation: Practice and Experience}, year = {2010}, month = {2010-11}, keywords = {plasma, quark}, author = {Azzam Haidar and Hatem Ltaeif and Asim YarKhan and Jack Dongarra} } @techreport {icl:563, title = {Distributed Dense Numerical Linear Algebra Algorithms on Massively Parallel Architectures: DPLASMA}, journal = {University of Tennessee Computer Science Technical Report, UT-CS-10-660}, year = {2010}, month = {2010-09}, keywords = {dague, dplasma, parsec, plasma}, author = {George Bosilca and Aurelien Bouteiller and Anthony Danalis and Mathieu Faverge and Azzam Haidar and Thomas Herault and Jakub Kurzak and Julien Langou and Pierre Lemariner and Hatem Ltaeif and Piotr Luszczek and Asim YarKhan and Jack Dongarra} } @techreport {icl:529, title = {Distributed-Memory Task Execution and Dependence Tracking within DAGuE and the DPLASMA Project}, journal = {Innovative Computing Laboratory Technical Report}, number = {ICL-UT-10-02}, year = {2010}, month = {2010-00}, keywords = {dague, plasma}, author = {George Bosilca and Aurelien Bouteiller and Anthony Danalis and Mathieu Faverge and Azzam Haidar and Thomas Herault and Jakub Kurzak and Julien Langou and Pierre Lemariner and Hatem Ltaeif and Piotr Luszczek and Asim YarKhan and Jack Dongarra} } @article {icl:553, title = {MaPHyS or the Development of a Parallel Algebraic Domain Decomposition Solver in the Course of the Solstice Project}, journal = {Sparse Days 2010 Meeting at CERFACS}, year = {2010}, month = {2010-06}, address = {Toulouse, France}, author = {Emmanuel Agullo and Luc Giraud and Amina Guermouche and Azzam Haidar and Jean Roman and Yohan Lee-Tin-Yien} } @article {icl:545, title = {Sparse approximations of the Schur complement for parallel algebraic hybrid solvers in 3D}, journal = {Numerical Mathematics: Theory, Methods and Applications}, volume = {3}, number = {3}, year = {2010}, month = {2010-00}, pages = {64-82}, publisher = {Global Science Press}, address = {Beijing}, author = {Luc Giraud and Azzam Haidar and Yousef Saad}, editor = {C. Zhiming} } @article {icl:552, title = {Towards a Complexity Analysis of Sparse Hybrid Linear Solvers}, journal = {PARA 2010}, year = {2010}, month = {2010-06}, address = {Reykjavik, Iceland}, author = {Emmanuel Agullo and Luc Giraud and Amina Guermouche and Azzam Haidar and Jean Roman} } @article {icl:544, title = {Using multiple levels of parallelism to enhance the performance of domain decomposition solvers}, journal = {Parallel Computing}, volume = {36}, number = {5-6}, year = {2010}, month = {2010-00}, pages = {285-296}, publisher = {Elsevier}, author = {Luc Giraud and Azzam Haidar and Stephane Pralet}, editor = {Costas Bekas and Pascua D{\textquoteright}Ambra and Ananth Grama and Yousef Saad and Petko Yanev} }