@techreport{1471,
  title       = {Mixed-Precision Solution of Linear Systems Using Accelerator-Based Computing},
  author      = {Azzam Haidar and Harun Bayraktar and Stanimire Tomov and Jack Dongarra and Nicholas J. Higham},
  type        = {Innovative Computing Laboratory Technical Report},
  number      = {ICL-UT-20-05},
  institution = {University of Tennessee},
  year        = {2020},
  month       = may,
  abstract    = {Double-precision floating-point arithmetic (FP64) has been the de facto standard for engineering and scientific simulations for several decades. Problem complexity and the sheer volume of data coming from various instruments and sensors motivate researchers to mix and match different approaches to optimize compute resources, including different levels of floating-point precision. In recent years, machine learning has motivated hardware support for half-precision floating-point arithmetic. A primary challenge in high-performance computing is to leverage this reduced- and mixed-precision hardware. We show how the FP16/FP32 Tensor Cores on NVIDIA GPUs can be exploited to accelerate the solution of linear systems of equations $Ax = b$ without sacrificing numerical stability. We achieve a $4\times$--$5\times$ performance increase and $5\times$ better energy efficiency versus the standard FP64 implementation, while maintaining an FP64 level of numerical stability.}
}
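
Note: the abstract above refers to solving $Ax = b$ with a reduced-precision factorization while recovering FP64 accuracy through refinement. The sketch below is a minimal NumPy/SciPy illustration of that general idea (classical iterative refinement around a low-precision LU factorization); it is not the authors' Tensor Core implementation. The function name, tolerance, and the use of FP32 in place of FP16 are assumptions for illustration only, since LAPACK/SciPy do not expose half-precision factorizations.

# Illustrative sketch only: LU factorization in reduced precision,
# residual and update accumulated in FP64, in the spirit of the
# mixed-precision approach described in the abstract.
import numpy as np
import scipy.linalg as la

def mixed_precision_solve(A, b, low=np.float32, tol=1e-12, max_iter=50):
    """Solve Ax = b: factor A once in `low` precision, refine in FP64."""
    A64 = np.asarray(A, dtype=np.float64)
    b64 = np.asarray(b, dtype=np.float64)

    # Expensive O(n^3) step: factor a reduced-precision copy of A.
    lu, piv = la.lu_factor(A64.astype(low))

    # Initial solve in reduced precision, promoted back to FP64.
    x = la.lu_solve((lu, piv), b64.astype(low)).astype(np.float64)

    for _ in range(max_iter):
        # Residual computed in FP64 (cheap O(n^2) work).
        r = b64 - A64 @ x
        bound = (np.linalg.norm(A64, np.inf) * np.linalg.norm(x, np.inf)
                 + np.linalg.norm(b64, np.inf))
        if np.linalg.norm(r, np.inf) <= tol * bound:
            break
        # Correction equation solved with the reduced-precision factors.
        d = la.lu_solve((lu, piv), r.astype(low)).astype(np.float64)
        x += d
    return x

if __name__ == "__main__":
    rng = np.random.default_rng(0)
    n = 500
    A = rng.standard_normal((n, n)) + n * np.eye(n)  # well-conditioned test matrix
    b = rng.standard_normal(n)
    x = mixed_precision_solve(A, b)
    print("relative residual:", np.linalg.norm(b - A @ x) / np.linalg.norm(b))

The O(n^3) factorization is performed once in reduced precision, while each refinement step costs only O(n^2) work in FP64; this division of labor is what allows the reduced-precision hardware to deliver the speedup while the refined solution retains FP64-level accuracy, as reported in the abstract.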