@techreport{hoemmen2021p1673,
  author        = {Hoemmen, Mark and Hollman, Daisy and Trott, Christian and Sunderland, Daniel and Liber, Nevin and Lo, Li-Ta and Lebrun-Grandie, Damien and Lopez, Graham and Caday, Peter and Knepper, Sarah and Luszczek, Piotr and Costa, Timothy},
  title         = {{P1673R3}: A Free Function Linear Algebra Interface Based on the {BLAS}},
  institution   = {ISO/IEC JTC1 SC22 WG21},
  type          = {Standard},
  number        = {P1673R3},
  publisher     = {ISO},
  year          = {2021},
  month         = apr,
  url           = {http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2021/p1673r3.pdf},
  keywords      = {C++, linear algebra},
  abstract      = {We believe this proposal is complementary to P1385, a proposal for a C++ Standard linear algebra library that introduces matrix and vector classes and overloaded arithmetic operators. In fact, we think that our proposal would make a natural foundation for a library like what P1385 proposes. However, a free function interface -- which clearly separates algorithms from data structures -- more naturally allows for a richer set of operations such as what the BLAS provides. A natural extension of the present proposal would include accepting P1385{\textquoteright}s matrix and vector objects as input for the algorithms proposed here. A straightforward way to do that would be for P1385{\textquoteright}s matrix and vector objects to make views of their data available as basic_mdspan.},
  internal-note = {NOTE(review): original entry had an empty citation key and put the institution in a journal field as "ISO JTC1 SC22 WG22"; moved to institution and corrected WG22 -> WG21 to match the URL path (wg21) and the C++ committee designation -- confirm.},
}

@techreport{1300,
  author        = {Dongarra, Jack and Duff, Iain and Gates, Mark and Haidar, Azzam and Hammarling, Sven and Higham, Nicholas J. and Hogg, Jonathan and {Valero Lara}, Pedro and Luszczek, Piotr and Zounon, Mawussi and Relton, Samuel D. and Tomov, Stanimire and Costa, Timothy and Knepper, Sarah},
  title         = {Batched {BLAS} ({Basic Linear Algebra Subprograms}) 2018 Specification},
  type          = {Specification},
  institution   = {University of Tennessee},
  year          = {2018},
  month         = jul,
  abstract      = {This document describes an API for Batch Basic Linear Algebra Subprograms (Batched BLAS or BBLAS). We focus on many independent BLAS operations on small matrices that are grouped together and processed by a single routine, called a Batched BLAS routine. The extensions beyond the original BLAS standard are considered that specify a programming interface not only for routines with uniformly-sized matrices and/or vectors but also for the situation where the sizes vary. The aim is to provide more efficient, but portable, implementations of algorithms on high-performance manycore platforms. These include multicore and many-core CPU processors; GPUs and coprocessors; as well as other hardware accelerators with floating-point compute facility.},
  internal-note = {NOTE(review): original was @article with no journal/volume/pages (BibTeX warns and renders broken output); changed to @techreport. institution is presumed from the specification's authorship (ICL, University of Tennessee) -- confirm and add a report number. Surname "Valero Lara" braced as a compound -- confirm hyphenation (often spelled Valero-Lara).},
}