@conference{1094,
  title        = {Co-Scheduling Algorithms for Cache-Partitioned Systems},
  booktitle    = {19th Workshop on Advances in Parallel and Distributed Computational Models},
  year         = {2017},
  month        = {May},
  publisher    = {IEEE Computer Society Press},
  organization = {IEEE Computer Society Press},
  address      = {Orlando, FL},
  abstract     = {Cache-partitioned architectures allow subsections of the shared last-level cache (LLC) to be exclusively reserved for some applications. This technique dramatically limits interactions between applications that are concurrently executing on a multicore machine. Consider n applications that execute concurrently, with the objective of minimizing the makespan, defined as the maximum completion time of the n applications. The key scheduling questions are: (i) which proportion of the cache and (ii) how many processors should be given to each application? Here, we assign rational numbers of processors to each application, since processors can be shared across applications through multi-threading. In this paper, we provide answers to (i) and (ii) for perfectly parallel applications. Even though the problem is shown to be NP-complete, we give key elements to determine the subset of applications that should share the LLC (while the remaining ones only use their smaller private caches). Building upon these results, we design efficient heuristics for general applications. Extensive simulations demonstrate the usefulness of co-scheduling when our efficient cache-partitioning strategies are deployed.},
  keywords     = {Computational modeling, Degradation, Interference, Mathematical model, Program processors, Supercomputers, Throughput},
  doi          = {10.1109/IPDPSW.2017.60},
  author       = {Guillaume Aupy and Anne Benoit and Lo{\"\i}c Pottier and Padma Raghavan and Yves Robert and Manu Shantharam}
}