Make backend names in JSON reports match burnbench CLI (#1375)

* Make backend names in JSON reports match burnbench CLI

- add `config_name` to `Backend` trait
- add `backend_config_name` to `Benchmark` trait
- fix documentation for JSON reports to use correct unit of time

* Revert "Make backend names in JSON reports match burnbench CLI"

This reverts commit a09edb6389.

* [backend-comparison] Serialize the feature name passed to burnbench

---------

Co-authored-by: syl20bnr <sylvain.benner@gmail.com>
This commit is contained in:
Ilya Dmitrichenko 2024-04-01 14:48:44 +01:00 committed by GitHub
parent edc683bc4b
commit 67994c02d5
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
9 changed files with 148 additions and 29 deletions

View File

@ -36,13 +36,25 @@ impl<B: Backend, const D: usize> Benchmark for BinaryBenchmark<B, D> {
}
#[allow(dead_code)]
fn bench<B: Backend>(device: &B::Device, url: Option<&str>, token: Option<&str>) {
fn bench<B: Backend>(
device: &B::Device,
feature_name: &str,
url: Option<&str>,
token: Option<&str>,
) {
let benchmark = BinaryBenchmark::<B, 3> {
shape: [32, 512, 1024].into(),
device: device.clone(),
};
save::<B>(vec![run_benchmark(benchmark)], device, url, token).unwrap();
save::<B>(
vec![run_benchmark(benchmark)],
device,
feature_name,
url,
token,
)
.unwrap();
}
fn main() {

View File

@ -54,7 +54,12 @@ impl<B: Backend> Benchmark for ConvTranspose2dBenchmark<B> {
}
#[allow(dead_code)]
fn bench<B: Backend>(device: &B::Device, url: Option<&str>, token: Option<&str>) {
fn bench<B: Backend>(
device: &B::Device,
feature_name: &str,
url: Option<&str>,
token: Option<&str>,
) {
// Shapes
let batch_size = 16;
let channels_in = 16;
@ -85,7 +90,14 @@ fn bench<B: Backend>(device: &B::Device, url: Option<&str>, token: Option<&str>)
device: device.clone(),
};
save::<B>(vec![run_benchmark(benchmark)], device, url, token).unwrap();
save::<B>(
vec![run_benchmark(benchmark)],
device,
feature_name,
url,
token,
)
.unwrap();
}
fn main() {

View File

@ -113,7 +113,12 @@ fn erf_positive<B: Backend, const D: usize>(x: Tensor<B, D>) -> Tensor<B, D> {
}
#[allow(dead_code)]
fn bench<B: Backend>(device: &B::Device, url: Option<&str>, token: Option<&str>) {
fn bench<B: Backend>(
device: &B::Device,
feature_name: &str,
url: Option<&str>,
token: Option<&str>,
) {
const D: usize = 3;
let shape: Shape<D> = [32, 512, 2048].into();
@ -144,6 +149,7 @@ fn bench<B: Backend>(device: &B::Device, url: Option<&str>, token: Option<&str>)
run_benchmark(custom_erf_gelu),
],
device,
feature_name,
url,
token,
)

View File

@ -71,7 +71,12 @@ impl<B: Backend, const D: usize> Benchmark for FromDataBenchmark<B, D> {
}
#[allow(dead_code)]
fn bench<B: Backend>(device: &B::Device, url: Option<&str>, token: Option<&str>) {
fn bench<B: Backend>(
device: &B::Device,
feature_name: &str,
url: Option<&str>,
token: Option<&str>,
) {
const D: usize = 3;
let shape: Shape<D> = [32, 512, 1024].into();
@ -81,6 +86,7 @@ fn bench<B: Backend>(device: &B::Device, url: Option<&str>, token: Option<&str>)
save::<B>(
vec![run_benchmark(to_benchmark), run_benchmark(from_benchmark)],
device,
feature_name,
url,
token,
)

View File

@ -42,7 +42,12 @@ impl<B: Backend, const D: usize> Benchmark for MatmulBenchmark<B, D> {
}
#[allow(dead_code)]
fn bench<B: Backend>(device: &B::Device, url: Option<&str>, token: Option<&str>) {
fn bench<B: Backend>(
device: &B::Device,
feature_name: &str,
url: Option<&str>,
token: Option<&str>,
) {
const D: usize = 3;
let batch_size = 3;
let m = 1024;
@ -53,7 +58,14 @@ fn bench<B: Backend>(device: &B::Device, url: Option<&str>, token: Option<&str>)
let benchmark = MatmulBenchmark::<B, D>::new(shape_lhs, shape_rhs, device.clone());
save::<B>(vec![run_benchmark(benchmark)], device, url, token).unwrap();
save::<B>(
vec![run_benchmark(benchmark)],
device,
feature_name,
url,
token,
)
.unwrap();
}
fn main() {

View File

@ -42,7 +42,12 @@ impl<B: Backend> Benchmark for MaxPool2dBenchmark<B> {
}
#[allow(dead_code)]
fn bench<B: Backend>(device: &B::Device, url: Option<&str>, token: Option<&str>) {
fn bench<B: Backend>(
device: &B::Device,
feature_name: &str,
url: Option<&str>,
token: Option<&str>,
) {
let benchmark = MaxPool2dBenchmark::<B> {
shape: [32, 32, 512, 512].into(),
kernel_size: [5, 5],
@ -52,7 +57,14 @@ fn bench<B: Backend>(device: &B::Device, url: Option<&str>, token: Option<&str>)
device: device.clone(),
};
save::<B>(vec![run_benchmark(benchmark)], device, url, token).unwrap();
save::<B>(
vec![run_benchmark(benchmark)],
device,
feature_name,
url,
token,
)
.unwrap();
}
fn main() {

View File

@ -35,13 +35,25 @@ impl<B: Backend, const D: usize> Benchmark for UnaryBenchmark<B, D> {
}
#[allow(dead_code)]
fn bench<B: Backend>(device: &B::Device, url: Option<&str>, token: Option<&str>) {
fn bench<B: Backend>(
device: &B::Device,
feature_name: &str,
url: Option<&str>,
token: Option<&str>,
) {
const D: usize = 3;
let shape: Shape<D> = [32, 512, 1024].into();
let benchmark = UnaryBenchmark::<B, D>::new(shape, device.clone());
save::<B>(vec![run_benchmark(benchmark)], device, url, token).unwrap();
save::<B>(
vec![run_benchmark(benchmark)],
device,
feature_name,
url,
token,
)
.unwrap();
}
fn main() {

View File

@ -33,12 +33,41 @@ macro_rules! bench_on_backend {
let args: Vec<String> = env::args().collect();
let url = backend_comparison::get_sharing_url(&args);
let token = backend_comparison::get_sharing_token(&args);
#[cfg(feature = "candle-accelerate")]
let feature_name = "candle-accelerate";
#[cfg(feature = "candle-cpu")]
let feature_name = "candle-cpu";
#[cfg(feature = "candle-cuda")]
let feature_name = "candle-cuda";
#[cfg(feature = "candle-metal")]
let feature_name = "candle-metal";
#[cfg(feature = "ndarray")]
let feature_name = "ndarray";
#[cfg(feature = "ndarray-blas-accelerate")]
let feature_name = "ndarray-blas-accelerate";
#[cfg(feature = "ndarray-blas-netlib")]
let feature_name = "ndarray-blas-netlib";
#[cfg(feature = "ndarray-blas-openblas")]
let feature_name = "ndarray-blas-openblas";
#[cfg(feature = "tch-cpu")]
let feature_name = "tch-cpu";
#[cfg(feature = "tch-gpu")]
let feature_name = "tch-gpu";
#[cfg(feature = "wgpu")]
let feature_name = "wgpu";
#[cfg(feature = "wgpu-fusion")]
let feature_name = "wgpu-fusion";
#[cfg(feature = "wgpu")]
{
use burn::backend::wgpu::{AutoGraphicsApi, Wgpu, WgpuDevice};
bench::<Wgpu<AutoGraphicsApi, f32, i32>>(&WgpuDevice::default(), url, token);
bench::<Wgpu<AutoGraphicsApi, f32, i32>>(
&WgpuDevice::default(),
feature_name,
url,
token,
);
}
#[cfg(feature = "tch-gpu")]
@ -49,7 +78,7 @@ macro_rules! bench_on_backend {
let device = LibTorchDevice::Cuda(0);
#[cfg(target_os = "macos")]
let device = LibTorchDevice::Mps;
bench::<LibTorch>(&device, url, token);
bench::<LibTorch>(&device, feature_name, url, token);
}
#[cfg(feature = "tch-cpu")]
@ -57,7 +86,7 @@ macro_rules! bench_on_backend {
use burn::backend::{libtorch::LibTorchDevice, LibTorch};
let device = LibTorchDevice::Cpu;
bench::<LibTorch>(&device, url, token);
bench::<LibTorch>(&device, feature_name, url, token);
}
#[cfg(any(
@ -71,7 +100,7 @@ macro_rules! bench_on_backend {
use burn::backend::NdArray;
let device = NdArrayDevice::Cpu;
bench::<NdArray>(&device, url, token);
bench::<NdArray>(&device, feature_name, url, token);
}
#[cfg(feature = "candle-cpu")]
@ -80,7 +109,7 @@ macro_rules! bench_on_backend {
use burn::backend::Candle;
let device = CandleDevice::Cpu;
bench::<Candle>(&device, url, token);
bench::<Candle>(&device, feature_name, url, token);
}
#[cfg(feature = "candle-cuda")]
@ -89,7 +118,7 @@ macro_rules! bench_on_backend {
use burn::backend::Candle;
let device = CandleDevice::Cuda(0);
bench::<Candle>(&device, url, token);
bench::<Candle>(&device, feature_name, url, token);
}
#[cfg(feature = "candle-metal")]
@ -98,7 +127,7 @@ macro_rules! bench_on_backend {
use burn::backend::Candle;
let device = CandleDevice::Metal(0);
bench::<Candle>(&device, url, token);
bench::<Candle>(&device, feature_name, url, token);
}
};
}

View File

@ -15,6 +15,7 @@ use std::{fs, io::Write};
pub struct BenchmarkRecord {
backend: String,
device: String,
feature: String,
system_info: BenchmarkSystemInfo,
pub results: BenchmarkResult,
}
@ -29,6 +30,7 @@ pub struct BenchmarkRecord {
/// {
/// "backend": "backend name",
/// "device": "device name",
/// "feature": "feature name",
/// "gitHash": "hash",
/// "max": "duration in microseconds",
/// "mean": "duration in microseconds",
@ -49,6 +51,7 @@ pub struct BenchmarkRecord {
pub fn save<B: Backend>(
benches: Vec<BenchmarkResult>,
device: &B::Device,
feature: &str,
url: Option<&str>,
token: Option<&str>,
) -> Result<Vec<BenchmarkRecord>, std::io::Error> {
@ -71,6 +74,7 @@ pub fn save<B: Backend>(
.map(|bench| BenchmarkRecord {
backend: B::name().to_string(),
device: format!("{:?}", device),
feature: feature.to_string(),
system_info: BenchmarkSystemInfo::new(),
results: bench,
})
@ -157,6 +161,7 @@ impl Serialize for BenchmarkRecord {
self,
("backend", &self.backend),
("device", &self.device),
("feature", &self.feature),
("gitHash", &self.results.git_hash),
("max", &self.results.computed.max.as_micros()),
("mean", &self.results.computed.mean.as_micros()),
@ -190,6 +195,7 @@ impl<'de> Visitor<'de> for BenchmarkRecordVisitor {
match key.as_str() {
"backend" => br.backend = map.next_value::<String>()?,
"device" => br.device = map.next_value::<String>()?,
"feature" => br.feature = map.next_value::<String>()?,
"gitHash" => br.results.git_hash = map.next_value::<String>()?,
"name" => br.results.name = map.next_value::<String>()?,
"max" => {
@ -243,30 +249,40 @@ pub(crate) struct BenchmarkCollection {
impl Display for BenchmarkCollection {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
// Compute the max length for each column
let mut max_name_len = 0;
let mut max_backend_len = 0;
let mut max_name_len = "Benchmark".len();
let mut max_backend_len = "Backend".len();
let mut max_device_len = "Device".len();
let mut max_feature_len = "Feature".len();
for record in self.records.iter() {
let backend_name = [record.backend.clone(), record.device.clone()].join("-");
max_name_len = max_name_len.max(record.results.name.len());
max_backend_len = max_backend_len.max(backend_name.len());
max_backend_len = max_backend_len.max(record.backend.len());
max_device_len = max_device_len.max(record.device.len());
max_feature_len = max_feature_len.max(record.feature.len());
}
// Header
writeln!(
f,
"| {:<width_name$} | {:<width_backend$} | Median |\n|{:->width_name$}--|{:->width_backend$}--|----------------|",
"Benchmark", "Backend", "", "", width_name = max_name_len, width_backend = max_backend_len
"| {:<width_name$} | {:<width_feature$} | {:<width_backend$} | {:<width_device$} | Median |\n|{:->width_name$}--|{:->width_feature$}--|{:->width_backend$}--|{:->width_device$}--|----------------|",
"Benchmark", "Feature", "Backend", "Device", "", "", "", "",
width_name = max_name_len,
width_feature = max_feature_len,
width_backend = max_backend_len,
width_device = max_device_len
)?;
// Table entries
for record in self.records.iter() {
let backend_name = [record.backend.clone(), record.device.clone()].join("-");
writeln!(
f,
"| {:<width_name$} | {:<width_backend$} | {:<15.3?}|",
"| {:<width_name$} | {:<width_feature$} | {:<width_backend$} | {:<width_device$} | {:<15.3?}|",
record.results.name,
backend_name,
record.feature,
record.backend,
record.device,
record.results.computed.median,
width_name = max_name_len,
width_backend = max_backend_len
width_feature = max_feature_len,
width_backend = max_backend_len,
width_device = max_device_len
)?;
}
Ok(())
@ -283,6 +299,7 @@ mod tests {
"backend": "candle",
"device": "Cuda(0)",
"gitHash": "02d37011ab4dc773286e5983c09cde61f95ba4b5",
"feature": "wgpu-fusion",
"name": "unary",
"max": 8858,
"mean": 8629,
@ -345,6 +362,7 @@ mod tests {
let record = serde_json::from_str::<BenchmarkRecord>(sample_result).unwrap();
assert!(record.backend == "candle");
assert!(record.device == "Cuda(0)");
assert!(record.feature == "wgpu-fusion");
assert!(record.results.git_hash == "02d37011ab4dc773286e5983c09cde61f95ba4b5");
assert!(record.results.name == "unary");
assert!(record.results.computed.max.as_micros() == 8858);