Benchmark
Utilities for benchmarking.
Example Usage
runBenchmarks
Compare time execution of various implementation of the same operation.
import { runBenchmarks } from "./benchmarks"
const results = await runBenchmarks({
groupKey: "string concat",
runners: [
{ name: "concat", fn: (a: string, b: string) => a + b },
{ name: "concat with join", fn: (a: string, b: string) => [...a, ...b].join("") },
{ name: "concat with reduce", fn: (a: string, b: string) => [...a, ...b].reduce((a, b) => a + b, "") },
],
args: [
'hello',
'nerds'
],
iterations: 500,
warmupCycles: 5, // cycles
delayBetween: 10, // ms
});
results.print();
┌─────────┬──────────────────────┬──────────────────┬─────────────┬──────────┬──────────┬──────────┬──────────┬──────────┬──────────┬───────────┬─────────────┬─────────────────┐
│ (index) │ Function │ x slower vs best │ Median (ms) │ Min (ms) │ Max (ms) │ p10 (ms) │ p50 (ms) │ p90 (ms) │ p99 (ms) │ Mean (ms) │ Stddev (ms) │ total time (ms) │
├─────────┼──────────────────────┼──────────────────┼─────────────┼──────────┼──────────┼──────────┼──────────┼──────────┼──────────┼───────────┼─────────────┼─────────────────┤
│ 0 │ 'concat (⭐️ Best)' │ '1.00×' │ '0.000' │ '0.00' │ '0.02' │ '0.00' │ '0.00' │ '0.00' │ '0.00' │ '0.00' │ '0.00' │ '0.21' │
│ 1 │ 'concat with join' │ '6.86×' │ '0.001' │ '0.00' │ '0.66' │ '0.00' │ '0.00' │ '0.00' │ '0.02' │ '0.00' │ '0.03' │ '1.46' │
│ 2 │ 'concat with reduce' │ '5.31×' │ '0.001' │ '0.00' │ '0.33' │ '0.00' │ '0.00' │ '0.00' │ '0.01' │ '0.00' │ '0.02' │ '1.13' │
└─────────┴──────────────────────┴──────────────────┴─────────────┴──────────┴──────────┴──────────┴──────────┴──────────┴──────────┴───────────┴─────────────┴─────────────────┘Inputs:
groupKey: Required.
Used in the output of `print`.
runners: Required.
Array of functions. Each array item is an object with: name: string, used in the output of `print`; fn: (...args: any[]) => any | Promise&lt;any&gt;. Can be sync or async.
args: Optional. Default is `[]`.
Array of arguments to pass to each runner, as an array that will be spread.
iterations: Optional. Default is `50`.
How many times to run each function?
warmupCycles: Optional. Default is `5`.
How many times to run fn in the warmup phase?
This phase runs before the real benchmark in order to allow the JIT to mark the functions as hot and optimize them. Use this to avoid a "cold start" effect.
delayBetween: Optional. Default is `10`.
How many ms to wait between each run of each function?
Returns:
print: function that prints the results to console, using `console.table`.
results: Array of objects, one for each runner, with the following structure:
name: string, used in the output of `print`.
stats: object with the following properties: mean: number, average time in ms. stddev: number, standard deviation against the mean in ms. min: number, minimum time in ms. max: number, maximum time in ms. median: number, median time in ms. p10: number, 10th percentile in ms. p50: number, 50th percentile in ms. p90: number, 90th percentile in ms. p99: number, 99th percentile in ms.
Dependencies
No dependencies
Auto Install
npx shadcn@latest add https://shadcn-registry-ts.vercel.app/r/util-benchmark.json
Manual Install
benchmark.ts
/** A single implementation to benchmark. */
type BenchmarkRunner = {
  /** Label shown in the printed results table. */
  name: string;
  /** The function under test; may be sync or async. */
  fn: (...args: any[]) => any | Promise<any>;
};
/** Per-runner measurement produced by `runBenchmarks`. */
type BenchmarkResult = {
  /** Runner label, copied from `BenchmarkRunner.name`. */
  name: string;
  /** Raw per-iteration durations in milliseconds. */
  times: number[];
  /** Aggregate statistics derived from `times`. */
  stats: ReturnType<typeof calculateStats>,
};
/** Executes a benchmark that compares the execution time of several implementations of the same operation. */
export async function runBenchmarks({
  groupKey,
  runners,
  iterations = 50,
  args = [],
  warmupCycles = 5,
  delayBetween = 10, // ms between runs, to reduce cross-run interference
}: {
  /** Group name, used in the output of `print` */
  groupKey: string;
  /** Array of Functions to benchmark */
  runners: BenchmarkRunner[];
  /** Arguments to pass to each function, as an array that will be spread @default [] */
  args?: any[];
  /** How many times to run each function? @default 50 */
  iterations?: number;
  /** How many times to run fn in the warmup phase? @default 5 */
  warmupCycles?: number;
  /** How many ms to wait between each run? @default 10 */
  delayBetween?: number;
}) {
  // Fail fast with a clear message instead of crashing later on `resultsSorted[0]`.
  if (runners.length === 0) {
    throw new Error("runBenchmarks: 'runners' must contain at least one runner");
  }
  // init the output
  const results: BenchmarkResult[] = [];
  // for each fn...
  for (const { name, fn } of runners) {
    // per-iteration durations (ms) for this runner
    const iterationsTimeTaken: number[] = [];
    // warmup phase: allow the JIT to mark the function as hot before measuring
    for (let i = 0; i < warmupCycles; i++) {
      await runAndMeasure(fn, args);
    }
    // measured phase
    for (let i = 0; i < iterations; i++) {
      // run the fn and record its duration
      const timeTaken = await runAndMeasure(fn, args);
      iterationsTimeTaken.push(timeTaken);
      // optional pause between runs to reduce interference
      if (delayBetween) {
        await sleep(delayBetween);
      }
    }
    // aggregate stats for this runner
    results.push({
      name,
      times: iterationsTimeTaken,
      stats: calculateStats(iterationsTimeTaken),
    });
  }
  // sort fastest-first by mean; identify the best row by object identity
  // (not by name) so duplicate runner names cannot mark the wrong row as best
  const resultsSorted = [...results].sort((a, b) => a.stats.mean - b.stats.mean);
  const bestResult = resultsSorted[0];
  const bestResultMean = bestResult.stats.mean;
  const isBestResult = (r: BenchmarkResult) => r === bestResult;
  // create a fn that prints the results to console
  const print = () => {
    console.log(`[${groupKey}] - Iterations: ${iterations} - Warmup: ${warmupCycles}`);
    console.table(
      results.map(r => ({
        Function: isBestResult(r) ? `${r.name} (⭐️ Best)` : r.name,
        "x slower vs best": (r.stats.mean / bestResultMean).toFixed(2) + "×",
        "Median (ms)": r.stats.median.toFixed(3),
        "Min (ms)": r.stats.min.toFixed(2),
        "Max (ms)": r.stats.max.toFixed(2),
        "p10 (ms)": r.stats.p10.toFixed(2),
        "p50 (ms)": r.stats.p50.toFixed(2),
        "p90 (ms)": r.stats.p90.toFixed(2),
        "p99 (ms)": r.stats.p99.toFixed(2),
        "Mean (ms)": r.stats.mean.toFixed(2),
        "Stddev (ms)": r.stats.stddev.toFixed(2),
        "total time (ms)": r.stats.totalTime.toFixed(2),
      }))
    );
  };
  return {
    results,
    print,
  };
}
/**
 * Runs `fn(...args)`, awaiting the result so sync and async functions are
 * handled uniformly, and returns the elapsed wall-clock time in milliseconds.
 */
async function runAndMeasure(fn: (...args: any[]) => any | Promise<any>, args: any[]) {
  const startedAt = process.hrtime.bigint();
  await fn(...args);
  const elapsedNs = process.hrtime.bigint() - startedAt;
  // hrtime reports nanoseconds; convert to milliseconds
  return Number(elapsedNs) / 1e6;
}
/**
 * Computes aggregate statistics (all in ms) over a list of measured durations.
 * @throws {Error} if `values` is empty (no statistics can be derived).
 */
function calculateStats(values: number[]) {
  const count = values.length;
  if (count === 0) {
    throw new Error("calculateStats: cannot compute stats of an empty sample");
  }
  const sorted = [...values].sort((a, b) => a - b);
  // clamp so a percentile index can never run past the last element
  const percentile = (p: number) => sorted[Math.min(count - 1, Math.floor(count * p))];
  const mean = math.mean(values);
  const stddev = math.stddev(values, mean);
  const min = sorted[0];
  const max = sorted[count - 1];
  // bug fix: pass the *sorted* copy — the original passed the unsorted
  // `values` while claiming isSorted=true, yielding a wrong median
  const median = math.median(sorted, true);
  const p99 = percentile(0.99);
  const p90 = percentile(0.9); // bug fix: was computed with 0.95 but labeled p90
  const p50 = percentile(0.5);
  const p10 = percentile(0.1);
  const totalTime = math.sum(values);
  return { mean, stddev, min, max, median, p10, p50, p90, p99, totalTime };
}

// utils

/** Resolves after `ms` milliseconds. */
const sleep = (ms: number) => new Promise(r => setTimeout(r, ms));

/** Small numeric helpers used by `calculateStats`. */
const math = {
  /** Sum of all numbers. */
  sum(nums: number[]): number {
    return nums.reduce((acc, num) => acc + num, 0);
  },
  /** Arithmetic mean. */
  mean(nums: number[]): number {
    return math.sum(nums) / nums.length;
  },
  /** Median; pass isSorted=true only when `nums` is already sorted ascending. */
  median(nums: number[], isSorted = false): number {
    const sorted = isSorted ? nums : [...nums].sort((a, b) => a - b);
    const count = nums.length;
    if (count % 2 === 1) {
      // odd count: the single middle element
      return sorted[Math.floor(count / 2)];
    }
    // even count: mean of the two middle elements
    const mid = count / 2;
    return math.mean([sorted[mid - 1], sorted[mid]]);
  },
  /**
   * Population standard deviation.
   * Bug fix: uses `??` (not `||`) so an explicitly passed mean of 0 is honored.
   */
  stddev(nums: number[], mean?: number): number {
    const _mean = mean ?? math.mean(nums);
    const variance = nums.reduce((a, b) => a + (b - _mean) ** 2, 0) / nums.length;
    return Math.sqrt(variance);
  }
};
Test
benchmark.test.ts
import { describe, it, expect } from "vitest";
import { runBenchmarks } from "./benchmark";
describe("runBenchmarks", () => {
it("check output shape", async () => {
  const ITERATIONS = 10;
  const result = await runBenchmarks({
    groupKey: 'test',
    runners: [
      // two trivial runners; no console.log — keeps test output clean
      { name: 'fnA', fn: async (x: number) => x },
      { name: 'fnB', fn: async (_x: number) => { } },
    ],
    iterations: ITERATIONS,
    warmupCycles: 0,
    delayBetween: 0,
  });
  expect(result.results.length).toBe(2);
  // every runner result must expose the full stats contract
  const expectedResultProp = {
    name: expect.toBeOneOf(['fnA', 'fnB']),
    times: expect.any(Array),
    stats: {
      min: expect.any(Number),
      max: expect.any(Number),
      mean: expect.any(Number),
      median: expect.any(Number),
      stddev: expect.any(Number),
      p10: expect.any(Number),
      p50: expect.any(Number),
      p90: expect.any(Number),
      p99: expect.any(Number),
      totalTime: expect.any(Number),
    },
  };
  expect(result.results).toMatchObject([
    expectedResultProp,
    expectedResultProp,
  ]);
  // one recorded duration per iteration, per runner
  expect(result.results[0].times.length).toBe(ITERATIONS);
  expect(result.results[1].times.length).toBe(ITERATIONS);
});
it('dfg', async () => {
const results = await runBenchmarks({
groupKey: "string concat",
runners: [
{ name: "concat", fn: (a: string, b: string) => a + b },
{ name: "concat with join", fn: (a: string, b: string) => [...a, ...b].join("") },
{ name: "concat with reduce", fn: (a: string, b: string) => [...a, ...b].reduce((a, b) => a + b, "") },
],
args: [
'hello',
'nerds'
],
iterations: 500,
warmupCycles: 0,
delayBetween: 0, // ms
});
results.print();
});
});Command Palette
Search for a command to run...