viva_telemetry/bench

viva_telemetry/bench - Statistical benchmarking for Gleam

Inspired by: criterion (Rust), benchee (Elixir), hyperfine

Features

Quick Start

import viva_telemetry/bench

pub fn main() {
  bench.run("my_function", fn() { my_function() })
  |> bench.print()
}

Types

Benchmark result

pub type BenchResult {
  BenchResult(name: String, samples: List(Int), stats: Stats)
}

Constructors

  • BenchResult(name: String, samples: List(Int), stats: Stats)

Comparison between two benchmarks

pub type Comparison {
  Comparison(
    baseline: String,
    target: String,
    speedup: Float,
    significant: Bool,
    ci_95: #(Float, Float),
  )
}

Constructors

  • Comparison(
      baseline: String,
      target: String,
      speedup: Float,
      significant: Bool,
      ci_95: #(Float, Float),
    )

Benchmark configuration

pub type Config {
  Config(
    warmup_iterations: Int,
    iterations: Int,
    confidence: Float,
  )
}

Constructors

  • Config(
      warmup_iterations: Int,
      iterations: Int,
      confidence: Float,
    )

Statistical summary

pub type Stats {
  Stats(
    mean: Float,
    stddev: Float,
    min: Int,
    max: Int,
    p50: Int,
    p95: Int,
    p99: Int,
    ips: Float,
    ci_95: #(Float, Float),
  )
}

Constructors

  • Stats(
      mean: Float,
      stddev: Float,
      min: Int,
      max: Int,
      p50: Int,
      p95: Int,
      p99: Int,
      ips: Float,
      ci_95: #(Float, Float),
    )

Values

pub fn compare(
  baseline: BenchResult,
  target: BenchResult,
) -> Comparison

Compare two benchmark results, computing the speedup of the target relative to the baseline, a 95% confidence interval, and whether the difference is statistically significant

pub fn config(warmup: Int, iterations: Int) -> Config

Create a Config with the given warmup and measurement iteration counts

pub fn default_config() -> Config

Default configuration

pub fn print(result: BenchResult) -> Nil

Print benchmark result to console

pub fn print_comparison(cmp: Comparison) -> Nil

Print a comparison result to console

pub fn run(name: String, f: fn() -> a) -> BenchResult

Run a single benchmark with default config

pub fn run_all(
  benchmarks: List(#(String, fn() -> a)),
  cfg: Config,
) -> List(BenchResult)

Run multiple named benchmarks, each with the given config

pub fn run_with_config(
  name: String,
  f: fn() -> a,
  cfg: Config,
) -> BenchResult

Run benchmark with custom config

pub fn to_json(result: BenchResult) -> json.Json

Export to JSON

pub fn to_json_string(result: BenchResult) -> String

Export to JSON string

pub fn to_markdown(result: BenchResult) -> String

Export to Markdown table row

pub fn to_markdown_table(results: List(BenchResult)) -> String

Export multiple results to Markdown table

Search Document