Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
10 changes: 8 additions & 2 deletions Project.toml
Original file line number Diff line number Diff line change
Expand Up @@ -3,7 +3,6 @@ uuid = "b8f27783-ece8-5eb3-8dc8-9495eed66fee"
version = "1.50.0"

[deps]
BenchmarkTools = "6e4b80f9-dd63-53aa-95a3-0cdb28fa8baf"
CodecBzip2 = "523fee87-0ab8-5b00-afb7-3ecf72e48cfd"
CodecZlib = "944b1d66-785c-5afd-91f1-9de20f533193"
ForwardDiff = "f6369f11-7733-5829-9624-2563aa707210"
Expand All @@ -18,6 +17,12 @@ SparseArrays = "2f01184e-e22b-5df5-ae63-d93ebab69eaf"
SpecialFunctions = "276daf66-3868-5448-9aa4-cd146d93841b"
Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40"

[weakdeps]
BenchmarkTools = "6e4b80f9-dd63-53aa-95a3-0cdb28fa8baf"

[extensions]
MathOptInterfaceBenchmarkToolsExt = "BenchmarkTools"

[compat]
BenchmarkTools = "1"
CodecBzip2 = "0.6, 0.7, 0.8"
Expand All @@ -38,8 +43,9 @@ Test = "1"
julia = "1.10"

[extras]
BenchmarkTools = "6e4b80f9-dd63-53aa-95a3-0cdb28fa8baf"
JSONSchema = "7d188eb4-7ad8-530c-ae41-71a32a6d4692"
ParallelTestRunner = "d3525ed8-44d0-4b2c-a655-542cee43accc"

[targets]
test = ["JSONSchema", "ParallelTestRunner"]
test = ["BenchmarkTools", "JSONSchema", "ParallelTestRunner"]
21 changes: 19 additions & 2 deletions docs/src/submodules/Benchmarks/overview.md
Original file line number Diff line number Diff line change
Expand Up @@ -9,13 +9,29 @@ DocTestFilters = [r"MathOptInterface|MOI"]
# The `Benchmarks` submodule

To aid the development of efficient solver wrappers, MathOptInterface provides
benchmarking capability. Benchmarking a wrapper follows a two-step process.
a suite of benchmarks in the `MOI.Benchmarks` submodule.

!!! warning
To use this submodule you must first install and load
    [BenchmarkTools.jl](https://github.com/JuliaCI/BenchmarkTools.jl).
```julia
import Pkg
Pkg.add("BenchmarkTools")
import BenchmarkTools
```

## Benchmarking a solver wrapper

Benchmarking a wrapper follows a two-step process.

First, prior to making changes, create a baseline for the benchmark results on a
given benchmark suite as follows:

```julia
using SolverPackage # Replace with your choice of solver.
# You must load BenchmarkTools.jl to enable MOI.Benchmarks
import BenchmarkTools
# Replace `SolverPackage` with your choice of solver
using SolverPackage
import MathOptInterface as MOI

suite = MOI.Benchmarks.suite() do
Expand All @@ -33,6 +49,7 @@ Second, after making changes to the package, re-run the benchmark suite and
compare to the prior saved results:

```julia
import BenchmarkTools
using SolverPackage
import MathOptInterface as MOI

Expand Down
79 changes: 79 additions & 0 deletions ext/MathOptInterfaceBenchmarkToolsExt.jl
Original file line number Diff line number Diff line change
@@ -0,0 +1,79 @@
# Copyright (c) 2017: Miles Lubin and contributors
# Copyright (c) 2017: Google Inc.
#
# Use of this source code is governed by an MIT-style license that can be found
# in the LICENSE.md file or at https://opensource.org/licenses/MIT.

module MathOptInterfaceBenchmarkToolsExt

# Package extension: implements the stub functions declared in
# `MOI.Benchmarks` (`suite`, `create_baseline`, `compare_against_baseline`)
# once the user loads BenchmarkTools.jl.

import BenchmarkTools
import MathOptInterface as MOI

# Build a `BenchmarkGroup` containing every registered benchmark in
# `MOI.Benchmarks.BENCHMARKS`, skipping names matched by `exclude`.
# `new_model` is a zero-argument factory returning a fresh optimizer.
function MOI.Benchmarks.suite(
    new_model::Function;
    exclude::Vector{Regex} = Regex[],
)
    group = BenchmarkTools.BenchmarkGroup()
    for (name, func) in MOI.Benchmarks.BENCHMARKS
        # Skip benchmarks whose name matches any of the `exclude` patterns.
        if any(r -> occursin(r, name), exclude)
            continue
        end
        group[name] = BenchmarkTools.@benchmarkable $func($new_model)
    end
    return group
end

# Tune and run `suite`, saving the tuned parameters and the results to
# `<name>_params.json` and `<name>_baseline.json` in `directory` so that a
# later `compare_against_baseline` can rerun with identical settings.
function MOI.Benchmarks.create_baseline(
    suite::BenchmarkTools.BenchmarkGroup,
    name::String;
    directory::String = "",
    kwargs...,
)
    BenchmarkTools.tune!(suite)
    # Persist evals/samples so the comparison run is apples-to-apples.
    BenchmarkTools.save(
        joinpath(directory, name * "_params.json"),
        BenchmarkTools.params(suite),
    )
    results = BenchmarkTools.run(suite; kwargs...)
    BenchmarkTools.save(joinpath(directory, name * "_baseline.json"), results)
    return
end

# Rerun `suite` using the saved parameters for baseline `name` and write a
# judgement report (new median vs. baseline median per benchmark) to
# `report_filename` in `directory`, echoing it to `stdout` as it goes.
# Errors if `create_baseline` has not been run for `name` first.
function MOI.Benchmarks.compare_against_baseline(
    suite::BenchmarkTools.BenchmarkGroup,
    name::String;
    directory::String = "",
    report_filename::String = "report.txt",
    kwargs...,
)
    params_filename = joinpath(directory, name * "_params.json")
    baseline_filename = joinpath(directory, name * "_baseline.json")
    if !isfile(params_filename) || !isfile(baseline_filename)
        # Fixed wording: the original message was missing "must".
        error("You must create a baseline with `create_baseline` first.")
    end
    BenchmarkTools.loadparams!(
        suite,
        BenchmarkTools.load(params_filename)[1],
        :evals,
        :samples,
    )
    new_results = BenchmarkTools.run(suite; kwargs...)
    old_results = BenchmarkTools.load(baseline_filename)[1]
    open(joinpath(directory, report_filename), "w") do io
        # Everything is written twice: to `stdout` for interactive feedback
        # and to `io` for the persistent report file.
        println(stdout, "\n========== Results ==========")
        println(io, "\n========== Results ==========")
        for key in keys(new_results)
            judgement = BenchmarkTools.judge(
                BenchmarkTools.median(new_results[key]),
                BenchmarkTools.median(old_results[key]),
            )
            println(stdout, "\n", key)
            println(io, "\n", key)
            show(stdout, MIME"text/plain"(), judgement)
            show(io, MIME"text/plain"(), judgement)
        end
    end
    return
end

end # module
92 changes: 25 additions & 67 deletions src/Benchmarks/Benchmarks.jl
Original file line number Diff line number Diff line change
Expand Up @@ -6,7 +6,6 @@

module Benchmarks

import BenchmarkTools
import MathOptInterface as MOI

const BENCHMARKS = Dict{String,Function}()
Expand All @@ -22,26 +21,24 @@ arguments, and returns a new instance of the optimizer you wish to benchmark.

Use `exclude` to exclude a subset of benchmarks.

## BenchmarkTools

To use this function you must first install and load the `BenchmarkTools.jl`
package.

## Example

```julia
julia> MOI.Benchmarks.suite() do
return GLPK.Optimizer()
end
julia> import BenchmarkTools, GLPK, Gurobi

julia> MOI.Benchmarks.suite(GLPK.Optimizer)

julia> MOI.Benchmarks.suite(; exclude = [r"delete"]) do
return Gurobi.Optimizer()
end
```
"""
function suite(new_model::Function; exclude::Vector{Regex} = Regex[])
group = BenchmarkTools.BenchmarkGroup()
for (name, func) in BENCHMARKS
any(occursin.(exclude, Ref(name))) && continue
group[name] = BenchmarkTools.@benchmarkable $func($new_model)
end
return group
end
function suite end

"""
create_baseline(suite, name::String; directory::String = ""; kwargs...)
Expand All @@ -50,12 +47,17 @@ Run all benchmarks in `suite` and save to files called `name` in `directory`.

Extra `kwargs` are passed to `BenchmarkTools.run`.

## BenchmarkTools

To use this function you must first install and load the `BenchmarkTools.jl`
package.

## Example

```julia
julia> import GLPK
julia> import BenchmarkTools, GLPK

julia> my_suite = MOI.Benchmarks.suite(() -> GLPK.Optimizer());
julia> my_suite = MOI.Benchmarks.suite(GLPK.Optimizer);

julia> MOI.Benchmarks.create_baseline(
my_suite,
Expand All @@ -65,21 +67,7 @@ julia> MOI.Benchmarks.create_baseline(
)
```
"""
function create_baseline(
suite::BenchmarkTools.BenchmarkGroup,
name::String;
directory::String = "",
kwargs...,
)
BenchmarkTools.tune!(suite)
BenchmarkTools.save(
joinpath(directory, name * "_params.json"),
BenchmarkTools.params(suite),
)
results = BenchmarkTools.run(suite; kwargs...)
BenchmarkTools.save(joinpath(directory, name * "_baseline.json"), results)
return
end
function create_baseline end

"""
compare_against_baseline(
Expand All @@ -95,12 +83,17 @@ A report summarizing the comparison is written to `report_filename` in

Extra `kwargs` are passed to `BenchmarkTools.run`.

## BenchmarkTools

To use this function you must first install and load the `BenchmarkTools.jl`
package.

## Example

```julia
julia> import GLPK
julia> import BenchmarkTools, GLPK

julia> my_suite = MOI.Benchmarks.suite(() -> GLPK.Optimizer());
julia> my_suite = MOI.Benchmarks.suite(GLPK.Optimizer);

julia> MOI.Benchmarks.compare_against_baseline(
my_suite,
Expand All @@ -110,42 +103,7 @@ julia> MOI.Benchmarks.compare_against_baseline(
)
```
"""
function compare_against_baseline(
suite::BenchmarkTools.BenchmarkGroup,
name::String;
directory::String = "",
report_filename::String = "report.txt",
kwargs...,
)
params_filename = joinpath(directory, name * "_params.json")
baseline_filename = joinpath(directory, name * "_baseline.json")
if !isfile(params_filename) || !isfile(baseline_filename)
error("You create a baseline with `create_baseline` first.")
end
BenchmarkTools.loadparams!(
suite,
BenchmarkTools.load(params_filename)[1],
:evals,
:samples,
)
new_results = BenchmarkTools.run(suite; kwargs...)
old_results = BenchmarkTools.load(baseline_filename)[1]
open(joinpath(directory, report_filename), "w") do io
println(stdout, "\n========== Results ==========")
println(io, "\n========== Results ==========")
for key in keys(new_results)
judgement = BenchmarkTools.judge(
BenchmarkTools.median(new_results[key]),
BenchmarkTools.median(old_results[key]),
)
println(stdout, "\n", key)
println(io, "\n", key)
show(stdout, MIME"text/plain"(), judgement)
show(io, MIME"text/plain"(), judgement)
end
end
return
end
function compare_against_baseline end

###
### Benchmarks
Expand Down
1 change: 1 addition & 0 deletions test/Benchmarks/test_Benchmarks.jl
Original file line number Diff line number Diff line change
Expand Up @@ -8,6 +8,7 @@ module TestBenchmarks

using Test

import BenchmarkTools
import MathOptInterface as MOI

function runtests()
Expand Down
Loading