feat(third_party): add oatpp, googletest, benchmark
All checks were successful
sm-rpc / build (Debug, aarch64-linux-gnu) (push) Successful in 1m7s
sm-rpc / build (Debug, arm-linux-gnueabihf) (push) Successful in 1m15s
sm-rpc / build (Debug, host.gcc) (push) Successful in 1m4s
sm-rpc / build (Debug, mipsel-linux-gnu) (push) Successful in 1m16s
sm-rpc / build (Release, aarch64-linux-gnu) (push) Successful in 1m34s
sm-rpc / build (Release, arm-linux-gnueabihf) (push) Successful in 1m33s
sm-rpc / build (Release, host.gcc) (push) Successful in 1m23s
sm-rpc / build (Release, mipsel-linux-gnu) (push) Successful in 1m30s
third_party/benchmark/bindings/python/google_benchmark/BUILD (vendored, new file, +27 lines)
@@ -0,0 +1,27 @@
load("@nanobind_bazel//:build_defs.bzl", "nanobind_extension")

py_library(
    name = "google_benchmark",
    srcs = ["__init__.py"],
    visibility = ["//visibility:public"],
    deps = [
        ":_benchmark",
    ],
)

nanobind_extension(
    name = "_benchmark",
    srcs = ["benchmark.cc"],
    deps = ["//:benchmark"],
)

py_test(
    name = "example",
    srcs = ["example.py"],
    python_version = "PY3",
    srcs_version = "PY3",
    visibility = ["//visibility:public"],
    deps = [
        ":google_benchmark",
    ],
)
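
The BUILD file layers the pure-Python `google_benchmark` library on top of the `_benchmark` nanobind extension, which links against the vendored `//:benchmark` C++ library. A minimal smoke check of that wiring — a hypothetical snippet, not part of this commit, assuming the extension has been built and is importable:

    from google_benchmark import _benchmark

    # Initialize() forwards argv to the C++ library and returns the entries
    # it did not consume; with no --benchmark_* flags, it echoes argv back.
    print(_benchmark.Initialize(["selftest"]))
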
third_party/benchmark/bindings/python/google_benchmark/__init__.py (vendored, new file, +141 lines)
@@ -0,0 +1,141 @@
# Copyright 2020 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Python benchmarking utilities.

Example usage:
  import google_benchmark as benchmark

  @benchmark.register
  def my_benchmark(state):
      ...  # Code executed outside `while` loop is not timed.

      while state:
          ...  # Code executed within `while` loop is timed.

  if __name__ == '__main__':
      benchmark.main()
"""

import atexit

from absl import app

from google_benchmark import _benchmark
from google_benchmark._benchmark import (
    Counter as Counter,
    State as State,
    kMicrosecond as kMicrosecond,
    kMillisecond as kMillisecond,
    kNanosecond as kNanosecond,
    kSecond as kSecond,
    o1 as o1,
    oAuto as oAuto,
    oLambda as oLambda,
    oLogN as oLogN,
    oN as oN,
    oNCubed as oNCubed,
    oNLogN as oNLogN,
    oNone as oNone,
    oNSquared as oNSquared,
)
from google_benchmark.version import __version__ as __version__


class __OptionMaker:
    """A stateless class to collect benchmark options.

    Collect all decorator calls like @option.range(start=0, limit=1<<5).
    """

    class Options:
        """Pure data class to store options calls, along with the benchmarked function."""

        def __init__(self, func):
            self.func = func
            self.builder_calls = []

    @classmethod
    def make(cls, func_or_options):
        """Make Options from Options or the benchmarked function."""
        if isinstance(func_or_options, cls.Options):
            return func_or_options
        return cls.Options(func_or_options)

    def __getattr__(self, builder_name):
        """Append option call in the Options."""

        # The function that gets returned on @option.range(start=0, limit=1<<5).
        def __builder_method(*args, **kwargs):
            # The decorator that gets called, either with the benchmarked
            # function or the previous Options.
            def __decorator(func_or_options):
                options = self.make(func_or_options)
                options.builder_calls.append((builder_name, args, kwargs))
                # The decorator returns Options so it is not technically a decorator
                # and needs a final call to @register
                return options

            return __decorator

        return __builder_method


# Alias for nicer API.
# We have to instantiate an object, even if stateless, to be able to use __getattr__
# on option.range
option = __OptionMaker()


def register(undefined=None, *, name=None):
    """Register function for benchmarking."""
    if undefined is None:
        # Decorator is called without parenthesis so we return a decorator
        return lambda f: register(f, name=name)

    # We have either the function to benchmark (simple case) or an instance of Options
    # (@option._ case).
    options = __OptionMaker.make(undefined)

    if name is None:
        name = options.func.__name__

    # We register the benchmark and reproduce all the @option._ calls onto the
    # benchmark builder pattern
    benchmark = _benchmark.RegisterBenchmark(name, options.func)
    for name, args, kwargs in options.builder_calls[::-1]:
        getattr(benchmark, name)(*args, **kwargs)

    # return the benchmarked function because the decorator does not modify it
    return options.func


def _flags_parser(argv):
    argv = _benchmark.Initialize(argv)
    return app.parse_flags_with_usage(argv)


def _run_benchmarks(argv):
    if len(argv) > 1:
        raise app.UsageError("Too many command-line arguments.")
    return _benchmark.RunSpecifiedBenchmarks()


def main(argv=None):
    return app.run(_run_benchmarks, argv=argv, flags_parser=_flags_parser)


# Methods for use with custom main function.
initialize = _benchmark.Initialize
run_benchmarks = _benchmark.RunSpecifiedBenchmarks
atexit.register(_benchmark.ClearRegisteredBenchmarks)
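
The `__OptionMaker` indirection means `@benchmark.option.<anything>(...)` merely records a builder call; `@register` then replays the recorded calls onto the C++ `Benchmark` builder (the `[::-1]` reversal restores top-to-bottom source order, since decorators apply bottom-up). A usage sketch combining the pieces above — illustrative only, mirroring the module docstring and the vendored example.py:

    import google_benchmark as benchmark

    @benchmark.register(name="sum_range_us")        # replays recorded calls, registers
    @benchmark.option.unit(benchmark.kMicrosecond)  # recorded second, replayed first
    @benchmark.option.range(8, limit=8 << 10)       # recorded first, replayed second
    def sum_range(state):
        while state:
            sum(range(state.range(0)))

    if __name__ == "__main__":
        benchmark.main()
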
third_party/benchmark/bindings/python/google_benchmark/benchmark.cc (vendored, new file, +184 lines)
@@ -0,0 +1,184 @@
// Benchmark for Python.

#include "benchmark/benchmark.h"

#include "nanobind/nanobind.h"
#include "nanobind/operators.h"
#include "nanobind/stl/bind_map.h"
#include "nanobind/stl/string.h"
#include "nanobind/stl/vector.h"

NB_MAKE_OPAQUE(benchmark::UserCounters);

namespace {
namespace nb = nanobind;

std::vector<std::string> Initialize(const std::vector<std::string>& argv) {
  // The `argv` pointers here become invalid when this function returns, but
  // benchmark holds the pointer to `argv[0]`. We create a static copy of it
  // so it persists, and replace the pointer below.
  static std::string executable_name(argv[0]);
  std::vector<char*> ptrs;
  ptrs.reserve(argv.size());
  for (auto& arg : argv) {
    ptrs.push_back(const_cast<char*>(arg.c_str()));
  }
  ptrs[0] = const_cast<char*>(executable_name.c_str());
  int argc = static_cast<int>(argv.size());
  benchmark::Initialize(&argc, ptrs.data());
  std::vector<std::string> remaining_argv;
  remaining_argv.reserve(argc);
  for (int i = 0; i < argc; ++i) {
    remaining_argv.emplace_back(ptrs[i]);
  }
  return remaining_argv;
}

benchmark::internal::Benchmark* RegisterBenchmark(const std::string& name,
                                                  nb::callable f) {
  return benchmark::RegisterBenchmark(
      name, [f](benchmark::State& state) { f(&state); });
}

NB_MODULE(_benchmark, m) {

  using benchmark::TimeUnit;
  nb::enum_<TimeUnit>(m, "TimeUnit")
      .value("kNanosecond", TimeUnit::kNanosecond)
      .value("kMicrosecond", TimeUnit::kMicrosecond)
      .value("kMillisecond", TimeUnit::kMillisecond)
      .value("kSecond", TimeUnit::kSecond)
      .export_values();

  using benchmark::BigO;
  nb::enum_<BigO>(m, "BigO")
      .value("oNone", BigO::oNone)
      .value("o1", BigO::o1)
      .value("oN", BigO::oN)
      .value("oNSquared", BigO::oNSquared)
      .value("oNCubed", BigO::oNCubed)
      .value("oLogN", BigO::oLogN)
      .value("oNLogN", BigO::oNLogN)
      .value("oAuto", BigO::oAuto)
      .value("oLambda", BigO::oLambda)
      .export_values();

  using benchmark::internal::Benchmark;
  nb::class_<Benchmark>(m, "Benchmark")
      // For methods returning a pointer to the current object, reference
      // return policy is used to ask nanobind not to take ownership of the
      // returned object and avoid calling delete on it.
      // https://pybind11.readthedocs.io/en/stable/advanced/functions.html#return-value-policies
      //
      // For methods taking a const std::vector<...>&, a copy is created
      // because it is bound to a Python list.
      // https://pybind11.readthedocs.io/en/stable/advanced/cast/stl.html
      .def("unit", &Benchmark::Unit, nb::rv_policy::reference)
      .def("arg", &Benchmark::Arg, nb::rv_policy::reference)
      .def("args", &Benchmark::Args, nb::rv_policy::reference)
      .def("range", &Benchmark::Range, nb::rv_policy::reference,
           nb::arg("start"), nb::arg("limit"))
      .def("dense_range", &Benchmark::DenseRange, nb::rv_policy::reference,
           nb::arg("start"), nb::arg("limit"), nb::arg("step") = 1)
      .def("ranges", &Benchmark::Ranges, nb::rv_policy::reference)
      .def("args_product", &Benchmark::ArgsProduct, nb::rv_policy::reference)
      .def("arg_name", &Benchmark::ArgName, nb::rv_policy::reference)
      .def("arg_names", &Benchmark::ArgNames, nb::rv_policy::reference)
      .def("range_pair", &Benchmark::RangePair, nb::rv_policy::reference,
           nb::arg("lo1"), nb::arg("hi1"), nb::arg("lo2"), nb::arg("hi2"))
      .def("range_multiplier", &Benchmark::RangeMultiplier,
           nb::rv_policy::reference)
      .def("min_time", &Benchmark::MinTime, nb::rv_policy::reference)
      .def("min_warmup_time", &Benchmark::MinWarmUpTime,
           nb::rv_policy::reference)
      .def("iterations", &Benchmark::Iterations, nb::rv_policy::reference)
      .def("repetitions", &Benchmark::Repetitions, nb::rv_policy::reference)
      .def("report_aggregates_only", &Benchmark::ReportAggregatesOnly,
           nb::rv_policy::reference, nb::arg("value") = true)
      .def("display_aggregates_only", &Benchmark::DisplayAggregatesOnly,
           nb::rv_policy::reference, nb::arg("value") = true)
      .def("measure_process_cpu_time", &Benchmark::MeasureProcessCPUTime,
           nb::rv_policy::reference)
      .def("use_real_time", &Benchmark::UseRealTime, nb::rv_policy::reference)
      .def("use_manual_time", &Benchmark::UseManualTime,
           nb::rv_policy::reference)
      .def(
          "complexity",
          (Benchmark * (Benchmark::*)(benchmark::BigO)) & Benchmark::Complexity,
          nb::rv_policy::reference,
          nb::arg("complexity") = benchmark::oAuto);

  using benchmark::Counter;
  nb::class_<Counter> py_counter(m, "Counter");

  nb::enum_<Counter::Flags>(py_counter, "Flags")
      .value("kDefaults", Counter::Flags::kDefaults)
      .value("kIsRate", Counter::Flags::kIsRate)
      .value("kAvgThreads", Counter::Flags::kAvgThreads)
      .value("kAvgThreadsRate", Counter::Flags::kAvgThreadsRate)
      .value("kIsIterationInvariant", Counter::Flags::kIsIterationInvariant)
      .value("kIsIterationInvariantRate",
             Counter::Flags::kIsIterationInvariantRate)
      .value("kAvgIterations", Counter::Flags::kAvgIterations)
      .value("kAvgIterationsRate", Counter::Flags::kAvgIterationsRate)
      .value("kInvert", Counter::Flags::kInvert)
      .export_values()
      .def(nb::self | nb::self);

  nb::enum_<Counter::OneK>(py_counter, "OneK")
      .value("kIs1000", Counter::OneK::kIs1000)
      .value("kIs1024", Counter::OneK::kIs1024)
      .export_values();

  py_counter
      .def(nb::init<double, Counter::Flags, Counter::OneK>(),
           nb::arg("value") = 0., nb::arg("flags") = Counter::kDefaults,
           nb::arg("k") = Counter::kIs1000)
      .def("__init__", ([](Counter *c, double value) { new (c) Counter(value); }))
      .def_rw("value", &Counter::value)
      .def_rw("flags", &Counter::flags)
      .def_rw("oneK", &Counter::oneK)
      .def(nb::init_implicit<double>());

  nb::implicitly_convertible<nb::int_, Counter>();

  nb::bind_map<benchmark::UserCounters>(m, "UserCounters");

  using benchmark::State;
  nb::class_<State>(m, "State")
      .def("__bool__", &State::KeepRunning)
      .def_prop_ro("keep_running", &State::KeepRunning)
      .def("pause_timing", &State::PauseTiming)
      .def("resume_timing", &State::ResumeTiming)
      .def("skip_with_error", &State::SkipWithError)
      .def_prop_ro("error_occurred", &State::error_occurred)
      .def("set_iteration_time", &State::SetIterationTime)
      .def_prop_rw("bytes_processed", &State::bytes_processed,
                   &State::SetBytesProcessed)
      .def_prop_rw("complexity_n", &State::complexity_length_n,
                   &State::SetComplexityN)
      .def_prop_rw("items_processed", &State::items_processed,
                   &State::SetItemsProcessed)
      .def("set_label", &State::SetLabel)
      .def("range", &State::range, nb::arg("pos") = 0)
      .def_prop_ro("iterations", &State::iterations)
      .def_prop_ro("name", &State::name)
      .def_rw("counters", &State::counters)
      .def_prop_ro("thread_index", &State::thread_index)
      .def_prop_ro("threads", &State::threads);

  m.def("Initialize", Initialize);
  m.def("RegisterBenchmark", RegisterBenchmark, nb::rv_policy::reference);
  m.def("RunSpecifiedBenchmarks",
        []() { benchmark::RunSpecifiedBenchmarks(); });
  m.def("ClearRegisteredBenchmarks", benchmark::ClearRegisteredBenchmarks);
};

}  // namespace
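
On the Python side, `Initialize` and `RunSpecifiedBenchmarks` are re-exported by `__init__.py` as `initialize` and `run_benchmarks` for programs that want their own entry point instead of `benchmark.main()`. A minimal custom-main sketch under that assumption (illustrative, not part of this commit):

    import sys

    import google_benchmark as benchmark

    def main():
        # initialize() hands argv to the C++ library, which strips the
        # --benchmark_* flags it recognizes and returns the leftovers.
        leftover = benchmark.initialize(sys.argv)
        if len(leftover) > 1:
            raise SystemExit(f"unrecognized arguments: {leftover[1:]}")
        benchmark.run_benchmarks()

    if __name__ == "__main__":
        main()
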
third_party/benchmark/bindings/python/google_benchmark/example.py (vendored, new file, +139 lines)
@@ -0,0 +1,139 @@
# Copyright 2020 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example of using the C++ benchmark framework from Python.

To run this example, you must first install the `google_benchmark` Python package.

To install using `setup.py`, download and extract the `google_benchmark` source.
In the extracted directory, execute:
    python setup.py install
"""

import random
import time

import google_benchmark as benchmark
from google_benchmark import Counter


@benchmark.register
def empty(state):
    while state:
        pass


@benchmark.register
def sum_million(state):
    while state:
        sum(range(1_000_000))


@benchmark.register
def pause_timing(state):
    """Pause timing every iteration."""
    while state:
        # Construct a list of random ints every iteration without timing it.
        state.pause_timing()
        random_list = [random.randint(0, 100) for _ in range(100)]
        state.resume_timing()
        # Time the in-place sorting algorithm.
        random_list.sort()


@benchmark.register
def skipped(state):
    if True:  # Test some predicate here.
        state.skip_with_error("some error")
        return  # NOTE: You must explicitly return, or benchmark will continue.

    ...  # Benchmark code would be here.


@benchmark.register
def manual_timing(state):
    while state:
        # Manually time each iteration.
        start = time.perf_counter()  # perf_counter_ns() in Python 3.7+
        # Something to benchmark.
        time.sleep(0.01)
        end = time.perf_counter()
        state.set_iteration_time(end - start)


@benchmark.register
def custom_counters(state):
    """Collect custom metric using benchmark.Counter."""
    num_foo = 0.0
    while state:
        # Benchmark some code here.
        pass
        # Collect some custom metric named foo.
        num_foo += 0.13

    # Automatic Counter from numbers.
    state.counters["foo"] = num_foo
    # Set a counter as a rate.
    state.counters["foo_rate"] = Counter(num_foo, Counter.kIsRate)
    # Set a counter as an inverse of rate.
    state.counters["foo_inv_rate"] = Counter(
        num_foo, Counter.kIsRate | Counter.kInvert
    )
    # Set a counter as a thread-average quantity.
    state.counters["foo_avg"] = Counter(num_foo, Counter.kAvgThreads)
    # There's also a combined flag:
    state.counters["foo_avg_rate"] = Counter(num_foo, Counter.kAvgThreadsRate)


@benchmark.register
@benchmark.option.measure_process_cpu_time()
@benchmark.option.use_real_time()
def with_options(state):
    while state:
        sum(range(1_000_000))


@benchmark.register(name="sum_million_microseconds")
@benchmark.option.unit(benchmark.kMicrosecond)
def with_options2(state):
    while state:
        sum(range(1_000_000))


@benchmark.register
@benchmark.option.arg(100)
@benchmark.option.arg(1000)
def passing_argument(state):
    while state:
        sum(range(state.range(0)))


@benchmark.register
@benchmark.option.range(8, limit=8 << 10)
def using_range(state):
    while state:
        sum(range(state.range(0)))


@benchmark.register
@benchmark.option.range_multiplier(2)
@benchmark.option.range(1 << 10, 1 << 18)
@benchmark.option.complexity(benchmark.oN)
def computing_complexity(state):
    while state:
        sum(range(state.range(0)))
    state.complexity_n = state.range(0)


if __name__ == "__main__":
    benchmark.main()
third_party/benchmark/bindings/python/google_benchmark/version.py (vendored, new file, +7 lines)
@@ -0,0 +1,7 @@
from importlib.metadata import PackageNotFoundError, version

try:
    __version__ = version("google-benchmark")
except PackageNotFoundError:
    # package is not installed
    pass
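
Note that version.py only sets `__version__` when the `google-benchmark` distribution metadata is present; in a vendored checkout with no installed wheel, the attribute stays unset and the `from google_benchmark.version import __version__` in `__init__.py` would raise ImportError. A quick way to check which case applies (illustrative snippet, not part of this commit):

    import importlib.metadata

    try:
        # Resolves only if the "google-benchmark" distribution is installed.
        print(importlib.metadata.version("google-benchmark"))
    except importlib.metadata.PackageNotFoundError:
        print("google-benchmark metadata not installed; __version__ will be unset")
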