Skip to content

Benchmarking

Utilities for benchmarking GPU and CPU operations.

torchlinops.utils.benchmark

CupyHandler

Benchmarking class for CuPy-based functions

Usage:

Source code in src/torchlinops/utils/benchmark.py
class CupyHandler:
    """Benchmarking handler for CuPy-based functions.

    Records one CUDA-event timing per trial and, at the end of a run, the
    total size of the default CuPy memory pool.

    Usage:


    """

    def __init__(self):
        self.reset()

    def reset(self):
        """Clear events, the memory-pool handle, and recorded results."""
        self._start_event = None
        self._end_event = None
        self._mempool = None

        # timings_ms gains one entry per trial; max_mem_bytes is set in bench_end
        self.result = EasyDict({"timings_ms": [], "max_mem_bytes": None})

    def bench_start(self, *args, **kwargs):
        """Prepare for a run: reset state, disable GC, and drain the CuPy pool."""
        self.reset()
        gc.disable()
        pool = cp.get_default_memory_pool()
        pool.free_all_blocks()
        self._mempool = pool

    def bench_end(self, *args, **kwargs):
        """Record the pool's total byte count and re-enable GC."""
        self.result.max_mem_bytes = self._mempool.total_bytes()
        gc.enable()

    def trial_start(self, event=None, i=None):
        """Create start/end CUDA events and record the start of one trial."""
        start = cp.cuda.Event(disable_timing=False)
        end = cp.cuda.Event(disable_timing=False)
        self._start_event = start
        self._end_event = end
        start.record()

    def trial_end(self, event=None, i=None):
        """Record and synchronize the end event, then store the elapsed ms."""
        end = self._end_event
        end.record()
        end.synchronize()
        elapsed_ms = cp.cuda.get_elapsed_time(self._start_event, end)
        self.result.timings_ms.append(elapsed_ms)

    def collect_results(self, *args, **kwargs):
        """Return a deep copy of the recorded results keyed by backend name."""
        return {"cupy": copy.deepcopy(self.result)}

benchmark

benchmark(
    fn,
    *args,
    num_iters: int = 10,
    backend: Literal["torch", "cupy"] = "torch",
    **kwargs,
)

Benchmark a function called with some arguments.

Defaults to torch benchmarking

Source code in src/torchlinops/utils/benchmark.py
def benchmark(
    fn,
    *args,
    num_iters: int = 10,
    backend: Literal["torch", "cupy"] = "torch",
    **kwargs,
):
    """Benchmark a function called with some arguments.

    Defaults to torch benchmarking

    Raises:
        ValueError: if ``backend`` is neither "torch" nor "cupy".
    """
    # Resolve the backend name to a handler instance (kept in its own
    # variable rather than rebinding the parameter).
    if backend == "torch":
        handler = TorchHandler()
    elif backend == "cupy":
        handler = CupyHandler()
    else:
        raise ValueError(f"Unrecognized backend type {backend}")

    # One untimed warmup call; its output is what gets returned to the caller.
    fn_result = fn(*args, **kwargs)

    handler.bench_start()
    for _ in range(num_iters):
        handler.trial_start()
        fn(*args, **kwargs)
        handler.trial_end()
    handler.bench_end()

    return handler.result, fn_result

benchmark_and_summarize

benchmark_and_summarize(
    fn,
    *args,
    num_iters: int = 10,
    ignore_first: int = 0,
    backend: Literal["torch", "cupy"] = "torch",
    name: str = None,
    **kwargs,
)

Convenience wrapper: runs benchmark and prints its results via summarize.

Source code in src/torchlinops/utils/benchmark.py
def benchmark_and_summarize(
    fn,
    *args,
    num_iters: int = 10,
    ignore_first: int = 0,
    backend: Literal["torch", "cupy"] = "torch",
    name: str = None,
    **kwargs,
):
    """Benchmark ``fn`` and print a human-readable summary.

    Args:
        fn: Callable to benchmark.
        *args: Positional arguments forwarded to ``fn``.
        num_iters: Number of timed iterations.
        ignore_first: Number of initial timings dropped from the summary.
        backend: Timing backend, ``"torch"`` or ``"cupy"``.
        name: Label for the printed summary; defaults to ``fn``'s name.
        **kwargs: Keyword arguments forwarded to ``fn``.

    Returns:
        Tuple of (benchmark result, output of the warmup call of ``fn``).
    """
    result, output = benchmark(
        fn, *args, num_iters=num_iters, backend=backend, **kwargs
    )
    if name is None:
        # getattr fallback: callables such as functools.partial objects
        # have no __name__ attribute.
        name = getattr(fn, "__name__", repr(fn))
    summarize(result, name=name, ignore_first=ignore_first)
    return result, output

summarize

summarize(
    benchmark_result, name: str, ignore_first: int = 0
)

Summarize the results from benchmark

Source code in src/torchlinops/utils/benchmark.py
def summarize(benchmark_result, name: str, ignore_first: int = 0):
    """Summarize the results from benchmark.

    Prints mean/min/max timings (ms) after dropping the first
    ``ignore_first`` trials, followed by the recorded peak memory.
    """
    with Indenter() as indent:
        print(name)
        with indent:
            timings_ms = benchmark_result["timings_ms"][ignore_first:]
            if timings_ms:
                indent.print(f"Mean Time: {np.mean(timings_ms):0.3f} ms")
                indent.print(f"Min Time: {np.min(timings_ms):0.3f} ms")
                indent.print(f"Max Time: {np.max(timings_ms):0.3f} ms")
            else:
                # Guard: np.min/np.max raise ValueError on an empty slice
                # (e.g. ignore_first >= number of recorded trials).
                indent.print("No timings to summarize")
            indent.print(f"Memory: {benchmark_result['max_mem_bytes']} bytes")