From d10b4e73d21b9eb2075a7186abd030da01c67030 Mon Sep 17 00:00:00 2001 From: Patrick Winter Date: Fri, 16 Aug 2024 14:50:57 +0200 Subject: [PATCH] Add support for teardown handler to pedantic mode Sometimes benchmarks have side effects which need to be cleaned up after every round. For example if a benchmark writes to a file, then you might want to delete it in between rounds and start from a clean slate. It's already possible to pass a setup function to pedantic mode; this PR introduces a similar mechanism but for cleaning up resources after a round has been completed by passing a cleanup function. --- docs/pedantic.rst | 20 +++++++++++++++- src/pytest_benchmark/fixture.py | 17 +++++++++++--- tests/test_pedantic.py | 41 +++++++++++++++++++++++++++++++++ 3 files changed, 74 insertions(+), 4 deletions(-) diff --git a/docs/pedantic.rst b/docs/pedantic.rst index fd1da9fb..8629b572 100644 --- a/docs/pedantic.rst +++ b/docs/pedantic.rst @@ -24,7 +24,7 @@ Reference :param kwargs: Named arguments to the ``target`` function. :type setup: callable - :param setup: A function to call right before calling the ``target`` function. + :param setup: A function to call right before calling the ``target`` function in the first iteration of every round. The setup function can also return the arguments for the function (in case you need to create new arguments every time). @@ -43,6 +43,24 @@ Reference if you use a ``setup`` function then you cannot use the ``args``, ``kwargs`` and ``iterations`` options. + :type cleanup: callable + :param cleanup: A function to call after every round. + + .. sourcecode:: python + + def stuff(a, b, c, foo): + pass + + def test_with_cleanup(benchmark): + def cleanup(): + # cleanup the side effect of the previous benchmark round. + pass + benchmark.pedantic(stuff, cleanup=cleanup, rounds=100) + + .. note:: + + the cleanup function receives the same ``args`` and ``kwargs`` as the ``target``. + :type rounds: int + :param rounds: Number of rounds to run. 
diff --git a/src/pytest_benchmark/fixture.py b/src/pytest_benchmark/fixture.py index e6651e19..76b8c282 100644 --- a/src/pytest_benchmark/fixture.py +++ b/src/pytest_benchmark/fixture.py @@ -158,14 +158,14 @@ def __call__(self, function_to_benchmark, *args, **kwargs): self.has_error = True raise - def pedantic(self, target, args=(), kwargs=None, setup=None, rounds=1, warmup_rounds=0, iterations=1): + def pedantic(self, target, args=(), kwargs=None, setup=None, cleanup=None, rounds=1, warmup_rounds=0, iterations=1): if self._mode: self.has_error = True raise FixtureAlreadyUsed(f'Fixture can only be used once. Previously it was used in {self._mode} mode.') try: self._mode = 'benchmark.pedantic(...)' return self._raw_pedantic( - target, args=args, kwargs=kwargs, setup=setup, rounds=rounds, warmup_rounds=warmup_rounds, iterations=iterations + target, args=args, kwargs=kwargs, setup=setup, cleanup=cleanup, rounds=rounds, warmup_rounds=warmup_rounds, iterations=iterations ) except Exception: self.has_error = True @@ -209,7 +209,7 @@ def _raw(self, function_to_benchmark, *args, **kwargs): function_result = function_to_benchmark(*args, **kwargs) return function_result - def _raw_pedantic(self, target, args=(), kwargs=None, setup=None, rounds=1, warmup_rounds=0, iterations=1): + def _raw_pedantic(self, target, args=(), kwargs=None, setup=None, cleanup=None, rounds=1, warmup_rounds=0, iterations=1): if kwargs is None: kwargs = {} @@ -248,6 +248,9 @@ def make_arguments(args=args, kwargs=kwargs): runner = self._make_runner(target, args, kwargs) runner(loops_range) + if cleanup is not None: + cleanup(*args, **kwargs) + for _ in range(rounds): args, kwargs = make_arguments() @@ -258,10 +261,16 @@ def make_arguments(args=args, kwargs=kwargs): duration, result = runner(loops_range) stats.update(duration) + if cleanup is not None: + cleanup(*args, **kwargs) + + if loops_range: # if it has been looped then we don't have the result, we need to do 1 extra run for it args, kwargs = 
make_arguments() result = target(*args, **kwargs) + if cleanup is not None: + cleanup(*args, **kwargs) if self.cprofile: if self.cprofile_loops is None: @@ -273,6 +282,8 @@ def make_arguments(args=args, kwargs=kwargs): args, kwargs = make_arguments() for _ in cprofile_loops: profile.runcall(target, *args, **kwargs) + if cleanup is not None: + cleanup(*args, **kwargs) self._save_cprofile(profile) return result diff --git a/tests/test_pedantic.py b/tests/test_pedantic.py index 64559134..bcc351d0 100644 --- a/tests/test_pedantic.py +++ b/tests/test_pedantic.py @@ -22,6 +22,19 @@ def setup(): assert runs == [(1, 2)] +def test_cleanup(benchmark): + runs = [] + + def stuff(): + runs.append("stuff") + + def cleanup(): + runs.append("cleanup") + + benchmark.pedantic(stuff, cleanup=cleanup) + assert runs == ["stuff", "cleanup"] + + @pytest.mark.benchmark(cprofile=True) def test_setup_cprofile(benchmark): runs = [] @@ -36,6 +49,22 @@ def setup(): assert runs == [(1, 2), (1, 2)] +@pytest.mark.benchmark(cprofile=True) +def test_cleanup_cprofile(benchmark): + runs = [] + + def stuff(): + runs.append("stuff") + + def cleanup(): + runs.append("cleanup") + + benchmark.pedantic(stuff, cleanup=cleanup) + assert runs == ["stuff", "cleanup", "stuff", "cleanup"] + + runs = [] + + def test_args_kwargs(benchmark): runs = [] @@ -100,6 +129,18 @@ def setup(): benchmark.pedantic(stuff, setup=setup, rounds=10) assert runs == [(1, 2)] * 10 +def test_cleanup_many_rounds(benchmark): + runs = [] + + def stuff(): + runs.append("stuff") + + def cleanup(): + runs.append("cleanup") + + benchmark.pedantic(stuff, cleanup=cleanup, rounds=10) + assert runs == ["stuff", "cleanup"] * 10 + def test_cant_use_both_args_and_setup_with_return(benchmark): runs = []