diff --git a/AUTHORS.rst b/AUTHORS.rst index 0be99c0..fb24db0 100644 --- a/AUTHORS.rst +++ b/AUTHORS.rst @@ -36,3 +36,4 @@ Authors * Johnny Huang - https://github.com/jnhyperion * Tony Kuo - https://github.com/tony92151 * Eugeniy - https://github.com/zcoder +* Patrick Winter - https://github.com/winpat diff --git a/CHANGELOG.rst b/CHANGELOG.rst index 250f738..c51e871 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -2,6 +2,11 @@ Changelog ========= +Unreleased +---------- + +* Add support for a per-round ``teardown`` function to pedantic mode. + 5.1.0 (2024-10-30) ------------------ diff --git a/docs/pedantic.rst b/docs/pedantic.rst index fd1da9f..b62d604 100644 --- a/docs/pedantic.rst +++ b/docs/pedantic.rst @@ -24,7 +24,7 @@ Reference :param kwargs: Named arguments to the ``target`` function. :type setup: callable - :param setup: A function to call right before calling the ``target`` function. + :param setup: A function to call right before calling the ``target`` function in the first iteration of every round. The setup function can also return the arguments for the function (in case you need to create new arguments every time). @@ -43,6 +43,24 @@ Reference if you use a ``setup`` function then you cannot use the ``args``, ``kwargs`` and ``iterations`` options. + :type teardown: callable + :param teardown: A function to call after the last iteration of every round. + + .. sourcecode:: python + + def stuff(a, b, c, foo): + pass + + def test_with_teardown(benchmark): + def teardown(): + # clean up the side effects of the previous benchmark round. + pass + benchmark.pedantic(stuff, teardown=teardown, rounds=100) + + .. note:: + + the ``teardown`` function receives the same ``args`` and ``kwargs`` as the ``target``. + :type rounds: int :param rounds: Number of rounds to run. 
diff --git a/src/pytest_benchmark/fixture.py b/src/pytest_benchmark/fixture.py index e6651e1..a2aa51a 100644 --- a/src/pytest_benchmark/fixture.py +++ b/src/pytest_benchmark/fixture.py @@ -158,14 +158,21 @@ def __call__(self, function_to_benchmark, *args, **kwargs): self.has_error = True raise - def pedantic(self, target, args=(), kwargs=None, setup=None, rounds=1, warmup_rounds=0, iterations=1): + def pedantic(self, target, args=(), kwargs=None, setup=None, teardown=None, rounds=1, warmup_rounds=0, iterations=1): if self._mode: self.has_error = True raise FixtureAlreadyUsed(f'Fixture can only be used once. Previously it was used in {self._mode} mode.') try: self._mode = 'benchmark.pedantic(...)' return self._raw_pedantic( - target, args=args, kwargs=kwargs, setup=setup, rounds=rounds, warmup_rounds=warmup_rounds, iterations=iterations + target, + args=args, + kwargs=kwargs, + setup=setup, + teardown=teardown, + rounds=rounds, + warmup_rounds=warmup_rounds, + iterations=iterations, ) except Exception: self.has_error = True @@ -209,7 +216,7 @@ def _raw(self, function_to_benchmark, *args, **kwargs): function_result = function_to_benchmark(*args, **kwargs) return function_result - def _raw_pedantic(self, target, args=(), kwargs=None, setup=None, rounds=1, warmup_rounds=0, iterations=1): + def _raw_pedantic(self, target, args=(), kwargs=None, setup=None, teardown=None, rounds=1, warmup_rounds=0, iterations=1): if kwargs is None: kwargs = {} @@ -248,6 +255,9 @@ def make_arguments(args=args, kwargs=kwargs): runner = self._make_runner(target, args, kwargs) runner(loops_range) + if teardown is not None: + teardown(*args, **kwargs) + for _ in range(rounds): args, kwargs = make_arguments() @@ -258,10 +268,15 @@ def make_arguments(args=args, kwargs=kwargs): duration, result = runner(loops_range) stats.update(duration) + if teardown is not None: + teardown(*args, **kwargs) + if loops_range: # if it has been looped then we don't have the result, we need to do 1 extra run for it 
args, kwargs = make_arguments() result = target(*args, **kwargs) + if teardown is not None: + teardown(*args, **kwargs) if self.cprofile: if self.cprofile_loops is None: @@ -273,6 +288,8 @@ def make_arguments(args=args, kwargs=kwargs): args, kwargs = make_arguments() for _ in cprofile_loops: profile.runcall(target, *args, **kwargs) + if teardown is not None: + teardown(*args, **kwargs) self._save_cprofile(profile) return result diff --git a/tests/test_pedantic.py b/tests/test_pedantic.py index 6455913..440433c 100644 --- a/tests/test_pedantic.py +++ b/tests/test_pedantic.py @@ -22,6 +22,21 @@ def setup(): assert runs == [(1, 2)] +def test_teardown(benchmark): + runs = [] + + def stuff(foo, bar=1234): + runs.append((foo, bar)) + + def teardown(foo, bar=1234): + assert foo == 1 + assert bar == 2 + runs.append('teardown') + + benchmark.pedantic(stuff, args=[1], kwargs={'bar': 2}, teardown=teardown) + assert runs == [(1, 2), 'teardown'] + + @pytest.mark.benchmark(cprofile=True) def test_setup_cprofile(benchmark): runs = [] @@ -36,6 +51,22 @@ def setup(): assert runs == [(1, 2), (1, 2)] +@pytest.mark.benchmark(cprofile=True) +def test_teardown_cprofile(benchmark): + runs = [] + + def stuff(): + runs.append('stuff') + + def teardown(): + runs.append('teardown') + + benchmark.pedantic(stuff, teardown=teardown) + assert runs == ['stuff', 'teardown', 'stuff', 'teardown'] + + runs = [] + + def test_args_kwargs(benchmark): runs = [] @@ -101,6 +132,39 @@ def setup(): assert runs == [(1, 2)] * 10 +def test_teardown_many_rounds(benchmark): + runs = [] + + def stuff(): + runs.append('stuff') + + def teardown(): + runs.append('teardown') + + benchmark.pedantic(stuff, teardown=teardown, rounds=10) + assert runs == ['stuff', 'teardown'] * 10 + + +def test_teardown_many_iterations(benchmark): + runs = [] + + def stuff(): + runs.append('stuff') + + def teardown(): + runs.append('teardown') + + benchmark.pedantic(stuff, teardown=teardown, iterations=3) + assert runs == [ + 'stuff', + 
'stuff', + 'stuff', + 'teardown', # first round + 'stuff', + 'teardown', # computing the final result + ] + + def test_cant_use_both_args_and_setup_with_return(benchmark): runs = []