Commit 86e6f83
Fix CSV report generation errors for parametrized tests (#268)
jnhyperion authored and ionelmc committed Oct 27, 2024
1 parent dfbb285 commit 86e6f83
Showing 4 changed files with 34 additions and 1 deletion.
1 change: 1 addition & 0 deletions AUTHORS.rst
@@ -33,3 +33,4 @@ Authors
 * Friedrich Delgado - https://github.com/TauPan
 * Sam James - https://github.com/thesamesam
 * Florian Bruhin - https://github.com/The-Compiler
+* Johnny Huang - https://github.com/jnhyperion
3 changes: 3 additions & 0 deletions CHANGELOG.rst
@@ -1,6 +1,9 @@
 
 Changelog
 =========
+4.0.1
+------------------
+* Fix generate parametrize tests benchmark csv report errors `#268 <https://github.com/ionelmc/pytest-benchmark/issues/268>`_.
 
 4.0.0 (2022-10-26)
 ------------------
4 changes: 3 additions & 1 deletion src/pytest_benchmark/csv.py
@@ -32,7 +32,9 @@ def render(self, output_file, groups):
 
             for bench in benchmarks:
                 row = [bench.get('fullfunc', bench['fullname'])]
-                row.extend(bench.get('params', {}).get(param, '') for param in params)
+                bench_params = bench.get('params', {})
+                bench_params = bench_params if bench_params is not None else {}
+                row.extend(bench_params.get(param, "") for param in params)
                 row.extend(bench[prop] for prop in self.columns)
                 writer.writerow(row)
         self.logger.info(f'Generated csv: {output_file}', bold=True)
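
Why the guard is needed: the saved benchmark record for a non-parametrized test carries params set to None rather than omitting the key (the situation reported in #268), so bench.get('params', {}) returns None, because dict.get's default only applies when the key is missing, and the chained .get() then raises AttributeError. Below is a minimal standalone sketch of the failure and the fix; the bench record is hypothetical, mirroring only the shape of a saved entry.

# Sketch of the crash the fix guards against. The record mimics a saved
# benchmark entry for a non-parametrized test, where 'params' is None.
bench = {'fullname': 'test2', 'params': None}
params = ['arg']

# Old code: dict.get's default only applies when the key is absent, so
# bench.get('params', {}) returns None here and the chained .get() raises
# AttributeError: 'NoneType' object has no attribute 'get'.
try:
    [bench.get('params', {}).get(p, '') for p in params]
except AttributeError as exc:
    print(exc)

# Fixed code: normalize None to an empty dict before the per-param lookups.
bench_params = bench.get('params', {})
bench_params = bench_params if bench_params is not None else {}
print([bench_params.get(p, '') for p in params])  # -> ['']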
27 changes: 27 additions & 0 deletions tests/test_benchmark.py
@@ -1209,3 +1209,30 @@ def test_columns(testdir):
             '------*',
         ]
     )
+
+
+def test_generate_csv_report_for_parametrize_tests(testdir):
+    test = testdir.makepyfile("""
+        import pytest
+        @pytest.mark.parametrize("arg", ["foo", "bar"])
+        def test1(benchmark, arg):
+            def func():
+                print(arg)
+            benchmark(func)
+        def test2(benchmark):
+            def func():
+                print("foo")
+            benchmark(func)
+    """)
+    result = testdir.runpytest_subprocess('--benchmark-autosave', test)
+    result.stderr.fnmatch_lines(
+        [
+            'Saved benchmark data in: *',
+        ]
+    )
+    result = testdir.run(
+        'py.test-benchmark',
+        'compare',
+        '--csv',
+    )
+    result.stderr.fnmatch_lines(['Generated csv: *.csv'])
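
The regression test mixes a parametrized test (test1) with a non-parametrized one (test2) because only that combination puts a None-params record into a report that also has param columns; with no parametrized tests the param loop is empty and the old code never crashed. For intuition, here is roughly how the fixed renderer lays out rows for such a mixed suite; the stat values and the column subset are made up for illustration.

# Rough sketch of the row layout the fixed renderer produces for a mixed
# suite (hypothetical stat values; a real report has the full stat columns).
params = ['arg']           # union of param names seen across all benchmarks
columns = ['min', 'max']   # abbreviated stand-in for self.columns
benchmarks = [
    {'fullname': 'test1[foo]', 'params': {'arg': 'foo'}, 'min': 0.1, 'max': 0.2},
    {'fullname': 'test1[bar]', 'params': {'arg': 'bar'}, 'min': 0.1, 'max': 0.3},
    {'fullname': 'test2', 'params': None, 'min': 0.4, 'max': 0.5},  # pre-fix crash
]
for bench in benchmarks:
    row = [bench.get('fullfunc', bench['fullname'])]
    bench_params = bench.get('params', {})
    bench_params = bench_params if bench_params is not None else {}
    row.extend(bench_params.get(param, '') for param in params)
    row.extend(bench[prop] for prop in columns)
    print(row)
# -> ['test1[foo]', 'foo', 0.1, 0.2]
#    ['test1[bar]', 'bar', 0.1, 0.3]
#    ['test2', '', 0.4, 0.5]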
