Merge branch 'develop' into enhance-torch-compile

Abdol authored Jul 9, 2024
2 parents 02b8771 + 647d30b commit 2729a06
Showing 20 changed files with 2,785 additions and 2,755 deletions.
2 changes: 1 addition & 1 deletion .github/workflows/python-package.yml
@@ -30,7 +30,7 @@ jobs:
sudo apt update
sudo apt-get install -y libopenslide-dev openslide-tools libopenjp2-7 libopenjp2-tools
python -m pip install --upgrade pip
- python -m pip install ruff==0.4.10 pytest pytest-cov pytest-runner
+ python -m pip install ruff==0.5.1 pytest pytest-cov pytest-runner
pip install -r requirements/requirements.txt
- name: Cache tiatoolbox static assets
uses: actions/cache@v3
2 changes: 1 addition & 1 deletion .pre-commit-config.yaml
@@ -60,7 +60,7 @@ repos:
- id: rst-inline-touching-normal # Detect mistake of inline code touching normal text in rst.
- repo: https://github.com/astral-sh/ruff-pre-commit
# Ruff version.
- rev: v0.4.10
+ rev: v0.5.1
hooks:
- id: ruff
args: [--fix, --exit-non-zero-on-fix]
5,376 changes: 2,688 additions & 2,688 deletions benchmarks/annotation_store.ipynb

Large diffs are not rendered by default.

4 changes: 2 additions & 2 deletions benchmarks/annotation_store_alloc.py
@@ -306,8 +306,8 @@ def main(
# Skip memray if not installed
return
regex = re.compile(r"Total memory allocated:\s*([\d.]+)MB")
- pipe = subprocess.Popen(
- [ # noqa: S603
+ pipe = subprocess.Popen( # noqa: S603
+ [
sys.executable,
"-m",
"memray",
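Several files in this commit move the # noqa: S603 suppression from the first argument line onto the line that opens the subprocess call, and where two codes were combined (S603, S607 in tests/test_init.py) they are split so each code sits on its own triggering line; this presumably reflects where the upgraded Ruff release reports these violations on multi-line calls. A minimal sketch of the new placement, with an illustrative command rather than any of the commands used in the repository:

    import subprocess
    import sys

    # The suppression now sits on the line that opens the call,
    # not on the first argument line.
    pipe = subprocess.Popen(  # noqa: S603
        [sys.executable, "--version"],
        stdout=subprocess.PIPE,
    )
    pipe.wait()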
1 change: 0 additions & 1 deletion examples/full-pipelines/slide-graph.ipynb
@@ -1577,7 +1577,6 @@
" loss.backward()\n",
" optimizer.step()\n",
"\n",
" #\n",
" loss = loss.detach().cpu().numpy()\n",
" assert not np.isnan(loss) # noqa: S101\n",
" wsi_labels = wsi_labels.cpu().numpy()\n",
1 change: 0 additions & 1 deletion examples/inference-pipelines/slide-graph.ipynb
@@ -1171,7 +1171,6 @@
" loss.backward()\n",
" optimizer.step()\n",
"\n",
" #\n",
" loss = loss.detach().cpu().numpy()\n",
" assert not np.isnan(loss) # noqa: S101\n",
" wsi_labels = wsi_labels.cpu().numpy()\n",
12 changes: 6 additions & 6 deletions pre-commit/notebook_urls.py
@@ -14,8 +14,8 @@
def git_branch_name() -> str:
"""Get the current branch name."""
return (
- subprocess.check_output(
- ["/usr/bin/git", "rev-parse", "--abbrev-ref", "HEAD"], # noqa: S603
+ subprocess.check_output( # noqa: S603
+ ["/usr/bin/git", "rev-parse", "--abbrev-ref", "HEAD"],
)
.decode()
.strip()
@@ -27,8 +27,8 @@ def git_branch_modified_paths(from_ref: str, to_ref: str) -> set[Path]:
from_to = f"{from_ref}...{to_ref}"
return {
Path(p)
- for p in subprocess.check_output(
- [ # noqa: S603
+ for p in subprocess.check_output( # noqa: S603
+ [
"/usr/bin/git",
"diff",
"--name-only",
@@ -45,8 +45,8 @@ def git_previous_commit_modified_paths() -> set[Path]:
"""Get a set of file paths modified in the previous commit."""
return {
Path(p)
- for p in subprocess.check_output(
- ["/usr/bin/git", "diff", "--name-only", "HEAD~"], # noqa: S603
+ for p in subprocess.check_output( # noqa: S603
+ ["/usr/bin/git", "diff", "--name-only", "HEAD~"],
)
.decode()
.strip()
4 changes: 2 additions & 2 deletions pre-commit/requirements_consistency.py
@@ -88,8 +88,8 @@ def parse_conda(file_path: Path) -> dict[str, Requirement]:
# pip-style dependency
if isinstance(dependency, dict):
pip = parse_pip(lines=dependency["pip"])
- for package_name, requirement in pip.items():
- packages[package_name] = requirement
+ packages = dict(pip.items())

continue
requirement = Requirement.parse(dependency)
# Check for duplicate packages
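For context on this change: the removed loop merged the pip-section requirements into the existing packages mapping, whereas dict(pip.items()) rebinds packages to a copy of pip alone (equivalent to dict(pip)). A minimal sketch of the two forms, with plain strings standing in for the Requirement objects used in the script:

    pip = {"numpy": ">=1.24", "requests": ">=2.31"}
    packages = {"python": "=3.11"}

    # Loop form: merge the pip entries into whatever packages already holds.
    for package_name, requirement in pip.items():
        packages[package_name] = requirement

    # dict(...) form: rebind packages to a copy of pip only.
    packages = dict(pip.items())  # same result as dict(pip)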
2 changes: 1 addition & 1 deletion requirements/requirements_dev.txt
@@ -11,7 +11,7 @@ pytest>=7.2.0
pytest-cov>=4.0.0
pytest-runner>=6.0
pytest-xdist[psutil]
- ruff==0.4.10 # This will be updated by pre-commit bot to latest version
+ ruff==0.5.1 # This will be updated by pre-commit bot to latest version
toml>=0.10.2
twine>=4.0.1
wheel>=0.37.1
12 changes: 11 additions & 1 deletion tests/conftest.py
@@ -119,7 +119,17 @@ def sample_ome_tiff(remote_sample: Callable) -> Path:
Download ome-tiff image for pytest.
"""
- return remote_sample("ome-brightfield-pyramid-1-small")
+ return remote_sample("ome-brightfield-small-pyramid")


+ @pytest.fixture(scope="session")
+ def sample_ome_tiff_level_0(remote_sample: Callable) -> Path:
+ """Sample pytest fixture for ome-tiff image with one level.
+ Download ome-tiff image for pytest.
+ """
+ return remote_sample("ome-brightfield-small-level-0")


@pytest.fixture(scope="session")
1 change: 0 additions & 1 deletion tests/models/test_arch_utils.py
@@ -28,7 +28,6 @@ def test_all() -> None:
)
assert np.sum(_output - output) == 0

- #
with pytest.raises(ValueError, match=r".*Unknown.*format.*"):
centre_crop(_output[None, :, :, None], [2, 2], "NHWCT")

1 change: 0 additions & 1 deletion tests/models/test_hovernet.py
@@ -123,7 +123,6 @@ def test_unit_blocks() -> None:
output = block(sample)
assert np.sum(output.shape - np.array([1, 32, 15, 15])) == 0, f"{output.shape}"

- #
block = DenseBlock(16, [1, 3], [16, 16], 3)
output = block(sample)
assert output.shape[1] == 16 * 4, f"{output.shape}"
4 changes: 2 additions & 2 deletions tests/test_init.py
@@ -72,8 +72,8 @@ def helper_logger_test(level: str) -> None:
f'logger.{level.lower()}("Test if {level.lower()} is written to std{output}.")'
)

- proc = subprocess.Popen(
- [ # noqa: S603, S607
+ proc = subprocess.Popen( # noqa: S603
+ [ # noqa: S607
"python",
"-c",
run_statement,
8 changes: 4 additions & 4 deletions tests/test_tiffreader.py
@@ -13,7 +13,7 @@ def test_ome_missing_instrument_ref(
remote_sample: Callable,
) -> None:
"""Test that an OME-TIFF can be read without instrument reference."""
- sample = remote_sample("ome-brightfield-pyramid-1-small")
+ sample = remote_sample("ome-brightfield-small-level-0")
wsi = wsireader.TIFFWSIReader(sample)
page = wsi.tiff.pages[0]
description = page.description
@@ -37,7 +37,7 @@ def test_ome_missing_physicalsize(
remote_sample: Callable,
) -> None:
"""Test that an OME-TIFF can be read without physical size."""
- sample = remote_sample("ome-brightfield-pyramid-1-small")
+ sample = remote_sample("ome-brightfield-small-level-0")
wsi = wsireader.TIFFWSIReader(sample)
page = wsi.tiff.pages[0]
description = page.description
@@ -62,7 +62,7 @@ def test_ome_missing_physicalsizey(
remote_sample: Callable,
) -> None:
"""Test that an OME-TIFF can be read without physical size."""
- sample = remote_sample("ome-brightfield-pyramid-1-small")
+ sample = remote_sample("ome-brightfield-small-level-0")
wsi = wsireader.TIFFWSIReader(sample)
page = wsi.tiff.pages[0]
description = page.description
@@ -86,7 +86,7 @@ def test_tiffreader_non_tiled_metadata(
remote_sample: Callable,
) -> None:
"""Test that fetching metadata for non-tiled TIFF works."""
- sample = remote_sample("ome-brightfield-pyramid-1-small")
+ sample = remote_sample("ome-brightfield-small-level-0")
wsi = wsireader.TIFFWSIReader(sample)
monkeypatch.setattr(wsi.tiff, "is_ome", False)
monkeypatch.setattr(
18 changes: 13 additions & 5 deletions tests/test_wsireader.py
@@ -959,15 +959,23 @@ def test_read_bounds_interpolated(sample_svs: Path) -> None:


def test_read_bounds_level_consistency_openslide(sample_ndpi: Path) -> None:
"""Test read_bounds produces the same visual field across resolution levels."""
"""Test read_bounds produces the same visual field across resolution levels.
with OpenSlideWSIReader.
"""
wsi = wsireader.OpenSlideWSIReader(sample_ndpi)
bounds = NDPI_TEST_TISSUE_BOUNDS

read_bounds_level_consistency(wsi, bounds)


def test_read_bounds_level_consistency_jp2(sample_jp2: Path) -> None:
"""Test read_bounds produces the same visual field across resolution levels."""
"""Test read_bounds produces the same visual field across resolution levels.
Using JP2WSIReader.
"""
bounds = JP2_TEST_TISSUE_BOUNDS
wsi = wsireader.JP2WSIReader(sample_jp2)

@@ -1883,11 +1891,11 @@ def test_tiffwsireader_invalid_svs_metadata(


def test_tiffwsireader_invalid_ome_metadata(
- sample_ome_tiff: Path,
+ sample_ome_tiff_level_0: Path,
monkeypatch: pytest.MonkeyPatch,
) -> None:
"""Test exception raised for invalid OME-XML metadata instrument."""
- wsi = wsireader.TIFFWSIReader(sample_ome_tiff)
+ wsi = wsireader.TIFFWSIReader(sample_ome_tiff_level_0)
monkeypatch.setattr(
wsi.tiff.pages[0],
"description",
@@ -2545,7 +2553,7 @@ def test_jp2_no_header(tmp_path: Path, monkeypatch: pytest.MonkeyPatch) -> None:
},
{
"reader_class": TIFFWSIReader,
"sample_key": "ome-brightfield-pyramid-1-small",
"sample_key": "ome-brightfield-small-pyramid",
"kwargs": {},
},
{
4 changes: 2 additions & 2 deletions tiatoolbox/data/__init__.py
@@ -81,8 +81,8 @@ def _local_sample_path(path: str | Path) -> Path:
"""
file_path = importlib_resources.files("tiatoolbox") / str(Path("data") / path)
- with importlib_resources.as_file(file_path) as path:
- return path
+ with importlib_resources.as_file(file_path) as file_path_:
+ return file_path_


def stain_norm_target() -> np.ndarray:
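This rename only stops the as_file context variable from shadowing the path parameter of _local_sample_path; the value returned to callers is unchanged.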
6 changes: 4 additions & 2 deletions tiatoolbox/data/remote_samples.yaml
@@ -25,8 +25,10 @@ files:
url: [*wsis, "CMU-1-Small-Region.jpeg.tiff"]
tiled-tiff-1-small-jp2k:
url: [*wsis, "CMU-1-Small-Region.jp2k.tiff"]
- ome-brightfield-pyramid-1-small:
- url: [*wsis, "CMU-1-Small-Region.ome.tiff"]
+ ome-brightfield-small-level-0:
+ url: [*wsis, "CMU-1-Small-Region-Level-0.ome.tiff"]
+ ome-brightfield-small-pyramid:
+ url: [*wsis, "CMU-1-Small-Region-Pyramid.ome.tif"]
two-tiled-pages:
url: [*wsis, "two-tiled-pages.tiff"]
ventana-tif:
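The single ome-brightfield-pyramid-1-small entry is replaced by two samples, a level-0 (single-resolution) image and a pyramid, matching the renamed sample_ome_tiff fixture and the new sample_ome_tiff_level_0 fixture in tests/conftest.py and the updated keys in tests/test_tiffreader.py and tests/test_wsireader.py. A rough sketch of how a test consumes one of these keys, assuming remote_sample simply downloads and caches the URL registered under the key (the fixture's internals are not part of this diff):

    from pathlib import Path

    def test_single_level_sample(remote_sample) -> None:
        # Illustrative only: the key must match an entry in remote_samples.yaml.
        sample: Path = remote_sample("ome-brightfield-small-level-0")
        assert sample.exists()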
28 changes: 14 additions & 14 deletions tiatoolbox/utils/env_detection.py
@@ -250,12 +250,12 @@ def check_pixman_using_anaconda(versions: list) -> tuple[list, str]:
"""Using anaconda to check for pixman."""
using = "conda"
try:
- conda_list = subprocess.Popen(
- ("conda", "list"), # noqa: S603
+ conda_list = subprocess.Popen( # noqa: S603
+ ("conda", "list"),
stdout=subprocess.PIPE,
)
- conda_pixman = subprocess.check_output(
- ("grep", "pixman"), # noqa: S603
+ conda_pixman = subprocess.check_output( # noqa: S603
+ ("grep", "pixman"),
stdin=conda_list.stdout,
)
conda_list.wait()
@@ -276,8 +276,8 @@ def check_pixman_using_dpkg(versions: list) -> tuple[list, str]:
"""Using dpkg to check for pixman."""
using = "dpkg"
try:
- dkpg_output = subprocess.check_output(
- ["/usr/bin/dpkg", "-s", "libpixman-1-0"], # noqa: S603
+ dkpg_output = subprocess.check_output( # noqa: S603
+ ["/usr/bin/dpkg", "-s", "libpixman-1-0"],
)
except subprocess.SubprocessError:
dkpg_output = b""
@@ -296,12 +296,12 @@ def check_pixman_using_brew(versions: list) -> tuple[list, str]:
"""Using homebrew to check for pixman."""
using = "brew"
try:
- brew_list = subprocess.Popen(
- ("brew", "list", "--versions"), # noqa: S603
+ brew_list = subprocess.Popen( # noqa: S603
+ ("brew", "list", "--versions"),
stdout=subprocess.PIPE,
)
- brew_pixman = subprocess.check_output(
- ("grep", "pixman"), # noqa: S603
+ brew_pixman = subprocess.check_output( # noqa: S603
+ ("grep", "pixman"),
stdin=brew_list.stdout,
)
brew_list.wait()
@@ -326,12 +326,12 @@ def check_pixman_using_macports(versions: list) -> tuple[list, str]:
"""
using = "port"
- port_list = subprocess.Popen(
- ("port", "installed"), # noqa: S603
+ port_list = subprocess.Popen( # noqa: S603
+ ("port", "installed"),
stdout=subprocess.PIPE,
)
- port_pixman = subprocess.check_output(
- ("grep", "pixman"), # noqa: S603
+ port_pixman = subprocess.check_output( # noqa: S603
+ ("grep", "pixman"),
stdin=port_list.stdout,
)
port_list.wait()
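The pixman checks in this module emulate shell pipelines such as `conda list | grep pixman` (and the brew/port equivalents) by feeding one process's stdout into check_output via stdin; only the # noqa: S603 placement changes here. A minimal standalone sketch of the pattern, assuming conda and grep are on the PATH:

    import subprocess

    # First process writes to a pipe; the second reads it as stdin,
    # mirroring `conda list | grep pixman`.
    conda_list = subprocess.Popen(  # noqa: S603
        ("conda", "list"),
        stdout=subprocess.PIPE,
    )
    # check_output raises CalledProcessError if grep finds no match.
    conda_pixman = subprocess.check_output(  # noqa: S603
        ("grep", "pixman"),
        stdin=conda_list.stdout,
    )
    conda_list.wait()
    print(conda_pixman.decode().strip())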
6 changes: 3 additions & 3 deletions tiatoolbox/visualization/bokeh_app/main.py
@@ -28,10 +28,10 @@
Column,
ColumnDataSource,
CustomJS,
+ CustomJSTickFormatter,
DataTable,
Div,
Dropdown,
- FuncTickFormatter,
Glyph,
HoverTool,
HTMLTemplateFormatter,
@@ -630,7 +630,7 @@ def __init__(self: ViewerState, slide_path: str | Path) -> None:
self.thickness = -1
self.model_mpp = 0
self.init = True
- self.micron_formatter = FuncTickFormatter(
+ self.micron_formatter = CustomJSTickFormatter(
args={"mpp": 0.1},
code="""
return Math.round(tick*mpp)
@@ -2086,7 +2086,7 @@ def setup_doc(self: DocConfig, base_doc: Document) -> tuple[Row, Tabs]:

# Set initial slide to first one in base folder
slide_list = []
- for ext in ["*.svs", "*ndpi", "*.tiff", "*.mrxs", "*.png", "*.jpg"]:
+ for ext in ["*.svs", "*ndpi", "*.tiff", "*.tif", "*.mrxs", "*.png", "*.jpg"]:
slide_list.extend(list(doc_config["slide_folder"].glob(ext)))
slide_list.extend(
list(doc_config["slide_folder"].glob(str(Path("*") / ext))),
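FuncTickFormatter was renamed to CustomJSTickFormatter in Bokeh 3.x, so the import and the ViewerState micron formatter are updated together; the glob list additionally picks up *.tif slides. A minimal standalone sketch of the replacement formatter on an ordinary figure (the figure and the mpp value are illustrative, not the app's actual setup):

    from bokeh.models import CustomJSTickFormatter
    from bokeh.plotting import figure

    fig = figure()
    # Convert pixel ticks to microns on the client side; mpp is microns per pixel.
    fig.xaxis.formatter = CustomJSTickFormatter(
        args={"mpp": 0.1},
        code="return Math.round(tick * mpp);",
    )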
