From ff796954017f79769c813d6b4d619dafc57bd663 Mon Sep 17 00:00:00 2001
From: Jakub Kaczmarzyk
Date: Tue, 9 Jul 2024 14:15:48 -0400
Subject: [PATCH] [DOC] add disclaimer that this project is for academic
 research purposes only (#224) [skip ci]

* add academic research use warning to readme

* add academic research disclaimer

* update float and int types for slide coords and probs

Use np.floating and np.integer.

* do not ignore types when making binary im arr
---
 README.md                         | 3 +++
 docs/index.rst                    | 4 ++++
 wsinfer/modellib/run_inference.py | 4 ++--
 wsinfer/patchlib/segment.py       | 5 +----
 4 files changed, 10 insertions(+), 6 deletions(-)

diff --git a/README.md b/README.md
index c8e4f1e..9d71e49 100644
--- a/README.md
+++ b/README.md
@@ -12,6 +12,9 @@ Original H&E | Heatmap of Tumor Probability
 [![Supported Python versions](https://img.shields.io/pypi/pyversions/wsinfer)](https://pypi.org/project/wsinfer/)
 [![Published in npj Precision Oncology](https://img.shields.io/badge/Published-npj_Precision_Oncology-blue)](https://doi.org/10.1038/s41698-024-00499-9)
 
+> [!CAUTION]
+> WSInfer is an academic project intended for research use only.
+
 See https://wsinfer.readthedocs.io for documentation.
 
 The main feature of WSInfer is a minimal command-line interface for running deep learning inference
diff --git a/docs/index.rst b/docs/index.rst
index fa9280f..6f0b054 100644
--- a/docs/index.rst
+++ b/docs/index.rst
@@ -23,6 +23,10 @@ WSInfer: blazingly fast inference on whole slide images
 on whole slide images. It includes several built-in models, and it can be used
 with any PyTorch model as well. The built-in models :ref:`are listed below `.
 
+.. caution::
+
+   WSInfer is an academic project intended for research use only.
+
 Running inference on whole slide images is done with a single command line: ::
 
diff --git a/wsinfer/modellib/run_inference.py b/wsinfer/modellib/run_inference.py
index 7ba8aa5..0a0926e 100644
--- a/wsinfer/modellib/run_inference.py
+++ b/wsinfer/modellib/run_inference.py
@@ -171,8 +171,8 @@ def run_inference(
 
     # Store the coordinates and model probabiltiies of each patch in this slide.
     # This lets us know where the probabiltiies map to in the slide.
-    slide_coords: list[npt.NDArray[np.int_]] = []
-    slide_probs: list[npt.NDArray[np.float_]] = []
+    slide_coords: list[npt.NDArray[np.integer]] = []
+    slide_probs: list[npt.NDArray[np.floating]] = []
     for batch_imgs, batch_coords in tqdm.tqdm(loader):
         assert batch_imgs.shape[0] == batch_coords.shape[0], "length mismatch"
         with torch.no_grad():
diff --git a/wsinfer/patchlib/segment.py b/wsinfer/patchlib/segment.py
index a6963fd..cd071e4 100644
--- a/wsinfer/patchlib/segment.py
+++ b/wsinfer/patchlib/segment.py
@@ -66,10 +66,7 @@ def segment_tissue(
     _, im_arr = cv.threshold(
         im_arr, thresh=binary_threshold, maxval=255, type=cv.THRESH_BINARY
     )
-
-    # Convert to boolean dtype. This helps with static type analysis because at this
-    # point, im_arr is a uint8 array.
-    im_arr_binary: npt.NDArray[np.bool_] = im_arr > 0  # type: ignore
+    im_arr_binary: npt.NDArray[np.bool_] = im_arr > 0
 
     # Closing. This removes small holes. It might not be entirely necessary because
     # we have hole removal below.
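
A note on the run_inference.py typing change above: np.int_ and np.float_ name concrete scalar aliases (np.float_ was removed in NumPy 2.0), while np.integer and np.floating are the abstract scalar classes that every integer and floating-point precision subclasses, so annotations written against them accept any precision. The snippet below is an illustrative sketch only, not code from the repository; the variable names simply mirror the patch, and it assumes NumPy with numpy.typing is available.

import numpy as np
import numpy.typing as npt

# Annotating against the abstract scalar classes accepts any concrete
# precision: int32/int64 coordinates and float32/float64 probabilities
# all satisfy the same hints.
slide_coords: list[npt.NDArray[np.integer]] = []
slide_probs: list[npt.NDArray[np.floating]] = []

slide_coords.append(np.array([[0, 0], [350, 0]], dtype=np.int32))
slide_probs.append(np.array([0.12, 0.98], dtype=np.float32))

# Runtime behavior is unchanged; only the static annotations are broader.
print(np.concatenate(slide_coords).shape, np.concatenate(slide_probs).dtype)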
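
For the segment.py change, the point is that an elementwise comparison on the uint8 array returned by cv.threshold already yields a boolean array, so the npt.NDArray[np.bool_] annotation holds without a "# type: ignore". Below is a minimal sketch of that pattern on a toy 2x2 array rather than a slide thumbnail; it assumes opencv-python and NumPy are installed and is not code from the repository.

import cv2 as cv
import numpy as np
import numpy.typing as npt

# Toy grayscale "image"; in WSInfer this would be a downsampled slide thumbnail.
im_arr = np.array([[0, 50], [200, 255]], dtype=np.uint8)

# THRESH_BINARY sets pixels above thresh to maxval and everything else to 0,
# returning a uint8 array.
_, im_arr = cv.threshold(im_arr, thresh=128, maxval=255, type=cv.THRESH_BINARY)

# Comparing a uint8 array with a scalar produces np.bool_ elements, so the
# annotation is satisfied without suppressing the type checker.
im_arr_binary: npt.NDArray[np.bool_] = im_arr > 0

# False for the 0 and 50 pixels, True for 200 and 255.
print(im_arr_binary)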