Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
7 changes: 6 additions & 1 deletion .github/workflows/draft-release.yml
Original file line number Diff line number Diff line change
@@ -1,6 +1,9 @@
name: Draft release

on:
push:
branches:
- main
workflow_dispatch:
# pull_request event is required only for autolabeler
pull_request:
Expand All @@ -10,6 +13,8 @@ jobs:
update-release-draft:
runs-on: ubuntu-latest
steps:
- uses: release-drafter/release-drafter@v6.0.0
- uses: release-drafter/release-drafter@v7
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:
commitish: main
7 changes: 7 additions & 0 deletions changelog.md
Original file line number Diff line number Diff line change
@@ -1,3 +1,10 @@
### 0.10.1

- bump bioimageio.spec library version to 0.5.9.1
- enable PyTorch Metal Performance Shaders (MPS) fallback by default
- improved validation summary status tracking
- broader argument types (e.g. for bioimageio.core.digest_spec.get_tensor())

### 0.10.0

- bump bioimageio.spec library version to 0.5.9.0
Expand Down
2 changes: 1 addition & 1 deletion mkdocs.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -86,7 +86,7 @@ plugins:
python:
inventories:
- https://docs.pydantic.dev/latest/objects.inv
- https://bioimage-io.github.io/spec-bioimage-io/v0.5.9.0/objects.inv
- https://bioimage-io.github.io/spec-bioimage-io/v0.5.9.1/objects.inv
options:
annotations_path: source
backlinks: tree
Expand Down
2 changes: 1 addition & 1 deletion pyproject.toml
Original file line number Diff line number Diff line change
Expand Up @@ -6,7 +6,7 @@ requires-python = ">=3.9"
readme = "README.md"
dynamic = ["version"]
dependencies = [
"bioimageio.spec ==0.5.9.0",
"bioimageio.spec ==0.5.9.1",
"imagecodecs",
"imageio>=2.10",
"loguru",
Expand Down
2 changes: 1 addition & 1 deletion src/bioimageio/core/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -16,7 +16,7 @@
"""
# ruff: noqa: E402

__version__ = "0.10.0"
__version__ = "0.10.1"
from loguru import logger

logger.disable("bioimageio.core")
Expand Down
120 changes: 120 additions & 0 deletions src/bioimageio/core/_collection.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,120 @@
"""Collection utilities for bioimageio"""

import json
import warnings
from functools import cache
from typing import Literal, Mapping, Sequence, TypedDict

from bioimageio.spec.common import Sha256
from bioimageio.spec.utils import get_reader

from ._settings import settings


class IndexItemVersion(TypedDict):
    """One published version of a collection item, as listed in the collection index JSON."""

    # version identifier string, compared verbatim in `lookup_from_index`
    version: str
    # location of this version's resource description — presumably a URL; TODO confirm against index schema
    source: str
    # SHA-256 digest of `source`, forwarded by `lookup_from_index` for integrity checks
    sha256: Sha256


class IndexItem(TypedDict):
    """A single resource entry in the collection index."""

    # resource id; may appear with or without the "bioimage-io/" prefix (see `lookup_from_index`)
    id: str
    # resource type, e.g. "model", "dataset", "notebook"
    type: str
    # ordered versions; code elsewhere treats the LAST entry as the latest version
    versions: Sequence[IndexItemVersion]


class Index(TypedDict):
    """Top-level structure of the collection index JSON."""

    # all indexed resources
    items: Sequence[IndexItem]
    # total number of items
    total: int
    # item counts keyed by resource type
    count_per_type: Mapping[str, int]
    # index generation timestamp — format not visible here; presumably ISO 8601, TODO confirm
    timestamp: str


class IdPartsEntry(TypedDict):
    """Nickname building blocks for one resource type ("{adjective}-{noun}")."""

    # maps noun -> icon string; the value is returned as the resource icon in `get_resource_icon`
    nouns: Mapping[str, str]
    # adjectives usable as the nickname prefix
    adjectives: Sequence[str]


class CollectionConfig(TypedDict):
    """Structure of the collection configuration JSON (see `load_collection_config`)."""

    # nickname parts per resource type; only model/dataset/notebook have entries
    id_parts: Mapping[Literal["model", "dataset", "notebook"], IdPartsEntry]
    # reviewer listing — key/value semantics not evident from this file; TODO confirm
    reviewers: Mapping[str, Sequence[str]]


@cache
def load_json(url: str):
    """Fetch *url* and parse its content as JSON.

    Memoized per URL via `functools.cache`, so each URL is fetched at most
    once per process. Note: the cached object is shared — callers must not
    mutate the returned data.
    """
    return json.load(get_reader(url))


def load_index() -> Index:
    """Load the collection index from `settings.collection_index_url` (cached via `load_json`)."""
    return load_json(settings.collection_index_url)


def load_collection_config() -> CollectionConfig:
    """Load the collection configuration from `settings.collection_config_url` (cached via `load_json`)."""
    return load_json(settings.collection_config_url)


@cache
def load_hypened_nouns() -> set[str]:
    """Get all nouns containing a hyphen that could be part of a nickname, e.g. 't-rex'.

    Cached for the process lifetime; callers must not mutate the returned set.
    """
    hyphened: set[str] = set()
    for parts in load_collection_config()["id_parts"].values():
        # iterating the `nouns` mapping yields its keys (the noun strings)
        for noun in parts.get("nouns", []):
            if "-" in noun:
                hyphened.add(noun)
    return hyphened


def lookup_from_index(source: str) -> tuple[str, dict[Literal["sha256"], Sha256]]:
    """Resolve a collection id to a concrete resource source.

    Accepts a bare id, a "bioimage-io/<id>" id, or "bioimage-io/<id>/<version>".
    Returns `(resolved_source, {"sha256": ...})` when the id is found in the
    collection index, otherwise `(source, {})` unchanged.
    """
    # split off an explicit version suffix if present
    requested_version = None
    if source.startswith("bioimage-io/") and source.count("/") == 2:
        requested_version = source.rsplit("/", 1)[-1]
        source = source[: -(len(requested_version) + 1)]

    for item in load_index()["items"]:
        # index ids may carry the "bioimage-io/" prefix; match either spelling
        if item["id"] not in (source, f"bioimage-io/{source}"):
            continue

        chosen = item["versions"][-1]  # default: latest (last) version
        if requested_version is not None:
            for candidate in item["versions"]:
                if candidate["version"] == requested_version:
                    chosen = candidate
                    break
            else:
                # requested version missing -> fall back to latest, but tell the user
                warnings.warn(
                    f"Version {requested_version} not found in index, using latest version."
                )

        return chosen["source"], {"sha256": chosen["sha256"]}

    return source, {}


def get_resource_icon(nickname: str, rtype: str) -> str:
    """Get emoji for a resource, matching to its nickname noun. nicknames are of the form "{adjective}-{noun}", e.g. "affable-shark"."""
    if "-" not in nickname:
        return " "

    # collapse hyphens inside compound nouns so the adjective/noun split below
    # works, e.g. "laid-back-t-rex" -> "laid-back-trex"
    for compound in load_hypened_nouns():
        if nickname.endswith(compound):
            nickname = nickname[: -len(compound)] + compound.replace("-", "")

    # the last remaining hyphen separates adjective and noun,
    # e.g. "laid-back-trex" -> "laid-back" and "trex"
    noun = nickname.rsplit("-", 1)[-1].replace("-", "")

    # unknown resource types fall back to the "notebook" noun table
    category = rtype if rtype in ("model", "dataset", "notebook") else "notebook"
    try:
        noun_icons = load_collection_config()["id_parts"][category].get("nouns", {})
        # config keys may themselves contain hyphens; normalize before lookup
        icon = {key.replace("-", ""): value for key, value in noun_icons.items()}.get(
            noun, " "
        )
    except Exception as e:
        # best-effort lookup: never let a malformed config break the caller
        warnings.warn(f"Error getting icon for {rtype} {nickname}: {e}")
        icon = " "

    return icon
59 changes: 36 additions & 23 deletions src/bioimageio/core/_resource_tests.py
Original file line number Diff line number Diff line change
Expand Up @@ -180,7 +180,7 @@ def test_model(
*,
determinism: Literal["seed_only", "full"] = "seed_only",
sha256: Optional[Sha256] = None,
stop_early: bool = True,
stop_early: bool = False,
working_dir: Optional[Union[os.PathLike[str], str]] = None,
**deprecated: Unpack[DeprecatedKwargs],
) -> ValidationSummary:
Expand Down Expand Up @@ -212,7 +212,7 @@ def test_description(
determinism: Literal["seed_only", "full"] = "seed_only",
expected_type: Optional[str] = None,
sha256: Optional[Sha256] = None,
stop_early: bool = True,
stop_early: bool = False,
runtime_env: Union[
Literal["currently-active", "as-described"], Path, BioimageioCondaEnv
] = ("currently-active"),
Expand Down Expand Up @@ -313,6 +313,8 @@ def test_description(
else:
file_source = source

# elevate status valid-format to passed and start testing
descr.validation_summary.status = "passed"
_test_in_env(
file_source,
descr=descr,
Expand Down Expand Up @@ -560,7 +562,7 @@ def load_description_and_test(
determinism: Literal["seed_only", "full"] = "seed_only",
expected_type: Literal["model"],
sha256: Optional[Sha256] = None,
stop_early: bool = True,
stop_early: bool = False,
working_dir: Optional[Union[os.PathLike[str], str]] = None,
**deprecated: Unpack[DeprecatedKwargs],
) -> Union[ModelDescr, InvalidDescr]: ...
Expand All @@ -576,7 +578,7 @@ def load_description_and_test(
determinism: Literal["seed_only", "full"] = "seed_only",
expected_type: Literal["dataset"],
sha256: Optional[Sha256] = None,
stop_early: bool = True,
stop_early: bool = False,
working_dir: Optional[Union[os.PathLike[str], str]] = None,
**deprecated: Unpack[DeprecatedKwargs],
) -> Union[DatasetDescr, InvalidDescr]: ...
Expand All @@ -592,7 +594,7 @@ def load_description_and_test(
determinism: Literal["seed_only", "full"] = "seed_only",
expected_type: Optional[str] = None,
sha256: Optional[Sha256] = None,
stop_early: bool = True,
stop_early: bool = False,
working_dir: Optional[Union[os.PathLike[str], str]] = None,
**deprecated: Unpack[DeprecatedKwargs],
) -> Union[LatestResourceDescr, InvalidDescr]: ...
Expand All @@ -608,7 +610,7 @@ def load_description_and_test(
determinism: Literal["seed_only", "full"] = "seed_only",
expected_type: Literal["model"],
sha256: Optional[Sha256] = None,
stop_early: bool = True,
stop_early: bool = False,
working_dir: Optional[Union[os.PathLike[str], str]] = None,
**deprecated: Unpack[DeprecatedKwargs],
) -> Union[AnyModelDescr, InvalidDescr]: ...
Expand All @@ -624,7 +626,7 @@ def load_description_and_test(
determinism: Literal["seed_only", "full"] = "seed_only",
expected_type: Literal["dataset"],
sha256: Optional[Sha256] = None,
stop_early: bool = True,
stop_early: bool = False,
working_dir: Optional[Union[os.PathLike[str], str]] = None,
**deprecated: Unpack[DeprecatedKwargs],
) -> Union[AnyDatasetDescr, InvalidDescr]: ...
Expand All @@ -640,7 +642,7 @@ def load_description_and_test(
determinism: Literal["seed_only", "full"] = "seed_only",
expected_type: Optional[str] = None,
sha256: Optional[Sha256] = None,
stop_early: bool = True,
stop_early: bool = False,
working_dir: Optional[Union[os.PathLike[str], str]] = None,
**deprecated: Unpack[DeprecatedKwargs],
) -> Union[ResourceDescr, InvalidDescr]: ...
Expand All @@ -655,7 +657,7 @@ def load_description_and_test(
determinism: Literal["seed_only", "full"] = "seed_only",
expected_type: Optional[str] = None,
sha256: Optional[Sha256] = None,
stop_early: bool = True,
stop_early: bool = False,
working_dir: Optional[Union[os.PathLike[str], str]] = None,
**deprecated: Unpack[DeprecatedKwargs],
) -> Union[ResourceDescr, InvalidDescr]:
Expand Down Expand Up @@ -717,7 +719,15 @@ def load_description_and_test(
)

if expected_type is not None:
_test_expected_resource_type(rd, expected_type)
has_expected_type = _test_expected_resource_type(rd, expected_type)
if not has_expected_type:
# unexpected type -> invalid format
rd.validation_summary.status = "failed"
return rd

# elevate status valid-format to passed and start testing
if rd.validation_summary.status == "valid-format":
rd.validation_summary.status = "passed"

if isinstance(rd, (v0_4.ModelDescr, v0_5.ModelDescr)):
if weight_format is None:
Expand All @@ -729,7 +739,7 @@ def load_description_and_test(

enable_determinism(determinism, weight_formats=weight_formats)
for w in weight_formats:
_test_model_inference(
passed_recreate_test_outputs = _test_recreate_test_outputs(
rd,
w,
devices,
Expand All @@ -738,14 +748,15 @@ def load_description_and_test(
verbose=working_dir is not None,
**deprecated,
)
if stop_early and rd.validation_summary.status != "passed":

if stop_early and not passed_recreate_test_outputs:
break

if not isinstance(rd, v0_4.ModelDescr):
_test_model_inference_parametrized(
passed_parametrized_inference = _test_parametrized_inference(
rd, w, devices, stop_early=stop_early
)
if stop_early and rd.validation_summary.status != "passed":
if stop_early and not passed_parametrized_inference:
break

# TODO: add execution of jupyter notebooks
Expand Down Expand Up @@ -807,7 +818,7 @@ def _get_tolerance(
return rtol, atol, mismatched_tol


def _test_model_inference(
def _test_recreate_test_outputs(
model: Union[v0_4.ModelDescr, v0_5.ModelDescr],
weight_format: SupportedWeightsFormat,
devices: Optional[Sequence[str]],
Expand All @@ -816,7 +827,7 @@ def _test_model_inference(
working_dir: Optional[Union[os.PathLike[str], str]],
verbose: bool,
**deprecated: Unpack[DeprecatedKwargs],
) -> None:
) -> bool:
test_name = f"Reproduce test outputs from test inputs ({weight_format})"
logger.debug("starting '{}'", test_name)
error_entries: List[ErrorEntry] = []
Expand Down Expand Up @@ -972,16 +983,16 @@ def save_to_working_dir(name: str, tensor: Tensor) -> List[Path]:
msg = (
f"Output '{m}': {mismatched_elements} of "
+ f"{expected_np.size} elements disagree with expected values."
+ f" ({mismatched_ppm:.1f} ppm)."
+ f" ({mismatched_ppm:.1f} ppm). "
)
else:
msg = f"Output `{m}`: all elements agree with expected values."
msg = f"Output `{m}`: all elements agree with expected values. "

msg += (
f"\n Max relative difference not accounted for by absolute tolerance ({atol:.2e}): {r_max:.2e}"
f"\nMax relative difference not accounted for by absolute tolerance ({atol:.2e}):\n{r_max:.2e}"
+ rf" (= \|{r_actual:.2e} - {r_expected:.2e}\|/\|{r_expected:.2e} + 1e-6\|)"
+ f" at {dict(zip(dims, r_max_idx))}"
+ f"\n Max absolute difference not accounted for by relative tolerance ({rtol:.2e}): {a_max:.2e}"
+ f" at {dict(zip(dims, r_max_idx))} "
+ f"\nMax absolute difference not accounted for by relative tolerance ({rtol:.2e}):\n{a_max:.2e}"
+ rf" (= \|{a_actual:.7e} - {a_expected:.7e}\|) at {dict(zip(dims, a_max_idx))}"
)
if output_paths:
Expand Down Expand Up @@ -1012,9 +1023,10 @@ def save_to_working_dir(name: str, tensor: Tensor) -> List[Path]:
warnings=warning_entries,
)
)
return bool(error_entries)


def _test_model_inference_parametrized(
def _test_parametrized_inference(
model: v0_5.ModelDescr,
weight_format: SupportedWeightsFormat,
devices: Optional[Sequence[str]],
Expand Down Expand Up @@ -1194,7 +1206,7 @@ def get_ns(n: int):
def _test_expected_resource_type(
rd: Union[InvalidDescr, ResourceDescr], expected_type: str
):
has_expected_type = rd.type == expected_type
has_expected_type = rd.type is expected_type
rd.validation_summary.details.append(
ValidationDetail(
name="Has expected resource type",
Expand All @@ -1213,6 +1225,7 @@ def _test_expected_resource_type(
),
)
)
return has_expected_type


# TODO: Implement `debug_model()`
Expand Down
Loading
Loading