diff --git a/Dockerfile b/Dockerfile
index df08064..f80301e 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -1,3 +1,9 @@
+# Build:
+# docker build -t ghcr.io/decodingraphael/unraphael .
+# Push to GitHub Container Registry:
+# docker push ghcr.io/decodingraphael/unraphael
+# Run:
+# docker run -p 8501:8501 ghcr.io/decodingraphael/unraphael
FROM python:3.12-slim
RUN pip install torch==2.3.1+cpu torchvision torchaudio 'numpy<2.0' --extra-index-url https://download.pytorch.org/whl/cpu
diff --git a/README.md b/README.md
index 1b83d9c..a077783 100644
--- a/README.md
+++ b/README.md
@@ -75,3 +75,70 @@ unraphael-dash
Check out our [Contributing Guidelines](CONTRIBUTING.md#Getting-started-with-development) to get started with development.
Suggestions, improvements, and edits are most welcome.
+
+## Self hosted deployment
+
+To run the dashboard on a self-hosted server:
+
+```shell
+sudo apt-get update && sudo apt-get install -y libgl1 libglib2.0-0
+# As www-data user
+python3 -m venv venv
+venv/bin/pip install 'unraphael[dash]@git+https://github.com/DecodingRaphael/unraphael.git@0.3'
+```
+
+
+### Systemd service
+
+To run unraphael as a service, you can create a systemd service file. This will allow you to start, stop, and restart unraphael using systemd.
+
+1. Create a service file for unraphael, for example `/etc/systemd/system/unraphael.service`:
+
+```
+[Unit]
+Description=Unraphael dashboard
+After=network.target
+
+[Service]
+Environment="XDG_CACHE_HOME=/cache/dir" HOME="/writable/dir"
+User=youruser
+WorkingDirectory=/home/youruser
+ExecStart=/home/youruser/.local/bin/unraphael-dash
+Restart=on-failure
+# Wait briefly before restarting after a failure
+RestartSec=5
+# Tag journal entries so they are easy to filter with journalctl
+SyslogIdentifier=unraphael
+
+[Install]
+WantedBy=multi-user.target
+```
+
+Replace `/cache/dir` and `/writable/dir` with the actual paths to your cache and writable directories.
+Replace `youruser` with your actual username. Also, make sure that the path to `unraphael-dash` is correct. You can find the correct path using `which unraphael-dash`.
+
+2. Enable the service:
+
+```console
+sudo systemctl enable unraphael.service
+```
+
+3. Start the service:
+
+```console
+sudo systemctl start unraphael.service
+```
+
+4. Check the status of the service:
+
+```console
+sudo systemctl status unraphael.service
+```
+
+5. To stop the service:
+
+```console
+sudo systemctl stop unraphael.service
+```
+
+
diff --git a/docs/credits.md b/docs/credits.md
index b3a9fd9..d339e6e 100644
--- a/docs/credits.md
+++ b/docs/credits.md
@@ -1,6 +1,6 @@
# Credits and Contact Information
-Unraphael is maintained by the [Netherlands eScience Center](https://www.esciencecenter.nl/) in collaboration with the [Department of History and Art History](https://www.uu.nl/en/organisation/department-of-history-and-art-history) at the [University of Utrecht](https://www.uu.nl/). The work was supported through a *Small-Scale Initiatives Digital Approaches to the Humanities* grant
+Unraphael is maintained by the [Netherlands eScience Center](https://www.esciencecenter.nl/) in collaboration with the [Department of History and Art History](https://www.uu.nl/en/organisation/department-of-history-and-art-history) at the [University of Utrecht](https://www.uu.nl/). The work was supported through a *Small-Scale Initiatives Digital Approaches to the Humanities* grant
If you have any questions, feedback, or need support, please feel free to reach out to us. Below are the primary contacts and useful links for your reference:
diff --git a/docs/steps/analysis.md b/docs/steps/analysis.md
index ab65530..0de491a 100644
--- a/docs/steps/analysis.md
+++ b/docs/steps/analysis.md
@@ -32,7 +32,7 @@ This then allows us to answer how similar the areas of the main outlines in two

- **Upload Photos:** Upload the digital photos of the paintings from a folder on your computer. For now, use the *unaligned photos*, preferably with the background removed.
-
+
1. **Overview of Image Sizes and DPI:**
- **Check Image Metrics:** Review the sizes and DPI of the uploaded images. This information helps in converting pixel measurements to physical dimensions.
@@ -50,4 +50,4 @@ This then allows us to answer how similar the areas of the main outlines in two
5. More detailed information is provided in the table below the heatmap indicating whether the areas of the main figures in the two paintings are similar, given the set tolerance.
-
\ No newline at end of file
+
diff --git a/docs/steps/clustering.md b/docs/steps/clustering.md
index 2c023fb..9c69812 100644
--- a/docs/steps/clustering.md
+++ b/docs/steps/clustering.md
@@ -13,7 +13,7 @@ By following the steps described below, you can effectively group your images ba
### Clustering Methods
-- We make use of functionality provided by the [clusteval package](https://erdogant.github.io/clusteval/pages/html/index.html) to derive the optimal number of clusters using silhouette, dbindex, and derivatives in combination with clustering methods, such as agglomerative, kmeans, dbscan and hdbscan.
+- We make use of functionality provided by the [clusteval package](https://erdogant.github.io/clusteval/pages/html/index.html) to derive the optimal number of clusters using silhouette, dbindex, and derivatives in combination with clustering methods, such as agglomerative, kmeans, dbscan and hdbscan.
- For more information on the methods or interpreting the results, we highly recommend looking into the [clusteval documentation](https://erdogant.github.io/clusteval/pages/html/index.html).
- Multiple clustering algorithms
@@ -62,10 +62,10 @@ These aligned images are now prepared for clustering, having been standardized i
### 4 Clustering Images
-Two primary clustering approaches are available:
+Two primary clustering approaches are available:
-- *Outer Contours Clustering*
-- *Complete Figures Clustering*.
+- *Outer Contours Clustering*
+- *Complete Figures Clustering*.
Both of these clustering processes group images based on structural similarities. Unlike semantic clustering, which might group images based on their color and content (e.g., animals, landscapes), structural clustering focuses on patterns, textures, and shapes.
diff --git a/pyproject.toml b/pyproject.toml
index 2ac282c..af239d9 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -118,7 +118,7 @@ module = ["yaml.*", "toml.*"]
ignore_missing_imports = true
[tool.ruff]
-line-length = 96
+line-length = 270
target-version = "py310"
extend-include = ["*.ipynb"]
diff --git a/src/unraphael/dash/align.py b/src/unraphael/dash/align.py
index 68b21a1..403fcee 100644
--- a/src/unraphael/dash/align.py
+++ b/src/unraphael/dash/align.py
@@ -16,9 +16,7 @@
from unraphael.types import ImageType
-def detect_and_compute_features(
- image_gray: np.ndarray, method: str, maxFeatures: int
-) -> Tuple[list, np.ndarray]:
+def detect_and_compute_features(image_gray: np.ndarray, method: str, maxFeatures: int) -> Tuple[list, np.ndarray]:
"""Detects and computes features in the image."""
if method == 'SIFT':
feature_detector = cv2.SIFT_create()
@@ -58,14 +56,10 @@ def compute_homography(matches: list, kpsA: list, kpsB: list, keepPercent: float
return cv2.findHomography(ptsA, ptsB, method=cv2.RANSAC)[0]
-def apply_homography(
- target: np.ndarray, H: np.ndarray, template_shape: Tuple[int, int, int]
-) -> np.ndarray:
+def apply_homography(target: np.ndarray, H: np.ndarray, template_shape: Tuple[int, int, int]) -> np.ndarray:
"""Applies the homography matrix to the target image."""
h, w, c = template_shape
- return cv2.warpPerspective(
- target, H, (w, h), borderMode=cv2.BORDER_CONSTANT, borderValue=(0, 0, 0, 0)
- )
+ return cv2.warpPerspective(target, H, (w, h), borderMode=cv2.BORDER_CONSTANT, borderValue=(0, 0, 0, 0))
def homography_matrix(
@@ -205,9 +199,7 @@ def feature_align(
# apply the homography matrix to align the images, including the rotation
h, w, c = template.shape
- aligned = cv2.warpPerspective(
- target, H, (w, h), borderMode=cv2.BORDER_CONSTANT, borderValue=(0, 0, 0, 0)
- )
+ aligned = cv2.warpPerspective(target, H, (w, h), borderMode=cv2.BORDER_CONSTANT, borderValue=(0, 0, 0, 0))
out = image.replace(data=aligned)
out.metrics.update(angle=angle)
diff --git a/src/unraphael/dash/equalize.py b/src/unraphael/dash/equalize.py
index 297fa04..339be9e 100644
--- a/src/unraphael/dash/equalize.py
+++ b/src/unraphael/dash/equalize.py
@@ -54,9 +54,7 @@ def normalize_brightness(
# Adjust the L channel (brightness) of the target image based
# on the mean brightness of the template
- l_target = (
- (l_target * (np.mean(l_template) / np.mean(l_target))).clip(0, 255).astype(np.uint8)
- )
+ l_target = (l_target * (np.mean(l_template) / np.mean(l_target))).clip(0, 255).astype(np.uint8)
# Merge LAB channels back for the adjusted target image
equalized_img_lab = cv2.merge([l_target, a_target, b_target])
@@ -133,9 +131,7 @@ def normalize_contrast(
std_target = np.std(target_lab[:, :, 0])
# Adjust contrast of target image to match template image
- l_target = (
- (target_lab[:, :, 0] * (std_template / std_target)).clip(0, 255).astype(np.uint8)
- )
+ l_target = (target_lab[:, :, 0] * (std_template / std_target)).clip(0, 255).astype(np.uint8)
normalized_img_lab = cv2.merge([l_target, target_lab[:, :, 1], target_lab[:, :, 2]])
# Convert the adjusted LAB image back to RGB
@@ -207,17 +203,11 @@ def normalize_sharpness(
mean_grad_target = np.mean(grad_target)
# Adjust sharpness of target image to match template image
- normalized_img = (
- (target * (mean_grad_template / mean_grad_target)).clip(0, 255).astype(np.uint8)
- )
+ normalized_img = (target * (mean_grad_template / mean_grad_target)).clip(0, 255).astype(np.uint8)
# Calculate sharpness value for the normalized image
- grad_x_normalized = cv2.Sobel(
- cv2.cvtColor(normalized_img, cv2.COLOR_RGB2GRAY), cv2.CV_64F, 1, 0, ksize=3
- )
- grad_y_normalized = cv2.Sobel(
- cv2.cvtColor(normalized_img, cv2.COLOR_RGB2GRAY), cv2.CV_64F, 0, 1, ksize=3
- )
+ grad_x_normalized = cv2.Sobel(cv2.cvtColor(normalized_img, cv2.COLOR_RGB2GRAY), cv2.CV_64F, 1, 0, ksize=3)
+ grad_y_normalized = cv2.Sobel(cv2.cvtColor(normalized_img, cv2.COLOR_RGB2GRAY), cv2.CV_64F, 0, 1, ksize=3)
grad_normalized = cv2.magnitude(grad_x_normalized, grad_y_normalized)
mean_grad_normalized = np.mean(grad_normalized)
diff --git a/src/unraphael/dash/home.py b/src/unraphael/dash/home.py
index 8dce3be..d75adfc 100644
--- a/src/unraphael/dash/home.py
+++ b/src/unraphael/dash/home.py
@@ -13,12 +13,7 @@
menu_items={
'Get Help': 'https://unraphael.readthedocs.io',
'Report a bug': 'https://github.com/DedodingRaphael/unraphael/issues',
- 'About': (
- f'**unraphael**: a dashboard for unraphael ({__version__}). '
- '\n\nPython toolkit for *unraveling* image similarity with a focus '
- 'on artistic practice. '
- '\n\nFor more information, see: https://github.com/DedodingRaphael/unraphael'
- ),
+            'About': (f'**unraphael**: a dashboard for unraphael ({__version__}). ' '\n\nPython toolkit for *unraveling* image similarity with a focus ' 'on artistic practice. ' '\n\nFor more information, see: https://github.com/DecodingRaphael/unraphael'),
},
)
@@ -36,7 +31,8 @@
This tool aims to provide new insights into Raphael's working methods through new digital
approaches for the study of artistic practice in art history.
-""")
+"""
+)
# Center-align using Streamlit's layout
col1, col2, col3 = st.columns([1, 2, 1]) # Middle column is wider
@@ -81,7 +77,7 @@
This project is maintained by the [Netherlands eScience Center](https://www.esciencecenter.nl/) in collaboration with the [Department of History and Art History](https://www.uu.nl/en/organisation/department-of-history-and-art-history) at the University of Utrecht.
-**Principal Investigator:** Dr. L. Costiner ([l.costiner@uu.nl](mailto:l.costiner@uu.nl))
+**Principal Investigator:** Dr. L. Costiner ([l.costiner@uu.nl](mailto:l.costiner@uu.nl))
**Technical Support:** Thijs Vroegh, Stef Smeets ([t.vroegh@esciencecenter.nl](mailto:t.vroegh@esciencecenter.nl), [s.smeets@esciencecenter.nl](mailto:s.smeets@esciencecenter.nl))
Supported through a *Small-Scale Initiatives Digital Approaches to the Humanities* grant.
diff --git a/src/unraphael/dash/image_clustering.py b/src/unraphael/dash/image_clustering.py
index f94aea0..0a8f1d0 100644
--- a/src/unraphael/dash/image_clustering.py
+++ b/src/unraphael/dash/image_clustering.py
@@ -15,19 +15,17 @@
import matplotlib.pyplot as plt
import numpy as np
import piq
-import ssim.ssimlib as pyssim
import streamlit as st
import torch
from clusteval import clusteval
-from PIL import Image
from pystackreg import StackReg
from rembg import remove
from scatterd import scatterd
+from scipy import signal
from scipy.cluster.hierarchy import dendrogram, linkage
from scipy.interpolate import interp1d
from scipy.spatial import procrustes
from scipy.spatial.distance import directed_hausdorff, squareform
-from scipy import signal
from skimage import color, transform
from skimage.feature import hog
from skimage.metrics import structural_similarity as ssim
@@ -51,7 +49,7 @@
MSE_NUMERATOR = 1000.0
NUM_THREADS = 8
-import os
+
torch.classes.__path__ = [] # Simple fix
# Alternatively: torch.classes.__path__ = [os.path.join(torch.__path__[0], torch.classes.__file__)]
@@ -103,9 +101,7 @@ def compute_mean_sharpness(images: list[np.ndarray]) -> float:
return mean_sharpness / len(images)
-def normalize_brightness_set(
- images: list[np.ndarray], mean_brightness: float
-) -> list[np.ndarray]:
+def normalize_brightness_set(images: list[np.ndarray], mean_brightness: float) -> list[np.ndarray]:
"""Normalize brightness of all images in the set to the mean brightness."""
normalized_images = []
for img in images:
@@ -114,11 +110,7 @@ def normalize_brightness_set(
l_channel, a_channel, b_channel = cv2.split(img_lab)
current_brightness = np.mean(l_channel)
print(f'Original brightness: {current_brightness}')
- l_channel = (
- (l_channel * (mean_brightness / current_brightness))
- .clip(0, 255)
- .astype(np.uint8)
- )
+ l_channel = (l_channel * (mean_brightness / current_brightness)).clip(0, 255).astype(np.uint8)
normalized_img_lab = cv2.merge([l_channel, a_channel, b_channel])
normalized_img = cv2.cvtColor(normalized_img_lab, cv2.COLOR_LAB2BGR)
print(f'Normalized brightness: {np.mean(l_channel)}')
@@ -127,11 +119,7 @@ def normalize_brightness_set(
l_channel = img
current_brightness = np.mean(l_channel)
print(f'Original brightness: {current_brightness}')
- normalized_img = (
- (l_channel * (mean_brightness / current_brightness))
- .clip(0, 255)
- .astype(np.uint8)
- )
+ normalized_img = (l_channel * (mean_brightness / current_brightness)).clip(0, 255).astype(np.uint8)
print(f'Normalized brightness: {np.mean(normalized_img)}')
normalized_images.append(normalized_img)
@@ -148,9 +136,7 @@ def normalize_contrast_set(images: list[np.ndarray], mean_contrast: float) -> li
l_channel, a_channel, b_channel = cv2.split(img_lab)
current_contrast = np.std(l_channel)
print(f'Original contrast: {current_contrast}')
- l_channel = (
- (l_channel * (mean_contrast / current_contrast)).clip(0, 255).astype(np.uint8)
- )
+ l_channel = (l_channel * (mean_contrast / current_contrast)).clip(0, 255).astype(np.uint8)
normalized_img_lab = cv2.merge([l_channel, a_channel, b_channel])
normalized_img = cv2.cvtColor(normalized_img_lab, cv2.COLOR_LAB2BGR)
print(f'Normalized contrast: {np.std(l_channel)}')
@@ -159,9 +145,7 @@ def normalize_contrast_set(images: list[np.ndarray], mean_contrast: float) -> li
l_channel = img
current_contrast = np.std(l_channel)
print(f'Original contrast: {current_contrast}')
- normalized_img = (
- (l_channel * (mean_contrast / current_contrast)).clip(0, 255).astype(np.uint8)
- )
+ normalized_img = (l_channel * (mean_contrast / current_contrast)).clip(0, 255).astype(np.uint8)
print(f'Normalized contrast: {np.std(normalized_img)}')
normalized_images.append(normalized_img)
@@ -169,9 +153,7 @@ def normalize_contrast_set(images: list[np.ndarray], mean_contrast: float) -> li
return normalized_images
-def normalize_sharpness_set(
- images: list[np.ndarray], target_sharpness: float
-) -> list[np.ndarray]:
+def normalize_sharpness_set(images: list[np.ndarray], target_sharpness: float) -> list[np.ndarray]:
"""Normalize sharpness of all images in the set to the sharpness of the
target."""
normalized_images = []
@@ -205,11 +187,7 @@ def normalize_sharpness_set(
else:
normalized_img = sharpened
- normalized_sharpness = compute_sharpness(
- cv2.cvtColor(normalized_img, cv2.COLOR_BGR2GRAY)
- if len(img.shape) == 3
- else normalized_img
- )
+ normalized_sharpness = compute_sharpness(cv2.cvtColor(normalized_img, cv2.COLOR_BGR2GRAY) if len(img.shape) == 3 else normalized_img)
print(f'Normalized sharpness: {normalized_sharpness}')
normalized_images.append(normalized_img)
@@ -325,9 +303,7 @@ def align_images_to_mean(
"""
def resize_image(image: np.ndarray, target_size: tuple[int, int]) -> np.ndarray:
- return transform.resize(
- image, target_size, anti_aliasing=True, preserve_range=True
- ).astype(image.dtype)
+ return transform.resize(image, target_size, anti_aliasing=True, preserve_range=True).astype(image.dtype)
def ensure_grayscale(image: np.ndarray) -> np.ndarray:
# make grayscale if in color
@@ -335,10 +311,7 @@ def ensure_grayscale(image: np.ndarray) -> np.ndarray:
return color.rgb2gray(image)
return image
- resized_images = {
- name: resize_image(ensure_grayscale(image), target_size)
- for name, image in images.items()
- }
+ resized_images = {name: resize_image(ensure_grayscale(image), target_size) for name, image in images.items()}
image_stack = np.stack(list(resized_images.values()), axis=0)
if image_stack.ndim != 3:
@@ -546,45 +519,48 @@ def calculate_cw_ssim_similarity(i1: np.ndarray, i2: np.ndarray) -> float:
Strong for handling small geometric distortions in structural
comparison.
-
- Implementation follows the original CW-SSIM algorithm from Wang and Simoncelli.
+
+ Implementation follows the original CW-SSIM algorithm from Wang and
+ Simoncelli.
"""
-
+
# Convert images to grayscale if needed
if len(i1.shape) == 3:
i1_gray = np.mean(i1, axis=2).astype(np.float32)
else:
i1_gray = i1.astype(np.float32)
-
+
if len(i2.shape) == 3:
i2_gray = np.mean(i2, axis=2).astype(np.float32)
else:
i2_gray = i2.astype(np.float32)
-
+
# Flatten the arrays for wavelet transform
sig1 = i1_gray.flatten()
sig2 = i2_gray.flatten()
-
+
# Define custom ricker wavelet function as fallback
def custom_ricker(points, a):
- """
- Return a Ricker wavelet (Mexican hat wavelet) of length 'points' with parameter 'a'.
-
- This is a custom implementation that can be used when scipy.signal.ricker
- or scipy.signal.windows.ricker are not available.
+ """Return a Ricker wavelet (Mexican hat wavelet) of length 'points'
+ with parameter 'a'.
+
+ This is a custom implementation that can be used when
+ scipy.signal.ricker or scipy.signal.windows.ricker are not
+ available.
"""
A = 2 / (np.sqrt(3 * a) * np.pi**0.25)
wsq = a**2
vec = np.arange(0, points) - (points - 1.0) / 2
xsq = vec**2
- mod = (1.0 - xsq / wsq)
+ mod = 1.0 - xsq / wsq
gauss = np.exp(-xsq / (2 * wsq))
return A * mod * gauss
-
+
# Use the correct wavelet function (handle both old and new SciPy versions)
try:
# Try new SciPy version (windows module)
from scipy.signal.windows import ricker
+
wavelet = ricker
except ImportError:
try:
@@ -593,32 +569,32 @@ def custom_ricker(points, a):
except AttributeError:
# Fall back to custom implementation if neither is available
wavelet = custom_ricker
-
+
# Set width parameter for the wavelet transform
widths = np.arange(1, 30)
-
+
# Perform the continuous wavelet transform
cwtmatr1 = signal.cwt(sig1, wavelet, widths)
cwtmatr2 = signal.cwt(sig2, wavelet, widths)
-
+
# Small constant for stability
k = 0.01
-
+
# Compute the first term (magnitude)
c1c2 = np.multiply(abs(cwtmatr1), abs(cwtmatr2))
c1_2 = np.square(abs(cwtmatr1))
c2_2 = np.square(abs(cwtmatr2))
num_ssim_1 = 2 * np.sum(c1c2, axis=0) + k
den_ssim_1 = np.sum(c1_2, axis=0) + np.sum(c2_2, axis=0) + k
-
+
# Compute the second term (phase)
c1c2_conj = np.multiply(cwtmatr1, np.conjugate(cwtmatr2))
num_ssim_2 = 2 * np.abs(np.sum(c1c2_conj, axis=0)) + k
den_ssim_2 = 2 * np.sum(np.abs(c1c2_conj), axis=0) + k
-
+
# Construct the result
ssim_map = (num_ssim_1 / den_ssim_1) * (num_ssim_2 / den_ssim_2)
-
+
# Average the per pixel results
index = np.average(ssim_map)
return index
@@ -636,9 +612,7 @@ def calculate_iw_ssim_similarity(i1_torch: torch.Tensor, i2_torch: torch.Tensor)
def calculate_fsim_similarity(i1_torch: torch.Tensor, i2_torch: torch.Tensor) -> float:
"""Calculate similarity using Feature Similarity Index."""
- fsim_similarity = piq.fsim(
- i1_torch, i2_torch, data_range=1.0, reduction='none', chromatic=False
- ).item()
+ fsim_similarity = piq.fsim(i1_torch, i2_torch, data_range=1.0, reduction='none', chromatic=False).item()
return fsim_similarity
@@ -731,9 +705,7 @@ def get_image_similarity(img1: np.ndarray, img2: np.ndarray, algorithm: str = 'S
i2_torch = to_torch(i2)
# Ensure both images are the same size for torch-based methods
- i1_torch = torch.nn.functional.interpolate(
- i1_torch, size=i2_torch.size()[2:], mode='bilinear', align_corners=False
- )
+ i1_torch = torch.nn.functional.interpolate(i1_torch, size=i2_torch.size()[2:], mode='bilinear', align_corners=False)
if algorithm == 'SIFT':
return calculate_sift_similarity(i1, i2)
@@ -767,9 +739,7 @@ def compute_similarity(args):
return i, j, 1.0
-def build_similarity_matrix(
- images: list[np.ndarray], algorithm: str = 'SSIM', fill_diagonal_value: float = 0.0
-) -> np.ndarray:
+def build_similarity_matrix(images: list[np.ndarray], algorithm: str = 'SSIM', fill_diagonal_value: float = 0.0) -> np.ndarray:
"""Builds a similarity matrix for a set of images.
For AffinityPropagation, SpectralClustering, and DBSCAN, one can input
@@ -791,9 +761,7 @@ def build_similarity_matrix(
np.fill_diagonal(sm, fill_diagonal_value)
# Prepare arguments for multiprocessing
- args = [
- (i, j, images, algorithm) for i in range(num_images) for j in range(i + 1, num_images)
- ]
+ args = [(i, j, images, algorithm) for i in range(num_images) for j in range(i + 1, num_images)]
# Use multiprocessing to compute similarities
with Pool() as pool:
@@ -806,9 +774,7 @@ def build_similarity_matrix(
return sm
-def get_cluster_metrics(
- X: np.ndarray, labels: np.ndarray, labels_true: Optional[np.ndarray] = None
-) -> dict[str, float]:
+def get_cluster_metrics(X: np.ndarray, labels: np.ndarray, labels_true: Optional[np.ndarray] = None) -> dict[str, float]:
"""Calculate cluster evaluation metrics based on the given data and labels.
Adapted from https://github.com/llvll/imgcluster
@@ -840,9 +806,7 @@ def get_cluster_metrics(
# Calinski-Harabasz: Higher is better (≥ 0)
if len(set(labels)) > 1:
- metrics_dict['Silhouette coefficient'] = silhouette_score(
- X, labels, metric='precomputed'
- )
+ metrics_dict['Silhouette coefficient'] = silhouette_score(X, labels, metric='precomputed')
# 1 - X transforms this similarity matrix into a dissimilarity matrix, which is required
# for the Davies-Bouldin index to calculate meaningful results
metrics_dict['Davies-Bouldin index'] = davies_bouldin_score(1 - X, labels)
@@ -854,9 +818,7 @@ def get_cluster_metrics(
return metrics_dict
-def determine_optimal_clusters(
- matrix: np.ndarray, method: str = 'silhouette', min_clust: int = 2, max_clust: int = 10
-) -> int:
+def determine_optimal_clusters(matrix: np.ndarray, method: str = 'silhouette', min_clust: int = 2, max_clust: int = 10) -> int:
"""Determines the optimal number of clusters using Spectral Clustering on a
similarity matrix. After clustering, evaluates the clustering using
Silhouette score.
@@ -882,9 +844,7 @@ def determine_optimal_clusters(
if method == 'silhouette':
spectral_scores = []
for k in range(min_clust, max_clust + 1):
- spectral_clustering = SpectralClustering(
- n_clusters=k, affinity='precomputed', random_state=42
- )
+ spectral_clustering = SpectralClustering(n_clusters=k, affinity='precomputed', random_state=42)
labels = spectral_clustering.fit_predict(matrix) # Use similarity matrix directly
# Evaluate clustering using silhouette score
@@ -903,9 +863,7 @@ def plot_scatter(features):
st.pyplot(fig)
-def plot_clusters(
- images: dict, labels: np.ndarray, n_clusters: int, title: str = 'Clustering results'
-) -> plt.Figure:
+def plot_clusters(images: dict, labels: np.ndarray, n_clusters: int, title: str = 'Clustering results') -> plt.Figure:
"""Plots the clustering results in 2D space using PCA.
Parameters
@@ -943,9 +901,7 @@ def plot_clusters(
return fig
-def plot_dendrogram(
- similarity_matrix: np.ndarray, labels: np.ndarray, method: str = 'ward', title: str = ''
-) -> plt.Figure:
+def plot_dendrogram(similarity_matrix: np.ndarray, labels: np.ndarray, method: str = 'ward', title: str = '') -> plt.Figure:
"""Plots a dendrogram for the clustering results.
Parameters
@@ -1035,9 +991,7 @@ def matrix_based_clustering(
metrics = {}
if method == 'SpectralClustering':
- sc = SpectralClustering(
- n_clusters=n_clusters, random_state=42, affinity='precomputed'
- ).fit(matrix)
+ sc = SpectralClustering(n_clusters=n_clusters, random_state=42, affinity='precomputed').fit(matrix)
metrics = get_cluster_metrics(matrix, sc.labels_, labels_true)
return sc.labels_, metrics, n_clusters
@@ -1124,9 +1078,7 @@ def feature_based_clustering(
n_clusters = len(np.unique(cluster_labels))
elif cluster_method == 'dbscan':
unique_labels = set(cluster_labels) if cluster_labels is not None else set()
- n_clusters = len(unique_labels) - (
- 1 if -1 in unique_labels else 0
- ) # Exclude noise points
+ n_clusters = len(unique_labels) - (1 if -1 in unique_labels else 0) # Exclude noise points
# Generate and display dendrogram if applicable
if cluster_method != 'kmeans':
@@ -1185,9 +1137,7 @@ def extract_foreground_mask(image: np.ndarray) -> np.ndarray:
return remove(image, mask=True)
-def extract_outer_contour_from_mask(
- mask: np.ndarray, min_area: int = 25, approx_method: int = cv2.CHAIN_APPROX_SIMPLE
-) -> Optional[np.ndarray]:
+def extract_outer_contour_from_mask(mask: np.ndarray, min_area: int = 25, approx_method: int = cv2.CHAIN_APPROX_SIMPLE) -> Optional[np.ndarray]:
"""Extract the outer contour from the mask.
Args:
@@ -1202,9 +1152,7 @@ def extract_outer_contour_from_mask(
"""
# Convert the mask to grayscale if it's in BGR format
gray_mask = cv2.cvtColor(mask, cv2.COLOR_BGR2GRAY)
- _, thresh = cv2.threshold(
- gray_mask, 1, 255, cv2.THRESH_BINARY
- ) # threshold to get binary mask
+ _, thresh = cv2.threshold(gray_mask, 1, 255, cv2.THRESH_BINARY) # threshold to get binary mask
contours, _ = cv2.findContours(thresh, cv2.RETR_EXTERNAL, approx_method)
# Filter out small contours based on area
@@ -1253,9 +1201,7 @@ def extract_outer_contours_from_aligned_images(aligned_images: dict[str, np.ndar
return contours_dict
-def visualize_outer_contours(
- aligned_images: dict[str, np.ndarray], contours_dict: dict
-) -> None:
+def visualize_outer_contours(aligned_images: dict[str, np.ndarray], contours_dict: dict) -> None:
"""
Visualize the outer contours for the aligned images on a black background.
Args:
@@ -1287,14 +1233,10 @@ def visualize_outer_contours(
contour_images.append(ImageType(name=name, data=contour_canvas))
if contour_images:
- show_images_widget(
- contour_images, n_cols=4, key='contour_images', message='Contour Images Only'
- )
+ show_images_widget(contour_images, n_cols=4, key='contour_images', message='Contour Images Only')
-def visualize_clusters(
- labels, image_names, image_list, name_dict, title='Cluster visualization'
-):
+def visualize_clusters(labels, image_names, image_list, name_dict, title='Cluster visualization'):
"""Helper function to visualize clusters of images."""
if labels is not None:
st.subheader(title)
@@ -1305,9 +1247,7 @@ def visualize_clusters(
st.write(f'#### Images from cluster #{cluster_label}')
cluster_indices = np.argwhere(labels == n).flatten()
- cluster_images = [
- ImageType(name=image_names[i], data=image_list[i]) for i in cluster_indices
- ]
+ cluster_images = [ImageType(name=image_names[i], data=image_list[i]) for i in cluster_indices]
# Use the provided dictionary (name_dict) for visualizing the images
show_images_widget(
@@ -1328,9 +1268,7 @@ def compute_fourier_descriptors(contour: np.ndarray, num_coeff: int = 10) -> np.
return np.pad(descriptors, (0, max(0, num_coeff - len(descriptors))), 'constant')
-def compute_fourier_distance(
- contour1: np.ndarray, contour2: np.ndarray, num_coeff: int = 10
-) -> float:
+def compute_fourier_distance(contour1: np.ndarray, contour2: np.ndarray, num_coeff: int = 10) -> float:
"""Compute the distance between the Fourier descriptors of two contours."""
# Compute the Fourier descriptors for both contours
fd1 = compute_fourier_descriptors(contour1, num_coeff)
@@ -1354,22 +1292,16 @@ def compute_hog_features(contour: np.ndarray, image_shape: tuple) -> np.ndarray:
length."""
blank_image = np.zeros(image_shape, dtype=np.uint8)
cv2.drawContours(blank_image, [contour], -1, 255, 1)
- features, _ = hog(
- blank_image, pixels_per_cell=(8, 8), cells_per_block=(2, 2), visualize=True
- )
+ features, _ = hog(blank_image, pixels_per_cell=(8, 8), cells_per_block=(2, 2), visualize=True)
desired_length = 100 # Example length
- return np.pad(features, (0, max(0, desired_length - len(features))), 'constant')[
- :desired_length
- ]
+ return np.pad(features, (0, max(0, desired_length - len(features))), 'constant')[:desired_length]
def resample_contour(contour: np.ndarray, num_points: int = 100) -> np.ndarray:
"""Resamples a given contour to a specified number of points using linear
interpolation."""
x, y = contour[:, 0, 0], contour[:, 0, 1]
- cumulative_lengths = np.cumsum(
- np.sqrt(np.diff(x, prepend=x[0]) ** 2 + np.diff(y, prepend=y[0]) ** 2)
- )
+ cumulative_lengths = np.cumsum(np.sqrt(np.diff(x, prepend=x[0]) ** 2 + np.diff(y, prepend=y[0]) ** 2))
# Normalize to [0, 1]
cumulative_lengths /= cumulative_lengths[-1]
# Create interpolation functions
@@ -1420,9 +1352,7 @@ def compute_hausdorff_distance(contour1: np.ndarray, contour2: np.ndarray) -> fl
)
-def compute_frechet_distance(
- contour1: np.ndarray, contour2: np.ndarray, num_points: int = 100
-) -> float:
+def compute_frechet_distance(contour1: np.ndarray, contour2: np.ndarray, num_points: int = 100) -> float:
"""Compute an efficient approximation of the Fréchet distance between two
contours."""
# Resample both contours to the same number of points
@@ -1436,9 +1366,7 @@ def compute_frechet_distance(
return frechet_distance
-def extract_and_scale_features(
- contours_dict: dict, selected_features: list, image_shape: tuple
-) -> tuple[np.ndarray, None]:
+def extract_and_scale_features(contours_dict: dict, selected_features: list, image_shape: tuple) -> tuple[np.ndarray, None]:
"""Extract and scale several features from contours.
Parameters:
@@ -1482,9 +1410,7 @@ def compute_feature_matrix(contours_list, feature_func):
avg_hd = np.mean(hausdorff_matrix[idx, :][hausdorff_matrix[idx, :] > 0])
features_by_type['hd'].append([avg_hd])
if 'procrustes' in selected_features:
- procrustes_matrix = compute_feature_matrix(
- contours_list, compute_procrustes_distance
- )
+ procrustes_matrix = compute_feature_matrix(contours_list, compute_procrustes_distance)
avg_procrustes = np.mean(procrustes_matrix[idx, :][procrustes_matrix[idx, :] > 0])
features_by_type['procrustes'].append([avg_procrustes])
diff --git a/src/unraphael/dash/image_sim.py b/src/unraphael/dash/image_sim.py
index e953df8..0217fc3 100644
--- a/src/unraphael/dash/image_sim.py
+++ b/src/unraphael/dash/image_sim.py
@@ -50,9 +50,7 @@ def image_similarity_feat_ransac_widget(images, *, method: str):
if not st.checkbox('Continue...', key='continue_ransac'):
st.stop()
- features = _detect_and_extract(
- images=images, method=method, **st.session_state.config[method]
- )
+ features = _detect_and_extract(images=images, method=method, **st.session_state.config[method])
heatmaps = _get_heatmaps(features, **st.session_state.config['ransac'])
diff --git a/src/unraphael/dash/pages/1_preprocess.py b/src/unraphael/dash/pages/1_preprocess.py
index b056bc7..a9d3333 100644
--- a/src/unraphael/dash/pages/1_preprocess.py
+++ b/src/unraphael/dash/pages/1_preprocess.py
@@ -15,12 +15,7 @@
def preprocess_image_widget(image: ImageType) -> ImageType:
"""Widget to preprocess image with user input options."""
st.title(f'Preprocessing {image.name}')
- st.write(
- 'The processed image is shown with a preset of parameters. '
- 'Use the sliders to explore the effects of image filters, or to'
- 'refine the adjustment. When you are happy with the result, '
- 'download the processed image.'
- )
+    st.write('The processed image is shown with a preset of parameters. ' 'Use the sliders to explore the effects of image filters, or to ' 'refine the adjustment. When you are happy with the result, ' 'download the processed image.')
col1, col2, col3, col4 = st.columns(4)
@@ -120,10 +115,7 @@ def remove_background_widget(image: ImageType) -> tuple[ImageType, ImageType]:
background_params['alpha_matting'] = col1.checkbox(
'Use Alpha matting',
value=False,
- help=(
- 'Alpha matting is a post processing step that can be used to '
- 'improve the quality of the output.'
- ),
+ help=('Alpha matting is a post processing step that can be used to ' 'improve the quality of the output.'),
)
background_params['only_mask'] = col1.checkbox('Keep mask only', value=False)
background_params['post_process_mask'] = col1.checkbox('Postprocess mask', value=False)
@@ -138,10 +130,7 @@ def remove_background_widget(image: ImageType) -> tuple[ImageType, ImageType]:
'Background color',
bgmap.keys(),
format_func=lambda x: bgmap[x],
- help=(
- 'You can use the post_process_mask argument to post process the '
- 'mask to get better results.'
- ),
+ help=('You can use the post_process_mask argument to post process the ' 'mask to get better results.'),
)
background_params['bg_threshold'] = col2.slider(
diff --git a/src/unraphael/dash/pages/4_compare.py b/src/unraphael/dash/pages/4_compare.py
index 4ea6a80..51fd233 100644
--- a/src/unraphael/dash/pages/4_compare.py
+++ b/src/unraphael/dash/pages/4_compare.py
@@ -10,7 +10,6 @@
from equalize import equalize_image_with_base
from image_clustering import (
calculate_brushstroke_similarity,
- calculate_cw_ssim_similarity,
calculate_fsim_similarity,
calculate_iw_ssim_similarity,
calculate_mse_similarity,
@@ -34,9 +33,7 @@
_equalize_image_with_base = st.cache_data(equalize_image_with_base)
-def add_text_to_image(
- image: np.ndarray, text1: str, text2: str, color1: tuple, color2: tuple
-) -> np.ndarray:
+def add_text_to_image(image: np.ndarray, text1: str, text2: str, color1: tuple, color2: tuple) -> np.ndarray:
"""Add two contour names below each other on the image with the given
colors."""
# Get image dimensions
@@ -103,9 +100,7 @@ def overlay_contours(image1, image2, name1, name2, contours1, contours2):
def warp_image_skimage(img, H, output_shape):
"""Warp the image using the given transformation matrix."""
- warped_image = transform.warp(
- img, inverse_map=H, output_shape=output_shape, mode='constant', cval=0
- )
+ warped_image = transform.warp(img, inverse_map=H, output_shape=output_shape, mode='constant', cval=0)
return img_as_ubyte(warped_image)
@@ -136,15 +131,11 @@ def update(frame):
im.set_array(blended_image)
ax.set_title(f'Frame {frame + 1}/{num_frames}', fontsize=16, color='white', pad=20)
- ani = animation.FuncAnimation(
- fig, update, frames=num_frames, interval=25, repeat=True, repeat_delay=1000
- )
+ ani = animation.FuncAnimation(fig, update, frames=num_frames, interval=25, repeat=True, repeat_delay=1000)
return ani
-def equalize_images_widget(
- *, base_image: np.ndarray, images: dict[str, np.ndarray]
-) -> list[np.ndarray]:
+def equalize_images_widget(*, base_image: np.ndarray, images: dict[str, np.ndarray]) -> list[np.ndarray]:
"""This widget helps with equalizing images."""
st.subheader('Equalization parameters')
@@ -162,10 +153,7 @@ def equalize_images_widget(
'reinhard': reinhard,
}
- return [
- _equalize_image_with_base(base_image=base_image, image=image, **preprocess_options)
- for image in images
- ]
+ return [_equalize_image_with_base(base_image=base_image, image=image, **preprocess_options) for image in images]
def align_images_widget(*, base_image: ImageType, images: list[ImageType]) -> list[ImageType]:
@@ -226,11 +214,7 @@ def align_images_widget(*, base_image: ImageType, images: list[ImageType]) -> li
motion_model = st.selectbox(
'Motion model:',
[None, 'translation', 'euclidian', 'affine', 'homography'],
- help=(
- 'The motion model defines the transformation between the base '
- 'image and the input images. Translation is the simplest model, '
- 'while homography is the most complex.'
- ),
+ help=('The motion model defines the transformation between the base ' 'image and the input images. Translation is the simplest model, ' 'while homography is the most complex.'),
)
if motion_model is None:
st.warning('Please select a transformation procedure to proceed.')
@@ -308,27 +292,10 @@ def alignment_help_widget():
)
)
st.write(
- (
- '- **Fourier Mellin Transform (FMT) Method**: Logarithm of the Fourier '
- 'magnitude of an image followed by another Fourier transform to obtain a '
- 'log-polar transform. Rotation and scale invariant but computationally '
- 'intensive compared to other methods.'
- )
- )
- st.write(
- (
- '- **Rotation Alignment Method**: Aligns images by finding the '
- 'optimal rotation to minimize the difference between them. Suited when '
- 'rotation is the primary misalignment source and computational cost '
- 'is not a major concern.'
- )
- )
- st.write(
- (
- '- **User-provided keypoints** (from pose estimation): '
- 'Aligns images based on user-provided keypoints obtained from pose estimation.'
- )
+ ('- **Fourier Mellin Transform (FMT) Method**: Logarithm of the Fourier ' 'magnitude of an image followed by another Fourier transform to obtain a ' 'log-polar transform. Rotation and scale invariant but computationally ' 'intensive compared to other methods.')
)
+ st.write(('- **Rotation Alignment Method**: Aligns images by finding the ' 'optimal rotation to minimize the difference between them. Suited when ' 'rotation is the primary misalignment source and computational cost ' 'is not a major concern.'))
+ st.write(('- **User-provided keypoints** (from pose estimation): ' 'Aligns images based on user-provided keypoints obtained from pose estimation.'))
def display_images_widget(
@@ -388,8 +355,7 @@ def to_grayscale(img: np.ndarray) -> np.ndarray:
with col1:
st.markdown('
', unsafe_allow_html=True)
st.markdown(
- '
'
- 'Structural Similarity Metrics
',
+ '' 'Structural Similarity Metrics
',
unsafe_allow_html=True,
)
st.markdown('
', unsafe_allow_html=True)
@@ -404,8 +370,8 @@ def to_grayscale(img: np.ndarray) -> np.ndarray:
ssim_similarity = calculate_ssim_similarity(base_gray, image_gray)
col1.metric('SSIM Similarity', f'{ssim_similarity:.2f}')
- #cwsim_similarity = calculate_cw_ssim_similarity(base_gray, image_gray)
- #col1.metric('CWSIM Similarity', f'{cwsim_similarity:.2f}')
+ # cwsim_similarity = calculate_cw_ssim_similarity(base_gray, image_gray)
+ # col1.metric('CWSIM Similarity', f'{cwsim_similarity:.2f}')
iw_ssim_similarity = calculate_iw_ssim_similarity(base_tensor, image_tensor)
col1.metric('IW-SSIM Similarity', f'{iw_ssim_similarity:.2f}')
@@ -419,8 +385,7 @@ def to_grayscale(img: np.ndarray) -> np.ndarray:
with col2:
st.markdown('
', unsafe_allow_html=True)
st.markdown(
- 'The slider can be used to '
- 'compare images side by side
',
+ 'The slider can be used to ' 'compare images side by side
',
unsafe_allow_html=True,
)
image_comparison(
@@ -484,9 +449,7 @@ def to_grayscale(img: np.ndarray) -> np.ndarray:
unsafe_allow_html=True,
)
- contour_overlay, color1, color2 = overlay_contours(
- base_image.data, image.data, base_image.name, image.name, contours1, contours2
- )
+ contour_overlay, color1, color2 = overlay_contours(base_image.data, image.data, base_image.name, image.name, contours1, contours2)
st.markdown(
f'{base_image.name}',
@@ -499,9 +462,7 @@ def to_grayscale(img: np.ndarray) -> np.ndarray:
st.image(contour_overlay, use_container_width=True)
# Add contour names below each other with corresponding colors
- contour_overlay_with_text = add_text_to_image(
- contour_overlay, base_image.name, image.name, color1=color1, color2=color2
- )
+ contour_overlay_with_text = add_text_to_image(contour_overlay, base_image.name, image.name, color1=color1, color2=color2)
col2.download_button(
label='Download Overlay Contours',
data=imageio.imwrite('', contour_overlay_with_text, extension='.png'),
diff --git a/src/unraphael/dash/pages/5_cluster.py b/src/unraphael/dash/pages/5_cluster.py
index 5e4f56d..2431567 100644
--- a/src/unraphael/dash/pages/5_cluster.py
+++ b/src/unraphael/dash/pages/5_cluster.py
@@ -159,9 +159,7 @@ def equalize_images_widget(*, images: dict[str, np.ndarray]) -> dict[str, np.nda
return {name: equalized_images[i] for i, name in enumerate(images.keys())}
-def align_to_mean_image_widget(
- *, images: dict[str, np.ndarray]
-) -> Optional[dict[str, np.ndarray]]:
+def align_to_mean_image_widget(*, images: dict[str, np.ndarray]) -> Optional[dict[str, np.ndarray]]:
"""This widget aligns a set of images to a reference image using various
transformation models, typically to their mean value, but other aligning
options are also available. The aligned images are used for later
@@ -210,10 +208,7 @@ def align_to_mean_image_widget(
'Transformation model:',
[None, 'translation', 'rigid body', 'scaled rotation', 'affine', 'bilinear'],
index=0, # default to none
- help=(
- 'The transformation model defines the geometric transformation'
- 'one wants to apply.'
- ),
+    help=('The transformation model defines the geometric transformation one wants to apply.'),
)
if motion_model is None:
@@ -276,9 +271,7 @@ def select_cluster_approach() -> str:
)
-def cluster_on_outer_contours(
- images: Dict[str, np.ndarray], image_names: List[str], image_list: List[np.ndarray]
-) -> None:
+def cluster_on_outer_contours(images: Dict[str, np.ndarray], image_names: List[str], image_list: List[np.ndarray]) -> None:
"""Handle clustering based on outer contours."""
st.write('Extracting outer contours from the aligned images...')
contours_dict = _extract_outer_contours(images)
@@ -355,9 +348,7 @@ def select_cluster_evaluation(cluster_method: str) -> str:
return 'silhouette'
else:
- return st.selectbox(
- 'Cluster evaluation method:', ['silhouette', 'dbindex', 'derivative']
- )
+ return st.selectbox('Cluster evaluation method:', ['silhouette', 'dbindex', 'derivative'])
def select_cluster_linkage() -> str:
@@ -369,9 +360,7 @@ def select_cluster_linkage() -> str:
)
-def cluster_on_complete_figures(
- images: Dict[str, np.ndarray], image_names: List[str], image_list: List[np.ndarray]
-) -> None:
+def cluster_on_complete_figures(images: Dict[str, np.ndarray], image_names: List[str], image_list: List[np.ndarray]) -> None:
"""Handle clustering based on complete figures."""
st.subheader('The aligned images')
@@ -387,9 +376,7 @@ def cluster_on_complete_figures(
if cluster_method == 'SpectralClustering':
specify_clusters = st.checkbox('Specify number of clusters?', value=False)
if specify_clusters:
- n_clusters = st.number_input(
- 'Number of clusters:', min_value=2, step=1, value=4
- )
+ n_clusters = st.number_input('Number of clusters:', min_value=2, step=1, value=4)
measure = select_similarity_measure()
@@ -399,9 +386,7 @@ def cluster_on_complete_figures(
st.subheader(f'Similarity matrix based on pairwise {measure} indices')
matrix = _build_similarity_matrix(np.array(image_list), algorithm=measure)
st.write(np.round(matrix, decimals=2))
- labels, metrics, n_clusters = _matrix_based_clustering(
- matrix, algorithm=measure, n_clusters=n_clusters, method=cluster_method
- )
+ labels, metrics, n_clusters = _matrix_based_clustering(matrix, algorithm=measure, n_clusters=n_clusters, method=cluster_method)
if labels is None:
st.error('Clustering failed. Check parameters and try again.')
@@ -431,9 +416,7 @@ def cluster_on_complete_figures(
visualize_clusters(labels, image_names, image_list, image_names)
elif cluster_method in ['agglomerative', 'dbscan', 'kmeans']:
- cluster_evaluation = st.selectbox(
- 'Cluster evaluation method:', ['silhouette', 'dbindex', 'derivative']
- )
+ cluster_evaluation = st.selectbox('Cluster evaluation method:', ['silhouette', 'dbindex', 'derivative'])
cluster_linkage = st.selectbox(
'Linkage method:',
['ward', 'single', 'complete', 'average', 'weighted', 'centroid', 'median'],
@@ -530,24 +513,15 @@ def main():
# Automatically align if there are unaligned images
if unaligned_images:
- st.warning(
- "It appears your images are not aligned yet. Let's do that in the following step..."
- )
+ st.warning("It appears your images are not aligned yet. Let's do that in the following step...")
aligned_images = align_to_mean_image_widget(images=images)
else:
- st.success(
- "All images appear to be already aligned. Let's proceed to the following step..."
- )
+ st.success("All images appear to be already aligned. Let's proceed to the following step...")
aligned_images = images
if aligned_images:
# Convert to uint8 if necessary
- aligned_images = {
- name: (image * 255).astype(np.uint8)
- if image.dtype == np.float64
- else image.astype(np.uint8)
- for name, image in aligned_images.items()
- }
+ aligned_images = {name: (image * 255).astype(np.uint8) if image.dtype == np.float64 else image.astype(np.uint8) for name, image in aligned_images.items()}
st.markdown('---')
cluster_image_widget(aligned_images)
diff --git a/src/unraphael/dash/pages/6_ratios.py b/src/unraphael/dash/pages/6_ratios.py
index a154f74..2444b3f 100644
--- a/src/unraphael/dash/pages/6_ratios.py
+++ b/src/unraphael/dash/pages/6_ratios.py
@@ -9,15 +9,15 @@
import pandas as pd
import seaborn as sns
import streamlit as st
+from matplotlib.colors import LinearSegmentedColormap
from PIL import Image
from ratio_analysis import calculate_corrected_area, get_image_size_resolution
from rembg import remove
-from matplotlib.colors import LinearSegmentedColormap
def main():
st.title('Painting Analysis')
-
+
st.write(
(
'This page estimates and compares the areas of the main figures in the real paintings '
@@ -28,12 +28,9 @@ def main():
)
)
-
# Load information with real dimensions of paintings
st.sidebar.header('Upload painting dimensions')
- uploaded_excel = st.sidebar.file_uploader(
- 'Choose Excel file with real dimensions', type=['xlsx']
- )
+ uploaded_excel = st.sidebar.file_uploader('Choose Excel file with real dimensions', type=['xlsx'])
if uploaded_excel:
try:
@@ -124,9 +121,7 @@ def main():
image_file.seek(0) # Rewind file pointer for reading
# Extract DPI from the image's metadata (if it exists)
- height_pixels_meta, width_pixels_meta, (dpi_x, dpi_y), height_inches, width_inches = (
- get_image_size_resolution(image['data'])
- )
+ height_pixels_meta, width_pixels_meta, (dpi_x, dpi_y), height_inches, width_inches = get_image_size_resolution(image['data'])
# Calculate the physical size of the photo using DPI from image metadata
height_photo_inches = height_pixels_meta / dpi_x # DPI for height from image metadata
@@ -150,24 +145,21 @@ def main():
for idx, image in enumerate(images):
mask = remove(image['data'], only_mask=True)
cols[idx % 3].image(
- mask,
- caption=f'Mask for {image["name"]}',
- use_container_width=True # Changed from use_column_width
+ mask,
+ caption=f'Mask for {image["name"]}',
+ use_container_width=True, # Changed from use_column_width
)
# # Calculate corrected areas ----
st.subheader('Area Analysis')
atol_value = st.slider(
- 'Set the tolerance for area comparison. This is the maximum difference '
- 'in area surface that is allowed between two paintings to consider '
- 'them similar. The default is 5% (0.05)',
+ 'Set the tolerance for area comparison. This is the maximum difference ' 'in area surface that is allowed between two paintings to consider ' 'them similar. The default is 5% (0.05)',
min_value=0.01,
max_value=0.10,
value=0.05,
step=0.01,
help='Adjust the tolerance level for comparing areas (5% = 0.05)',
-
)
corrected_areas = []
@@ -198,12 +190,14 @@ def main():
heatmap_data[i, j] = ratio
# Create custom colormap
- colors = ['#FF6B6B', # Light red/coral for extreme values
- '#4FB5E6', # Light blue
- '#05445E', # Dark blue (for values near 1)
- '#4FB5E6', # Light blue
- '#FF6B6B'] # Light red/coral for extreme values
-
+ colors = [
+ '#FF6B6B', # Light red/coral for extreme values
+ '#4FB5E6', # Light blue
+ '#05445E', # Dark blue (for values near 1)
+ '#4FB5E6', # Light blue
+        '#FF6B6B',  # Light red/coral for extreme values
+    ]
+
custom_cmap = LinearSegmentedColormap.from_list('custom_blues', colors)
# Create heatmap
diff --git a/src/unraphael/dash/ratio_analysis.py b/src/unraphael/dash/ratio_analysis.py
index 955b554..f4f0ff2 100644
--- a/src/unraphael/dash/ratio_analysis.py
+++ b/src/unraphael/dash/ratio_analysis.py
@@ -19,9 +19,7 @@
_load_images_from_drc = st.cache_data(load_images_from_drc)
-def get_image_size_resolution(
- image_array: np.ndarray, name=None
-) -> Tuple[int, int, Tuple[float, float], float, float]:
+def get_image_size_resolution(image_array: np.ndarray, name=None) -> Tuple[int, int, Tuple[float, float], float, float]:
"""Get the height, width, and resolution of an image from an in-memory
NumPy array."""
try:
@@ -89,10 +87,7 @@ def get_image_size_resolution(
# Default DPI if no resolution found
if not dpi or dpi == (0, 0):
if name:
- print(
- f'DPI information not found in any metadata for image {name}. '
- 'Using default.'
- )
+ print(f'DPI information not found in any metadata for image {name}. ' 'Using default.')
dpi = (96.0, 96.0) # Common default DPI
dpi_x, dpi_y = dpi
@@ -110,9 +105,7 @@ def get_image_size_resolution(
raise
-def calculate_corrected_area(
- image: np.ndarray, real_size_cm: list[float], dpi: float, tolerance: float = 0.05
-) -> Optional[float]:
+def calculate_corrected_area(image: np.ndarray, real_size_cm: list[float], dpi: float, tolerance: float = 0.05) -> Optional[float]:
"""Calculate the corrected area of an image based on real physical
dimensions and DPI.
@@ -135,9 +128,7 @@ def calculate_corrected_area(
try:
# Print input parameters
print('\nInput Parameters:')
- print(
- f'Dimensions of real painting (cm): {real_size_cm[0]:.2f} x {real_size_cm[1]:.2f}'
- )
+ print(f'Dimensions of real painting (cm): {real_size_cm[0]:.2f} x {real_size_cm[1]:.2f}')
print(f'DPI: {dpi:.2f}')
# Create mask of the main figure
@@ -166,15 +157,12 @@ def calculate_corrected_area(
# Calculate physical dimensions from pixels and DPI
photo_height_inches = img_height / dpi
photo_width_inches = img_width / dpi
- print(
- '\nPhoto dimensions (inches): '
- '{photo_height_inches:.2f} x {photo_width_inches:.2f}'
- )
+    print(f'\nPhoto dimensions (inches): {photo_height_inches:.2f} x {photo_width_inches:.2f}')
# Convert real dimensions to inches
real_height_inches = real_size_cm[0] / 2.54
real_width_inches = real_size_cm[1] / 2.54
- print('Real dimensions (inches): ' '{real_height_inches:.2f} x {real_width_inches:.2f}')
+    print(f'Real dimensions (inches): {real_height_inches:.2f} x {real_width_inches:.2f}')
# Calculate scaling ratios
height_ratio = real_height_inches / photo_height_inches
@@ -189,11 +177,7 @@ def calculate_corrected_area(
# If scaling ratios are too different, use the less extreme ratio
if ratio_diff > tolerance:
- st.warning(
- f'Inconsistent scaling detected with a {ratio_diff:.2%} '
- 'difference between height and width ratios. Using more '
- 'conservative scaling.'
- )
+ st.warning(f'Inconsistent scaling detected with a {ratio_diff:.2%} ' 'difference between height and width ratios. Using more ' 'conservative scaling.')
# Use the ratio closer to 1.0 to minimize distortion
if abs(height_ratio - 1.0) < abs(width_ratio - 1.0):
scaling_factor = height_ratio
diff --git a/src/unraphael/dash/widgets.py b/src/unraphael/dash/widgets.py
index e028044..fa1e7c7 100644
--- a/src/unraphael/dash/widgets.py
+++ b/src/unraphael/dash/widgets.py
@@ -36,9 +36,7 @@ def show_images_widget(
) -> None | ImageType:
"""Widget to show images with given number of columns."""
col1, col2 = st.columns(2)
- n_cols = col1.number_input(
- 'Number of columns for display', value=8, min_value=1, step=1, key=f'{key}_cols'
- )
+ n_cols = col1.number_input('Number of columns for display', value=8, min_value=1, step=1, key=f'{key}_cols')
options = [None] + [image.name for image in images]
selected = col2.selectbox(message, options=options, key=f'{key}_sel')
selected_image = None
diff --git a/src/unraphael/feature.py b/src/unraphael/feature.py
index 9cd772a..cce4e71 100644
--- a/src/unraphael/feature.py
+++ b/src/unraphael/feature.py
@@ -35,9 +35,7 @@ def plot_keypoints(self):
return fig
-def detect_and_extract(
- images: list[ImageType], *, method: str, **kwargs
-) -> dict[str, FeatureContainer]:
+def detect_and_extract(images: list[ImageType], *, method: str, **kwargs) -> dict[str, FeatureContainer]:
"""`extractor` must have `detect_and_extract` method."""
features = {}
diff --git a/src/unraphael/preprocess.py b/src/unraphael/preprocess.py
index ba771f3..4be53ea 100644
--- a/src/unraphael/preprocess.py
+++ b/src/unraphael/preprocess.py
@@ -64,17 +64,11 @@ def process_image(
]
# Apply bilateral blur filter to each color channel
- channels = [
- rank.mean_bilateral(channel, footprint=disk(bilateral_strength), s0=55, s1=55)
- for channel in channels
- ]
+ channels = [rank.mean_bilateral(channel, footprint=disk(bilateral_strength), s0=55, s1=55) for channel in channels]
# Contrast limited adabtive histogram equalization
kernel_size = tuple([max(s // clahe_tiles, 1) for s in image.shape[:2]])
- channels = [
- equalize_adapthist(channel, clip_limit=clahe_clip_limit, kernel_size=kernel_size)
- for channel in channels
- ]
+ channels = [equalize_adapthist(channel, clip_limit=clahe_clip_limit, kernel_size=kernel_size) for channel in channels]
image = np.dstack(channels)
diff --git a/tests/dash_test.py b/tests/dash_test.py
index f6627a9..79255d8 100644
--- a/tests/dash_test.py
+++ b/tests/dash_test.py
@@ -16,11 +16,7 @@ def test_home():
assert not at.exception
-@pytest.mark.xfail(
- reason='Fails with Thread "MainThread": missing ScriptRunContext'
- 'on the CI because of rembg dependency. Only the first time the page is'
- 'loaded does the test fail.'
-)
+@pytest.mark.xfail(reason='Fails with Thread "MainThread": missing ScriptRunContext ' 'on the CI because of rembg dependency. Only the first time the page is ' 'loaded does the test fail.')
def test_preprocess_load():
at = AppTest.from_file(str(dash_directory / 'pages' / '1_preprocess.py'))
at.run(timeout=5)