csi_images.csi_images

import warnings
from typing import Literal

import cv2
import numpy as np
import pandas as pd

from PIL import Image, ImageFont, ImageDraw
from skimage.measure import regionprops_table

# Cache the opened font to avoid repeatedly re-opening it
opened_font: ImageFont.FreeTypeFont | None = None


def extract_mask_info(
    mask: np.ndarray,
    images: list[np.ndarray] = None,
    image_labels: list[str] = None,
    properties: list[str] = None,
) -> pd.DataFrame:
    """
    Extracts events from a mask. Originally from @vishnu.
    :param mask: mask to extract events from
    :param images: list of intensity images to extract from
    :param image_labels: list of labels for images
    :param properties: list of properties to extract in addition to the defaults:
    label, centroid. See
    https://scikit-image.org/docs/stable/api/skimage.measure.html#skimage.measure.regionprops
    for additional properties.
    :return: pd.DataFrame with columns id, y, x, plus any extra properties,
    or an empty DataFrame
    """
    # Return empty if the mask is empty
    if np.max(mask) == 0:
        return pd.DataFrame()
    # Reshape any intensity images
    if images is not None:
        if isinstance(images, list):
            images = np.stack(images, axis=-1)
        if image_labels is not None and len(image_labels) != images.shape[-1]:
            raise ValueError("Number of image labels must match number of images.")
    # Accumulate any extra properties
    base_properties = ["label", "centroid"]
    if properties is not None:
        properties = base_properties + properties
    else:
        properties = base_properties

    # Use skimage.measure.regionprops_table to compute properties
    info = pd.DataFrame(
        regionprops_table(mask, intensity_image=images, properties=properties)
    )

    # Rename columns to match desired output
    info = info.rename(
        columns={
            "label": "id",
            "centroid-0": "y",
            "centroid-1": "x",
        },
    )
    # Also rename channel-specific columns if labels were provided
    renamings = {}
    if image_labels is not None:
        for column in info.columns:
            if column.find("-") != -1:
                for i in range(len(image_labels)):
                    suffix = f"-{i}"
                    if column.endswith(suffix):
                        renamings[column] = f"{image_labels[i]}_{column[:-len(suffix)]}"
    info = info.rename(columns=renamings)

    return info


def add_mask_overlay(
    images: np.ndarray | list[np.ndarray],
    mask: np.ndarray[np.uint8],
    overlay_color: tuple[float, float, float] = (0.8, 1, 0),
):
    """
    Creates a 1-pixel wide border around the mask in the image.
    :param images: (H, W, 3), or (H, W) image or list of images.
    :param mask: (H, W) binary mask to overlay on the image.
    :param overlay_color: color of the outline for RGB images.
    Ignored for grayscale images.
    :return: image or list of images with the outline drawn in, matching the input form.
    """
    results = []
    # Temporarily make into a list
    return_array = False
    if isinstance(images, np.ndarray):
        images = [images]
        return_array = True

    # Get the mask outline
    mask_kernel = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]], np.uint8)
    outline = cv2.morphologyEx(mask, cv2.MORPH_DILATE, mask_kernel) - mask

    # Add mask overlay to images
    for image in images:
        if image.shape[:2] != mask.shape:
            raise ValueError("Image and mask must have the same shape.")
        if np.issubdtype(image.dtype, np.unsignedinteger):
            # Unsigned integer; scale outline to image range
            this_outline = outline.astype(image.dtype) * np.iinfo(image.dtype).max
        else:
            # Floating point; scale outline to [0, 1]
            this_outline = outline.astype(image.dtype)
        if len(image.shape) == 3:
            # (H, W, 3) RGB image
            # Scale outline with color and match dtype
            this_outline = np.stack([this_outline * c for c in overlay_color], axis=-1)
            # Set outline to 0, then to outline color
            result = image * np.stack([1 - outline] * 3, axis=-1)
            result += this_outline.astype(image.dtype)
        elif len(image.shape) == 2:
            # (H, W) grayscale image
            result = image * (1 - outline)
            result += this_outline.astype(image.dtype)
        else:
            raise ValueError("Images must be (H, W) or (H, W, 3).")
        results.append(result)

    if return_array:
        return results[0]
    else:
        return results


def make_rgb(
    images: list[np.ndarray],
    colors: list[tuple[float, float, float]],
    mask: np.ndarray[np.uint8] = None,
    mask_mode: Literal["overlay", "hard"] = "overlay",
    overlay_color: tuple[float, float, float] = (0.8, 1, 0),
) -> np.ndarray:
    """
    Combine multiple channels into a single RGB image.
    :param images: list of numpy arrays representing the channels.
    :param colors: list of RGB tuples for each channel.
    :param mask: numpy array representing a mask to overlay on the image.
    :param mask_mode: whether to overlay the mask or use it as a hard mask.
    :param overlay_color: color of the outline for RGB images.
    Ignored for grayscale images.
    :return: (H, W, 3) numpy array representing the RGB image.
    """
    if len(images) == 0:
        raise ValueError("No images provided.")
    if len(colors) == 0:
        raise ValueError("No colors provided.")
    if len(images) != len(colors):
        raise ValueError("Number of images and colors must match.")
    if not all([isinstance(image, np.ndarray) for image in images]):
        raise ValueError("Images must be numpy arrays.")
    if not all([len(c) == 3 for c in colors]):
        raise ValueError("Colors must be RGB tuples.")

    # Create an output with same shape and larger type to avoid overflow
    dims = images[0].shape
    dtype = images[0].dtype

    if dtype == np.uint8:
        temp_dtype = np.uint16
    elif dtype == np.uint16:
        temp_dtype = np.uint32
    else:
        temp_dtype = np.float64
    rgb = np.zeros((*dims, 3), dtype=temp_dtype)

    # Combine images with colors (can also be thought of as gains)
    for image, color in zip(images, colors):
        if image.shape != dims:
            raise ValueError("All images must have the same shape.")
        if image.dtype != dtype:
            raise ValueError("All images must have the same dtype.")
        rgb += np.stack([image * c for c in color], axis=-1).astype(temp_dtype)

    # Cut off any overflow and convert back to original dtype
    if np.issubdtype(dtype, np.unsignedinteger):
        max_value = np.iinfo(dtype).max
    else:
        max_value = 1.0
    rgb = np.clip(rgb, 0, max_value).astype(dtype)

    # Add a mask if desired
    if mask is not None:
        if mask.shape != dims:
            raise ValueError("Mask must have the same shape as the images.")
        if mask_mode == "overlay":
            rgb = add_mask_overlay(rgb, mask, overlay_color)
        elif mask_mode == "hard":
            rgb = rgb * np.stack([mask] * 3, axis=-1)
        else:
            raise ValueError("Mask mode must be 'overlay' or 'hard'.")

    return rgb


def make_montage(
    images: list[np.ndarray],
    order: list[int] | None,
    composites: dict[int, tuple[float, float, float]] | None,
    mask: np.ndarray[np.uint8] = None,
    labels: list[str] = None,
    mask_mode: Literal["overlay", "hard"] = "overlay",
    overlay_color: tuple[float, float, float] = (0.8, 1, 0),
    label_font: str = "Roboto-Regular.ttf",
    label_size: int | float = 0.18,
    label_outline: bool = True,
    colored_labels: bool = True,
    border_size: int = 1,
    horizontal: bool = True,
    dtype=np.uint8,
) -> np.ndarray:
    """
    Combine multiple images into a single montage based on order.
    Can include a composite (always first).
    :param images: list of numpy arrays representing the images.
    :param order: list of indices for the images going into the montage or None.
    :param composites: dictionary of indices and RGB tuples for a composite or None.
    :param mask: numpy array representing a mask to overlay on the image.
    :param mask_mode: whether to overlay the mask or use it as a hard mask.
    :param overlay_color: color of the outline for RGB images. Ignored for grayscale.
    :param labels: list of labels for the images. If length == len(order), will apply to
    grayscale images only; if length == len(order) + 1 and composites exist, will apply
    to all images.
    :param label_font: path to a font file for labels. See PIL.ImageFont for details.
    :param label_size: size of the font for labels. If a float, calculates a font size
    as a fraction of the image size.
    :param label_outline: whether to draw an outline around the label text.
    :param colored_labels: whether to color the labels based on the composites.
    :param border_size: width of the border between images.
    :param horizontal: whether to stack images horizontally or vertically.
    :param dtype: the dtype of the output montage.
    :return: numpy array representing the montage.
    """
    if len(images) == 0:
        raise ValueError("No images provided.")
    if not all([isinstance(image, np.ndarray) for image in images]):
        raise ValueError("Images must be numpy arrays.")
    if not all([len(image.shape) == 2 for image in images]):
        raise ValueError("Images must be 2D.")
    if composites is not None and not all([len(c) == 3 for c in composites.values()]):
        raise ValueError("Composites must be RGB tuples.")

    order = [] if order is None else order
    n_images = len(order)
    n_images += 1 if composites is not None else 0

    if n_images == 0:
        raise ValueError("No images or composites requested.")

    # Adapt label font size if necessary
    if isinstance(label_size, float):
        label_size = int(images[0].shape[1] * label_size)

    # Fill the montage with white, which becomes the border color
    montage = np.full(
        get_montage_shape(images[0].shape, n_images, border_size, horizontal),
        np.iinfo(dtype).max,  # White fill
        dtype=dtype,
    )

    # Load font if necessary
    global opened_font
    if labels is not None and len(order) <= len(labels) <= n_images:
        if (
            opened_font is None
            or opened_font.path != label_font
            or opened_font.size != label_size
        ):
            try:
                opened_font = ImageFont.truetype(label_font, label_size)
            except OSError:
                warnings.warn(f"Could not load font {label_font}. Using defaults.")
                opened_font = ImageFont.load_default(label_size)
    elif labels is not None:
        raise ValueError("Number of labels must be 0, match order, or match images.")

    # Populate the montage with images
    offset = border_size  # Keeps track of the offset for the next image
    image_height, image_width = images[0].shape

    # Composite first
    if composites is not None and len(composites) > 0:
        image = make_rgb(
            [images[i] for i in composites.keys()],
            list(composites.values()),
            mask,
            mask_mode,
            overlay_color,
        )

        if labels is not None and len(labels) == n_images:
            image = scale_bit_depth(image, np.uint8)  # Required for PIL
            # Draw a label on the composite
            pillow_image = Image.fromarray(image)
            # Determine the fill color based on the average intensity of the image
            included_height = max(label_size * 2, image.shape[1])
            if get_image_lightness(image[:, -included_height:, :]) > 50:
                text_fill = (0, 0, 0)
                outline_fill = (255, 255, 255)
            else:
                text_fill = (255, 255, 255)
                outline_fill = (0, 0, 0)
            draw = ImageDraw.Draw(pillow_image, "RGB")
            draw.text(
                (image.shape[1] // 2, image.shape[0]),  # (x, y): bottom-center
                labels[0],
                fill=text_fill,
                anchor="md",  # Middle, descender (absolute bottom of font)
                font=opened_font,
                stroke_width=round(label_size / 10) if label_outline else 0,
                stroke_fill=outline_fill,
            )
            image = np.asarray(pillow_image)
            labels = labels[1:]

        # Scale to desired dtype
        image = scale_bit_depth(image, dtype)

        if horizontal:
            montage[
                border_size : border_size + image_height,
                offset : offset + image_width,
            ] = image
            offset += image_width + border_size
        else:
            montage[
                offset : offset + image_height,
                border_size : border_size + image_width,
            ] = image
            offset += image_height + border_size

    # Grayscale order next
    for i, o in enumerate(order):
        image = images[o]
        image = np.tile(image[..., None], (1, 1, 3))  # Make 3-channel

        if mask is not None:
            if mask_mode == "overlay":
                image = add_mask_overlay(image, mask, overlay_color)
            elif mask_mode == "hard":
                image *= np.stack([mask] * 3, axis=-1)
            else:
                raise ValueError("Mask mode must be 'overlay' or 'hard'.")

        if labels is not None and len(labels) == len(order):
            image = scale_bit_depth(image, np.uint8)  # Required for PIL
            pillow_image = Image.fromarray(image)
            if colored_labels and composites is not None and o in composites:
                text_fill = tuple(round(255 * rgb_f) for rgb_f in composites[o])
                if get_lightness(composites[o]) > 50:
                    outline_fill = (0, 0, 0)
                else:
                    outline_fill = (255, 255, 255)
            else:
                # Determine the color based on the average intensity of the image
                included_height = max(label_size * 2, image.shape[1])
                if get_image_lightness(image[:, -included_height:, :]) > 50:
                    text_fill = (0, 0, 0)
                    outline_fill = (255, 255, 255)
                else:
                    text_fill = (255, 255, 255)
                    outline_fill = (0, 0, 0)
            draw = ImageDraw.Draw(pillow_image, "RGB")
            draw.text(
                (image.shape[1] // 2, image.shape[0]),  # (x, y): bottom-center
                labels[i],
                fill=text_fill,
                anchor="md",  # Middle, descender (absolute bottom of font)
                font=opened_font,
                stroke_width=round(label_size / 10) if label_outline else 0,
                stroke_fill=outline_fill,
            )
            image = np.asarray(pillow_image)

        # Scale to desired dtype
        image = scale_bit_depth(image, dtype)

        if horizontal:
            montage[
                border_size : border_size + image_height,
                offset : offset + image_width,
            ] = image
            offset += image_width + border_size
        else:
            montage[
                offset : offset + image_height,
                border_size : border_size + image_width,
            ] = image
            offset += image_height + border_size

    return montage


def get_montage_shape(
    image_shape: tuple[int, int],
    n_images: int,
    border_size: int = 1,
    horizontal: bool = True,
) -> tuple[int, int, int]:
    """
    Determine the size of the montage based on the images and order.
    :param image_shape: tuple of height, width of the base images going into the montage.
    :param n_images: how many images are going into the montage, including composite.
    :param border_size: width of the border between images.
    :param horizontal: whether to stack images horizontally or vertically.
    :return: tuple of the height, width, and channels (always 3) of the montage.
    """
    if len(image_shape) != 2:
        raise ValueError("Image shape must be a tuple of height, width.")
    if image_shape[0] < 1 or image_shape[1] < 1:
        raise ValueError("Image shape must be positive.")
    if not isinstance(n_images, int) or n_images < 1:
        raise ValueError("Number of images must be a positive integer.")

    # Determine the layout of the montage
    if horizontal:
        n_rows = 1
        n_cols = n_images
    else:
        n_rows = n_images
        n_cols = 1

    # Determine the montage size
    image_height, image_width = image_shape
    montage_height = n_rows * image_height + (n_rows + 1) * border_size
    montage_width = n_cols * image_width + (n_cols + 1) * border_size

    return montage_height, montage_width, 3  # 3 for RGB


def scale_bit_depth(
    image: np.ndarray, dtype: np.dtype, real_bits: int = None
) -> np.ndarray:
    """
    Converts the image to the desired bit depth, factoring in real bit depth.
    :param image: numpy array representing the image.
    :param dtype: the desired dtype of the image.
    :param real_bits: the actual bit depth of the image, such as from a 14-bit camera.
    :return: numpy array representing the image with the new dtype.
    """
    if not isinstance(image, np.ndarray):
        raise ValueError("Image must be a numpy array.")
    if not np.issubdtype(image.dtype, np.unsignedinteger) and not np.issubdtype(
        image.dtype, np.floating
    ):
        raise ValueError("Input image dtype must be an unsigned integer or float.")
    if np.issubdtype(image.dtype, np.floating) and (
        np.min(image) < 0 or np.max(image) > 1
    ):
        raise ValueError("Image values must be between 0 and 1.")
    if not np.issubdtype(dtype, np.unsignedinteger) and not np.issubdtype(
        dtype, np.floating
    ):
        raise ValueError("Output dtype must be an unsigned integer or float.")

    # First, determine the scaling required for the real bit depth
    scale = 1
    if real_bits is not None and np.issubdtype(image.dtype, np.unsignedinteger):
        dtype_bit_depth = np.iinfo(image.dtype).bits
        if real_bits > dtype_bit_depth:
            raise ValueError("Real bits must be less than or equal to image bit depth")
        elif real_bits < dtype_bit_depth:
            # We should scale up the values to the new bit depth
            if np.max(image) > 2**real_bits - 1:
                raise ValueError("Image values exceed real bit depth; already scaled?")
            scale = np.iinfo(image.dtype).max / (2**real_bits - 1)

    # Already validated that the min is 0; determine the max
    if np.issubdtype(image.dtype, np.unsignedinteger):
        in_max = np.iinfo(image.dtype).max
    else:
        in_max = 1.0
    if np.issubdtype(dtype, np.unsignedinteger):
        out_max = np.iinfo(dtype).max
    else:
        out_max = 1.0

    # Scale the image to the new bit depth
    scale = scale * out_max / in_max
    image = (image * scale).astype(dtype)
    return image


def get_image_lightness(image: np.ndarray) -> float:
    """
    Calculate the lightness of an sRGB image, taking shortcuts for speed.
    :param image: numpy array representing the sRGB image.
    :return: approximate perceived lightness of the image, from 0 to 100.
    """
    # Scale image to [0, 1]
    if np.issubdtype(image.dtype, np.unsignedinteger):
        image = image / np.iinfo(image.dtype).max
    # Rough conversion to linear RGB
    image = image**2.2
    # Average to a single color and return that color's lightness
    color = np.mean(image, axis=(0, 1))
    return get_lightness((color[0], color[1], color[2]), srgb=False)


def get_lightness(color: tuple[float, float, float], srgb: bool = True) -> float:
    """
    Calculate the lightness of an sRGB color, taking shortcuts for speed.
    :param color: an sRGB or linear RGB color as a tuple, with values in [0, 1].
    :param srgb: whether the color is in sRGB or linear RGB.
    :return: approximate perceived lightness of the color, from 0 to 100.
    """
    if srgb:
        # Convert to linear color, rough and quick
        rgb = color[0] ** 2.2, color[1] ** 2.2, color[2] ** 2.2
    else:
        rgb = color
    # Convert to luminance
    luminance = 0.2126 * rgb[0] + 0.7152 * rgb[1] + 0.0722 * rgb[2]
    # Convert to perceived lightness
    if luminance <= 0.008856:
        return 903.3 * luminance
    else:
        return 116 * luminance ** (1 / 3) - 16
opened_font: PIL.ImageFont.FreeTypeFont | None = None
def extract_mask_info( mask: numpy.ndarray, images: list[numpy.ndarray] = None, image_labels: list[str] = None, properties: list[str] = None) -> pandas.core.frame.DataFrame:

Extracts events from a mask. Originally from @vishnu.

Parameters
  • mask: mask to extract events from
  • images: list of intensity images to extract from
  • image_labels: list of labels for images
  • properties: list of properties to extract in addition to the defaults (label, centroid). See https://scikit-image.org/docs/stable/api/skimage.measure.html#skimage.measure.regionprops for additional properties.
Returns

pd.DataFrame with columns id, y, x, plus any extra properties, or an empty DataFrame
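A minimal usage sketch (not from the library's docs; the tiny mask, the "dapi" label, and the extra "intensity_mean" property are illustrative assumptions):

import numpy as np
from csi_images.csi_images import extract_mask_info

# Two labeled regions in an 8x8 mask; 0 is background
mask = np.zeros((8, 8), dtype=np.uint16)
mask[1:3, 1:3] = 1
mask[5:8, 5:8] = 2
# One intensity channel, labeled "dapi" for column naming
dapi = np.random.default_rng(0).random((8, 8))
info = extract_mask_info(
    mask,
    images=[dapi],
    image_labels=["dapi"],
    properties=["intensity_mean"],
)
print(info.columns.tolist())  # expect ['id', 'y', 'x', 'dapi_intensity_mean']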

def add_mask_overlay( images: numpy.ndarray | list[numpy.ndarray], mask: numpy.ndarray[numpy.uint8], overlay_color: tuple[float, float, float] = (0.8, 1, 0)):

Creates a 1-pixel wide border around the mask in the image.

Parameters
  • images: (H, W, 3), or (H, W) image or list of images.
  • mask: (H, W) binary mask to overlay on the image.
  • overlay_color: color of the outline for RGB images. Ignored for grayscale images.
Returns

image or list of images with the mask outline drawn in, matching the input form
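A short sketch of the overlay behavior (the all-black uint8 image and square mask below are made up for illustration):

import numpy as np
from csi_images.csi_images import add_mask_overlay

image = np.zeros((32, 32, 3), dtype=np.uint8)   # black RGB image
mask = np.zeros((32, 32), dtype=np.uint8)
mask[8:24, 8:24] = 1                            # filled square
outlined = add_mask_overlay(image, mask)        # default overlay_color=(0.8, 1, 0)
print(outlined.shape, outlined.dtype)           # (32, 32, 3), same dtype as the input

Passing a single array returns a single array; passing a list of images returns a list.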
def make_rgb( images: list[numpy.ndarray], colors: list[tuple[float, float, float]], mask: numpy.ndarray[numpy.uint8] = None, mask_mode: Literal['overlay', 'hard'] = 'overlay', overlay_color: tuple[float, float, float] = (0.8, 1, 0)) -> numpy.ndarray:

Combine multiple channels into a single RGB image.

Parameters
  • images: list of numpy arrays representing the channels.
  • colors: list of RGB tuples for each channel.
  • mask: numpy array representing a mask to overlay on the image.
  • mask_mode: whether to overlay the mask or use it as a hard mask.
  • overlay_color: color of the outline for RGB images. Ignored for grayscale images.
Returns

(H, W, 3) numpy array representing the RGB image.
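A hedged sketch of combining two 16-bit channels (the random data and the blue/red color choices are just for illustration):

import numpy as np
from csi_images.csi_images import make_rgb

rng = np.random.default_rng(0)
dapi = rng.integers(0, 2**16, (64, 64), dtype=np.uint16)
cy5 = rng.integers(0, 2**16, (64, 64), dtype=np.uint16)
rgb = make_rgb([dapi, cy5], [(0, 0, 1), (1, 0, 0)])  # DAPI in blue, Cy5 in red
print(rgb.shape, rgb.dtype)  # (64, 64, 3) uint16, clipped to the input range

Colors act as per-channel gains, so values above 1 brighten a channel at the cost of possible clipping.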

def make_montage( images: list[numpy.ndarray], order: list[int] | None, composites: dict[int, tuple[float, float, float]] | None, mask: numpy.ndarray[numpy.uint8] = None, labels: list[str] = None, mask_mode: Literal['overlay', 'hard'] = 'overlay', overlay_color: tuple[float, float, float] = (0.8, 1, 0), label_font: str = 'Roboto-Regular.ttf', label_size: int | float = 0.18, label_outline: bool = True, colored_labels: bool = True, border_size: int = 1, horizontal: bool = True, dtype=numpy.uint8) -> numpy.ndarray:

Combine multiple images into a single montage based on order. Can include a composite (always first).

Parameters
  • images: list of numpy arrays representing the images.
  • order: list of indices for the images going into the montage or None.
  • composites: dictionary of indices and RGB tuples for a composite or None.
  • mask: numpy array representing a mask to overlay on the image.
  • mask_mode: whether to overlay the mask or use it as a hard mask.
  • overlay_color: color of the outline for RGB images. Ignored for grayscale.
  • labels: list of labels for the images. If length == len(order), will apply to grayscale images only; if length == len(order) + 1 and composites exist, will apply to all images.
  • label_font: path to a font file for labels. See PIL.ImageFont for details.
  • label_size: size of the font for labels. If a float, calculates a font size as a fraction of the image size.
  • label_outline: whether to draw an outline around the label text.
  • colored_labels: whether to color the labels based on the composites.
  • border_size: width of the border between images.
  • horizontal: whether to stack images horizontally or vertically.
  • dtype: the dtype of the output montage.
Returns

numpy array representing the montage.
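A minimal sketch of a montage call (the four random channels and the channel/color pairing are assumptions for illustration; labels are omitted so no font file is needed):

import numpy as np
from csi_images.csi_images import make_montage, get_montage_shape

rng = np.random.default_rng(0)
channels = [rng.integers(0, 2**16, (64, 64), dtype=np.uint16) for _ in range(4)]
montage = make_montage(
    channels,
    order=[0, 1, 2, 3],                       # grayscale panels, in this order
    composites={0: (0, 0, 1), 1: (1, 0, 0)},  # composite of channels 0 and 1, shown first
)
print(montage.shape)  # (66, 326, 3): five 64x64 panels plus 1-pixel borders
print(get_montage_shape((64, 64), 5))  # same shape, computed directly

To add labels, pass labels=[...] with one entry per grayscale panel (or one extra entry for the composite, listed first) and a label_font path that resolves to a .ttf file on your system.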

def get_montage_shape( image_shape: tuple[int, int], n_images: int, border_size: int = 1, horizontal: bool = True) -> tuple[int, int, int]:

Determine the size of the montage based on the images and order.

Parameters
  • image_shape: tuple of height, width of the base images going into the montage.
  • n_images: how many images are going into the montage, including composite.
  • border_size: width of the border between images.
  • horizontal: whether to stack images horizontally or vertically.
Returns

tuple of the height, width, and channels (always 3) of the montage.
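For example (illustrative numbers only):

from csi_images.csi_images import get_montage_shape

print(get_montage_shape((64, 64), 5))                    # (66, 326, 3): one row of five panels
print(get_montage_shape((64, 64), 5, horizontal=False))  # (326, 66, 3): one column of five panels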

def scale_bit_depth( image: numpy.ndarray, dtype: numpy.dtype, real_bits: int = None) -> numpy.ndarray:

Converts the image to the desired bit depth, factoring in real bit depth.

Parameters
  • image: numpy array representing the image.
  • dtype: the desired dtype of the image.
  • real_bits: the actual bit depth of the image, such as from a 14-bit camera.
Returns

numpy array representing the image with the new dtype.
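A small sketch (the example arrays are made up; real_bits=12 assumes a 12-bit sensor whose data is stored in a uint16 container):

import numpy as np
from csi_images.csi_images import scale_bit_depth

img8 = np.array([[0, 128, 255]], dtype=np.uint8)
print(scale_bit_depth(img8, np.uint16))  # [[    0 32896 65535]]

frame = np.array([[0, 4095]], dtype=np.uint16)           # 12-bit data in a uint16 array
full = scale_bit_depth(frame, np.uint16, real_bits=12)   # stretched to roughly 0..65535
norm = scale_bit_depth(frame, np.float64, real_bits=12)  # stretched to roughly 0..1.0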

def get_image_lightness(image: numpy.ndarray) -> float:

Calculate the lightness of an sRGB image, taking shortcuts for speed.

Parameters
  • image: numpy array representing the sRGB image.
Returns

approximate perceived lightness of the image, from 0 to 100.
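For instance (synthetic images, made up for illustration):

import numpy as np
from csi_images.csi_images import get_image_lightness

dark = np.zeros((16, 16, 3), dtype=np.uint8)
light = np.full((16, 16, 3), 230, dtype=np.uint8)
print(get_image_lightness(dark))   # 0.0
print(get_image_lightness(light))  # about 92, well above the 50 cutoff make_montage uses for label colors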

def get_lightness(color: tuple[float, float, float], srgb: bool = True) -> float:

Calculate the lightness of an sRGB color, taking shortcuts for speed.

Parameters
  • color: an sRGB or linear RGB color as a tuple, with values in [0, 1].
  • srgb: whether the color is in sRGB or linear RGB.
Returns

approximate perceived lightness of the color, from 0 to 100.
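For instance:

from csi_images.csi_images import get_lightness

print(get_lightness((1.0, 1.0, 1.0)))   # essentially 100 (white)
print(get_lightness((0.0, 0.0, 0.0)))   # 0.0 (black)
print(get_lightness((0.8, 1, 0)) > 50)  # True: the default overlay color reads as light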