csi_images.csi_images

  1import numpy as np
  2import pandas as pd
  3
  4from PIL import Image, ImageFont, ImageDraw
  5from skimage.measure import regionprops_table
  6
  7# Avoid opening multiple fonts and re-opening fonts
  8opened_font: ImageFont.FreeTypeFont | None = None
  9
 10
 11def extract_mask_info(
 12    mask: np.ndarray,
 13    images: list[np.ndarray] = None,
 14    image_labels: list[str] = None,
 15    properties: list[str] = None,
 16) -> pd.DataFrame:
 17    """
 18    Extracts events from a mask. Originated from @vishnu
 19    :param mask: mask to extract events from
 20    :param images: list of intensity images to extract from
 21    :param image_labels: list of labels for images
 22    :param properties: list of properties to extract in addition to the defaults:
 23    label, centroid. See
 24    https://scikit-image.org/docs/stable/api/skimage.measure.html#skimage.measure.regionprops
 25    for additional properties.
 26    :return: pd.DataFrame with columns id, y, x, plus any extra properties, or an empty DataFrame
 27    """
 28    # Return empty if the mask is empty
 29    if np.max(mask) == 0:
 30        return pd.DataFrame()
 31    # Reshape any intensity images
 32    if images is not None:
 33        if isinstance(images, list):
 34            images = np.stack(images, axis=-1)
 35        if image_labels is not None and len(image_labels) != images.shape[-1]:
 36            raise ValueError("Number of image labels must match number of images.")
 37    # Accumulate any extra properties
 38    base_properties = ["label", "centroid"]
 39    if properties is not None:
 40        properties = base_properties + properties
 41    else:
 42        properties = base_properties
 43
 44    # Use skimage.measure.regionprops_table to compute properties
 45    info = pd.DataFrame(
 46        regionprops_table(mask, intensity_image=images, properties=properties)
 47    )
 48
 49    # Rename columns to match desired output
 50    info = info.rename(
 51        columns={
 52            "label": "id",
 53            "centroid-0": "y",
 54            "centroid-1": "x",
 55        },
 56    )
 57    renamings = {}
 58    for column in info.columns:
 59        for i in range(len(image_labels or [])):  # Tolerate missing labels
 60            suffix = f"-{i}"
 61            if column.endswith(suffix):
 62                renamings[column] = f"{image_labels[i]}_{column[:-len(suffix)]}"
 63    info = info.rename(columns=renamings)
 64
 65    return info
 66
 67
 68def make_rgb(
 69    images: list[np.ndarray], colors: list[tuple[float, float, float]]
 70) -> np.ndarray:
 71    """
 72    Combine multiple channels into a single RGB image.
 73    :param images: list of numpy arrays representing the channels.
 74    :param colors: list of RGB tuples for each channel.
 76    :return: numpy array representing the RGB image, with the same dtype as the inputs.
 76    """
 77    if len(images) == 0:
 78        raise ValueError("No images provided.")
 79    if len(colors) == 0:
 80        raise ValueError("No colors provided.")
 81    if len(images) != len(colors):
 82        raise ValueError("Number of images and colors must match.")
 83    if not all([isinstance(image, np.ndarray) for image in images]):
 84        raise ValueError("Images must be numpy arrays.")
 85    if not all([len(c) == 3 for c in colors]):
 86        raise ValueError("Colors must be RGB tuples.")
 87
 88    # Create an output with same shape and larger type to avoid overflow
 89    dims = images[0].shape
 90    dtype = images[0].dtype
 91    if dtype not in [np.uint8, np.uint16]:
 92        raise ValueError("Image dtype must be uint8 or uint16.")
 93    rgb = np.zeros((*dims, 3), dtype=np.uint16 if dtype == np.uint8 else np.uint32)
 94
 95    # Combine images with colors (can also be thought of as gains)
 96    for image, color in zip(images, colors):
 97        if image.shape != dims:
 98            raise ValueError("All images must have the same shape.")
 99        if image.dtype != dtype:
100            raise ValueError("All images must have the same dtype.")
101        rgb[..., 0] += (image * color[0]).astype(rgb.dtype)
102        rgb[..., 1] += (image * color[1]).astype(rgb.dtype)
103        rgb[..., 2] += (image * color[2]).astype(rgb.dtype)
104
105    # Cut off any overflow and convert back to original dtype
106    rgb = np.clip(rgb, np.iinfo(dtype).min, np.iinfo(dtype).max).astype(dtype)
107    return rgb
108
109
110def make_montage(
111    images: list[np.ndarray],
112    order: list[int] | None,
113    composites: dict[int, tuple[float, float, float]] | None,
114    labels: list[str] = None,
115    label_font: str = "Roboto-Regular.ttf",
116    label_size: int | float = 0.18,
117    label_outline: bool = True,
118    colored_labels: bool = True,
119    border_size: int = 1,
120    horizontal: bool = True,
121    dtype=np.uint8,
122) -> np.ndarray:
123    """
124    Combine multiple images into a single montage based on order.
125    Can include a composite (always first).
126    :param images: list of numpy arrays representing the images.
127    :param order: list of indices for the images going into the montage or None.
128    :param composites: dictionary of indices and RGB tuples for a composite or None.
129    :param labels: list of labels for the images. If length == len(order), will apply to
130    grayscale images only; if length == len(order) + 1 and composites exist, will apply
131    to all images.
132    :param label_font: path to a font file for labels. See PIL.ImageFont for details.
133    :param label_size: size of the font for labels. If a float, calculates a font size
134    as that fraction of the image width.
135    :param label_outline: whether to draw an outline around the label text.
136    :param colored_labels: whether to color the labels based on the composites.
137    :param border_size: width of the border between images.
138    :param horizontal: whether to stack images horizontally or vertically.
139    :param dtype: the dtype of the output montage.
140    :return: numpy array representing the montage.
141    """
142    if len(images) == 0:
143        raise ValueError("No images provided.")
144    if not all([isinstance(image, np.ndarray) for image in images]):
145        raise ValueError("Images must be numpy arrays.")
146    if not all([len(image.shape) == 2 for image in images]):
147        raise ValueError("Images must be 2D.")
148    if composites is not None and not all([len(c) == 3 for c in composites.values()]):
149        raise ValueError("Composites must be RGB tuples.")
150
151    n_images = len(order) if order is not None else 0
152    n_images += 1 if composites is not None else 0
153
154    if n_images == 0:
155        raise ValueError("No images or composites requested.")
156
157    # Adapt label font size if necessary
158    if isinstance(label_size, float):
159        label_size = int(images[0].shape[1] * label_size)
160
161    # Fill the montage background with white (the dtype's max value)
162    montage = np.full(
163        get_montage_shape(images[0].shape, n_images, border_size, horizontal),
164        np.iinfo(dtype).max,  # White fill
165        dtype=dtype,
166    )
167
168    # Load font if necessary
169    global opened_font
170    if labels is not None and len(order or []) <= len(labels) <= n_images:
171        if (
172            opened_font is None
173            or opened_font.path != label_font
174            or opened_font.size != label_size
175        ):
176            opened_font = ImageFont.truetype(label_font, label_size)
177    elif labels is not None:
178        raise ValueError("Number of labels must be 0, match order, or match images.")
179
180    # Populate the montage with images
181    offset = border_size  # Keeps track of the offset for the next image
182    image_height, image_width = images[0].shape
183
184    # Composite first
185    if composites is not None and len(composites) > 0:
186        image = make_rgb(
187            [images[i] for i in composites.keys()],
188            list(composites.values()),
189        )
190        image = scale_bit_depth(image, dtype)
191        if labels is not None and len(labels) == n_images:
192            # Draw a label on the composite
193            pillow_image = Image.fromarray(image)
194            # Choose fill colors from the lightness of the strip behind the label
195            included_height = min(label_size * 2, image.shape[0])
196            if get_image_lightness(image[-included_height:, :, :]) > 50:
197                text_fill = (0, 0, 0)
198                outline_fill = (255, 255, 255)
199            else:
200                text_fill = (255, 255, 255)
201                outline_fill = (0, 0, 0)
202            draw = ImageDraw.Draw(pillow_image, "RGB")
203            draw.text(
204                (image.shape[1] // 2, image.shape[0]),  # (x, y) = bottom center
205                labels[0],
206                fill=text_fill,
207                anchor="md",  # Middle, descender (absolute bottom of font)
208                font=opened_font,
209                stroke_width=round(label_size / 10) if label_outline else 0,
210                stroke_fill=outline_fill,
211            )
212            image = np.asarray(pillow_image)
213            labels = labels[1:]
214
215        if horizontal:
216            montage[
217                border_size : border_size + image_height,
218                offset : offset + image_width,
219            ] = image
220            offset += image_width + border_size
221        else:
222            montage[
223                offset : offset + image_height,
224                border_size : border_size + image_width,
225            ] = image
226            offset += image_height + border_size
227
228    # Grayscale order next
229    order = [] if order is None else order
230    for i, o in enumerate(order):
231        image = images[o]
232        image = scale_bit_depth(image, dtype)
233        image = np.tile(image[..., None], (1, 1, 3))  # Make 3-channel
234
235        if labels is not None and len(labels) == len(order):
236            pillow_image = Image.fromarray(image)
237            if colored_labels and composites is not None and o in composites:
238                text_fill = tuple(round(255 * rgb_f) for rgb_f in composites[o])
239                if get_lightness(composites[o]) > 50:
240                    outline_fill = (0, 0, 0)
241                else:
242                    outline_fill = (255, 255, 255)
243            else:
244                # Choose fill colors from the lightness of the strip behind the label
245                included_height = min(label_size * 2, image.shape[0])
246                if get_image_lightness(image[-included_height:, :, :]) > 50:
247                    text_fill = (0, 0, 0)
248                    outline_fill = (255, 255, 255)
249                else:
250                    text_fill = (255, 255, 255)
251                    outline_fill = (0, 0, 0)
252            draw = ImageDraw.Draw(pillow_image, "RGB")
253            draw.text(
254                (image.shape[1] // 2, image.shape[0]),  # (x, y) = bottom center
255                labels[i],
256                fill=text_fill,
257                anchor="md",  # Middle, descender (absolute bottom of font)
258                font=opened_font,
259                stroke_width=round(label_size / 10) if label_outline else 0,
260                stroke_fill=outline_fill,
261            )
262            image = np.asarray(pillow_image)
263
264        if horizontal:
265            montage[
266                border_size : border_size + image_height,
267                offset : offset + image_width,
268            ] = image
269            offset += image_width + border_size
270        else:
271            montage[
272                offset : offset + image_height,
273                border_size : border_size + image_width,
274            ] = image
275            offset += image_height + border_size
276
277    return montage
278
279
280def get_montage_shape(
281    image_shape: tuple[int, int],
282    n_images: int,
283    border_size: int = 1,
284    horizontal: bool = True,
285) -> tuple[int, int, int]:
286    """
287    Determine the size of the montage based on the images and order.
288    :param image_shape: tuple of height, width of the base images going into the montage.
289    :param n_images: how many images are going into the montage, including composite.
290    :param border_size: width of the border between images.
291    :param horizontal: whether to stack images horizontally or vertically.
292    :return: tuple of the height, width, and channels (always 3) of the montage.
293    """
294    if len(image_shape) != 2:
295        raise ValueError("Image shape must be a tuple of height, width.")
296    if image_shape[0] < 1 or image_shape[1] < 1:
297        raise ValueError("Image shape must be positive.")
298    if not isinstance(n_images, int) or n_images < 1:
299        raise ValueError("Number of images must be a positive integer.")
300
301    # Determine the size of the montage
302    if horizontal:
303        n_rows = 1
304        n_cols = n_images
305    else:
306        n_rows = n_images
307        n_cols = 1
308
309    # Determine the montage size
310    image_height, image_width = image_shape
311    montage_height = n_rows * image_height + (n_rows + 1) * border_size
312    montage_width = n_cols * image_width + (n_cols + 1) * border_size
313
314    return montage_height, montage_width, 3  # 3 for RGB
315
316
317def scale_bit_depth(
318    image: np.ndarray, dtype: np.dtype, real_bits: int = None
319) -> np.ndarray:
320    """
321    Converts the image to the desired bit depth, factoring in real bit depth.
322    :param image: numpy array representing the image.
323    :param dtype: the desired dtype of the image.
324    :param real_bits: the actual bit depth of the image, such as from a 14-bit camera.
325    :return: numpy array representing the image with the new dtype.
326    """
327    if not isinstance(image, np.ndarray):
328        raise ValueError("Image must be a numpy array.")
329    if not np.issubdtype(image.dtype, np.unsignedinteger) and not np.issubdtype(
330        image.dtype, np.floating
331    ):
332        raise ValueError("Input image dtype must be an unsigned integer or float.")
333    if np.issubdtype(image.dtype, np.floating) and (
334        np.min(image) < 0 or np.max(image) > 1
335    ):
336        raise ValueError("Image values must be between 0 and 1.")
337    if not np.issubdtype(dtype, np.unsignedinteger) and not np.issubdtype(
338        dtype, np.floating
339    ):
340        raise ValueError("Output dtype must be an unsigned integer or float.")
341
342    # First, determine the scaling required for the real bit depth
343    scale = 1
344    if real_bits is not None and np.issubdtype(image.dtype, np.unsignedinteger):
345        dtype_bit_depth = np.iinfo(image.dtype).bits
346        if real_bits > dtype_bit_depth:
347            raise ValueError("Real bits must be less than or equal to image bit depth")
348        elif real_bits < dtype_bit_depth:
349            # We should scale up the values to the new bit depth
350            if np.max(image) >= 2**real_bits:
351                raise ValueError("Image values exceed real bit depth; already scaled?")
352            scale = np.iinfo(image.dtype).max / (2**real_bits - 1)
353
354    # Already validated that the min is 0; determine the max
355    if np.issubdtype(image.dtype, np.unsignedinteger):
356        in_max = np.iinfo(image.dtype).max
357    else:
358        in_max = 1.0
359    if np.issubdtype(dtype, np.unsignedinteger):
360        out_max = np.iinfo(dtype).max
361    else:
362        out_max = 1.0
363
364    # Scale the image to the new bit depth
365    scale = scale * out_max / in_max
366    image = (image * scale).astype(dtype)
367    return image
368
369
370def get_image_lightness(image: np.ndarray) -> float:
371    """
372    Calculate the lightness of an sRGB image, taking shortcuts for speed.
373    :param image: numpy array representing the sRGB image.
374    :return: approximate perceived lightness of the image, from 0 to 100.
375    """
376    # Scale image to [0, 1]
377    if np.issubdtype(image.dtype, np.unsignedinteger):
378        image = image / np.iinfo(image.dtype).max
379    # Rough conversion to linear RGB
380    image = image**2.2
381    # Average to a single color and return that color's lightness
382    color = np.mean(image, axis=(0, 1))
383    return get_lightness((color[0], color[1], color[2]), srgb=False)
384
385
386def get_lightness(color: tuple[float, float, float], srgb: bool = True) -> float:
387    """
388    Calculate the lightness of an sRGB color, taking shortcuts for speed.
389    :param color: an sRGB or linear RGB color as a tuple, with values in [0, 1].
390    :param srgb: whether the color is in sRGB or linear RGB.
391    :return: approximate perceived lightness of the color, from 0 to 100.
392    """
393    if srgb:
394        # Convert to linear color, rough and quick
395        rgb = color[0] ** 2.2, color[1] ** 2.2, color[2] ** 2.2
396    else:
397        rgb = color
398    # Convert to luminance
399    luminance = 0.2126 * rgb[0] + 0.7152 * rgb[1] + 0.0722 * rgb[2]
400    # Convert to perceived lightness
401    if luminance <= 0.008856:
402        return 903.3 * luminance
403    else:
404        return 116 * luminance ** (1 / 3) - 16
opened_font: PIL.ImageFont.FreeTypeFont | None = None
def extract_mask_info( mask: numpy.ndarray, images: list[numpy.ndarray] = None, image_labels: list[str] = None, properties: list[str] = None) -> pandas.core.frame.DataFrame:

Extracts events from a mask. Originated from @vishnu

Parameters
  • mask: mask to extract events from
  • images: list of intensity images to extract from
  • image_labels: list of labels for images
  • properties: list of properties to extract in addition to the defaults: label, centroid. See https://scikit-image.org/docs/stable/api/skimage.measure.html#skimage.measure.regionprops for additional properties.
Returns

pd.DataFrame with columns id, y, x, plus any extra properties, or an empty DataFrame
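
A minimal usage sketch with synthetic data, assuming the module is importable as csi_images.csi_images and a recent scikit-image that accepts the intensity_mean and axis_major_length property names:

    import numpy as np
    from csi_images.csi_images import extract_mask_info

    # Synthetic labeled mask with two regions and one made-up intensity channel
    mask = np.zeros((16, 16), dtype=np.uint16)
    mask[2:5, 2:5] = 1
    mask[10:14, 8:12] = 2
    dapi = np.random.randint(0, 255, (16, 16), dtype=np.uint8)

    info = extract_mask_info(
        mask,
        images=[dapi],
        image_labels=["DAPI"],
        properties=["axis_major_length", "intensity_mean"],
    )
    # Columns include: id, y, x, axis_major_length, DAPI_intensity_mean
    print(info)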

def make_rgb( images: list[numpy.ndarray], colors: list[tuple[float, float, float]]) -> numpy.ndarray:

Combine multiple channels into a single RGB image.

Parameters
  • images: list of numpy arrays representing the channels.
  • colors: list of RGB tuples for each channel.
Returns

numpy array representing the RGB image, with the same dtype as the inputs.
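
A minimal usage sketch with two synthetic 8-bit channels (the import path is assumed from the module name):

    import numpy as np
    from csi_images.csi_images import make_rgb

    dapi = np.random.randint(0, 256, (64, 64), dtype=np.uint8)
    cy5 = np.random.randint(0, 256, (64, 64), dtype=np.uint8)

    # Map DAPI to blue and Cy5 to red; the tuple values act as per-channel gains
    rgb = make_rgb([dapi, cy5], [(0.0, 0.0, 1.0), (1.0, 0.0, 0.0)])
    assert rgb.shape == (64, 64, 3) and rgb.dtype == np.uint8
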
def make_montage( images: list[numpy.ndarray], order: list[int] | None, composites: dict[int, tuple[float, float, float]] | None, labels: list[str] = None, label_font: str = 'Roboto-Regular.ttf', label_size: int | float = 0.18, label_outline: bool = True, colored_labels: bool = True, border_size: int = 1, horizontal: bool = True, dtype=<class 'numpy.uint8'>) -> numpy.ndarray:

Combine multiple images into a single montage based on order. Can include a composite (always first).

Parameters
  • images: list of numpy arrays representing the images.
  • order: list of indices for the images going into the montage or None.
  • composites: dictionary of indices and RGB tuples for a composite or None.
  • labels: list of labels for the images. If length == len(order), will apply to grayscale images only; if length == len(order) + 1 and composites exist, will apply to all images.
  • label_font: path to a font file for labels. See PIL.ImageFont for details.
  • label_size: size of the font for labels. If a float, calculates a font size as that fraction of the image width.
  • label_outline: whether to draw an outline around the label text.
  • colored_labels: whether to color the labels based on the composites.
  • border_size: width of the border between images.
  • horizontal: whether to stack images horizontally or vertically.
  • dtype: the dtype of the output montage.
Returns

numpy array representing the montage.
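
A minimal usage sketch with synthetic channels; labels are omitted so that no font file is needed, and the import path is assumed from the module name:

    import numpy as np
    from csi_images.csi_images import make_montage

    channels = [np.random.randint(0, 256, (64, 64), dtype=np.uint8) for _ in range(3)]

    montage = make_montage(
        images=channels,
        order=[0, 1, 2],  # grayscale panels, in this order
        composites={0: (0.0, 0.0, 1.0), 2: (1.0, 0.0, 0.0)},  # composite panel comes first
        labels=None,  # skip labels to avoid loading a font
    )
    # 4 panels of 64x64 with 1-pixel borders: (1*64 + 2, 4*64 + 5, 3)
    assert montage.shape == (66, 261, 3)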

def get_montage_shape( image_shape: tuple[int, int], n_images: int, border_size: int = 1, horizontal: bool = True) -> tuple[int, int, int]:

Determine the size of the montage based on the images and order.

Parameters
  • image_shape: tuple of height, width of the base images going into the montage.
  • n_images: how many images are going into the montage, including composite.
  • border_size: width of the border between images.
  • horizontal: whether to stack images horizontally or vertically.
Returns

tuple of the height, width, and channels (always 3) of the montage.
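
For example, four 100x200 (height x width) panels side by side with 2-pixel borders:

    from csi_images.csi_images import get_montage_shape

    # height = 1*100 + 2*2 = 104, width = 4*200 + 5*2 = 810, always 3 channels
    assert get_montage_shape((100, 200), 4, border_size=2) == (104, 810, 3)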

def scale_bit_depth( image: numpy.ndarray, dtype: numpy.dtype, real_bits: int = None) -> numpy.ndarray:

Converts the image to the desired bit depth, factoring in real bit depth.

Parameters
  • image: numpy array representing the image.
  • dtype: the desired dtype of the image.
  • real_bits: the actual bit depth of the image, such as from a 14-bit camera.
Returns

numpy array representing the image with the new dtype.
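
A usage sketch for the 14-bit camera case mentioned above, with synthetic data and the import path assumed from the module name:

    import numpy as np
    from csi_images.csi_images import scale_bit_depth

    # 14-bit data stored in a uint16 container (values 0..16383)
    image = np.random.randint(0, 2**14, (32, 32), dtype=np.uint16)

    # Stretch the 14-bit range to the full uint16 range
    full_range = scale_bit_depth(image, np.uint16, real_bits=14)

    # Or scale down to 8-bit for display
    preview = scale_bit_depth(image, np.uint8, real_bits=14)
    assert preview.dtype == np.uint8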

def get_image_lightness(image: numpy.ndarray) -> float:

Calculate the lightness of an sRGB image, taking shortcuts for speed.

Parameters
  • image: numpy array representing the sRGB image.
Returns

approximate perceived lightness of the image, from 0 to 100.
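
A quick sketch of how this feeds label-color choices in make_montage, which treats 50 as the light/dark threshold (synthetic images, import path assumed):

    import numpy as np
    from csi_images.csi_images import get_image_lightness

    dark = np.zeros((10, 10, 3), dtype=np.uint8)
    bright = np.full((10, 10, 3), 230, dtype=np.uint8)

    print(get_image_lightness(dark))    # 0.0 -> white label text would be used
    print(get_image_lightness(bright))  # ~91 -> black label text would be used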

def get_lightness(color: tuple[float, float, float], srgb: bool = True) -> float:

Calculate the lightness of an sRGB color, taking shortcuts for speed.

Parameters
  • color: an sRGB or linear RGB color as a tuple, with values in [0, 1].
  • srgb: whether the color is in sRGB or linear RGB.
Returns

approximate perceived lightness of the color, from 0 to 100.
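
For example, a rough sanity check on two pure colors (import path assumed from the module name):

    from csi_images.csi_images import get_lightness

    print(get_lightness((0.0, 1.0, 0.0)))  # pure green, roughly 88
    print(get_lightness((0.0, 0.0, 1.0)))  # pure blue, roughly 32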