csi_images.csi_images

import numpy as np
import pandas as pd
from skimage.measure import regionprops_table


def extract_mask_info(
    mask: np.ndarray,
    images: list[np.ndarray] = None,
    image_labels: list[str] = None,
    properties: list[str] = None,
) -> pd.DataFrame:
    """
    Extracts events from a mask. Originated from @vishnu
    :param mask: mask to extract events from
    :param images: list of intensity images to extract from
    :param image_labels: list of labels for images
    :param properties: list of properties to extract in addition to the defaults:
    label, centroid. See
    https://scikit-image.org/docs/stable/api/skimage.measure.html#skimage.measure.regionprops
    for additional properties.
    :return: pd.DataFrame with columns id, x, y, plus any extra properties,
    or an empty DataFrame if the mask is empty
    """
    # Return empty if the mask is empty
    if np.max(mask) == 0:
        return pd.DataFrame()
    # Reshape any intensity images
    if images is not None:
        if isinstance(images, list):
            images = np.stack(images, axis=-1)
        if image_labels is not None and len(image_labels) != images.shape[-1]:
            raise ValueError("Number of image labels must match number of images.")
    # Accumulate any extra properties
    base_properties = ["label", "centroid"]
    if properties is not None:
        properties = base_properties + properties
    else:
        properties = base_properties

    # Use skimage.measure.regionprops_table to compute properties
    info = pd.DataFrame(
        regionprops_table(mask, intensity_image=images, properties=properties)
    )

    # Rename columns to match desired output
    info = info.rename(
        columns={
            "label": "id",
            "centroid-0": "y",
            "centroid-1": "x",
        },
    )
    # Prefix per-channel columns (e.g. intensity_mean-0) with their image labels
    if image_labels is not None:
        renamings = {}
        for column in info.columns:
            for i in range(len(image_labels)):
                suffix = f"-{i}"
                if column.endswith(suffix):
                    renamings[column] = f"{image_labels[i]}_{column[:-len(suffix)]}"
        info = info.rename(columns=renamings)

    return info


def make_rgb(
    images: list[np.ndarray], colors: list[tuple[float, float, float]]
) -> np.ndarray:
    """
    Combine multiple channels into a single RGB image.
    :param images: list of numpy arrays representing the channels.
    :param colors: list of RGB tuples for each channel.
    :return: numpy array representing the RGB image, with the same dtype as the inputs.
    """
    if len(images) == 0:
        raise ValueError("No images provided.")
    if len(colors) == 0:
        raise ValueError("No colors provided.")
    if len(images) != len(colors):
        raise ValueError("Number of images and colors must match.")
    if not all([isinstance(image, np.ndarray) for image in images]):
        raise ValueError("Images must be numpy arrays.")
    if not all([len(c) == 3 for c in colors]):
        raise ValueError("Colors must be RGB tuples.")

    # Create an output with the same shape and a larger dtype to avoid overflow
    dims = images[0].shape
    dtype = images[0].dtype
    if dtype not in [np.uint8, np.uint16]:
        raise ValueError("Image dtype must be uint8 or uint16.")
    rgb = np.zeros((*dims, 3), dtype=np.uint16 if dtype == np.uint8 else np.uint32)

    # Combine images with colors (can also be thought of as gains)
    for image, color in zip(images, colors):
        if image.shape != dims:
            raise ValueError("All images must have the same shape.")
        if image.dtype != dtype:
            raise ValueError("All images must have the same dtype.")
        rgb[..., 0] += (image * color[0]).astype(rgb.dtype)
        rgb[..., 1] += (image * color[1]).astype(rgb.dtype)
        rgb[..., 2] += (image * color[2]).astype(rgb.dtype)

    # Cut off any overflow and convert back to original dtype
    rgb = np.clip(rgb, np.iinfo(dtype).min, np.iinfo(dtype).max).astype(dtype)
    return rgb


def make_montage(
    images: list[np.ndarray],
    order: list[int] = None,
    composites: dict[int, tuple[float, float, float]] = None,
    border_size: int = 1,
    horizontal: bool = True,
    dtype=np.uint8,
) -> np.ndarray:
    """
    Combine multiple images into a single montage based on order.
    Can include a composite (always first).
    :param images: list of numpy arrays representing the images.
    :param order: list of indices for the images going into the montage.
    :param composites: dictionary of indices and RGB tuples for a composite.
    :param border_size: width of the border between images.
    :param horizontal: whether to stack images horizontally or vertically.
    :param dtype: the dtype of the output montage.
    :return: numpy array representing the montage.
    """
    if len(images) == 0:
        raise ValueError("No images provided.")
    if not all([isinstance(image, np.ndarray) for image in images]):
        raise ValueError("Images must be numpy arrays.")
    if not all([len(image.shape) == 2 for image in images]):
        raise ValueError("Images must be 2D.")
    if composites is not None and not all([len(c) == 3 for c in composites.values()]):
        raise ValueError("Composites must be RGB tuples.")
    if order is None and composites is None:
        raise ValueError("No images or composites requested.")

    # Create the montage, filled with white (which becomes the border color)
    n_images = len(order) if order is not None else 0
    n_images += 1 if composites is not None else 0
    montage = np.full(
        get_montage_shape(images[0].shape, n_images, border_size, horizontal),
        np.iinfo(dtype).max,  # White fill
        dtype=dtype,
    )

    # Populate the montage with images
    offset = border_size  # Keeps track of the offset for the next image
    image_height, image_width = images[0].shape

    # Composite first
    if composites is not None:
        image = make_rgb(
            [images[i] for i in composites.keys()],
            list(composites.values()),
        )
        image = scale_bit_depth(image, dtype)
        if horizontal:
            montage[
                border_size : border_size + image_height,
                offset : offset + image_width,
            ] = image
            offset += image_width + border_size
        else:
            montage[
                offset : offset + image_height,
                border_size : border_size + image_width,
            ] = image
            offset += image_height + border_size

    # Grayscale images in the requested order next
    if order is not None:
        for i in order:
            image = images[i]
            image = scale_bit_depth(image, dtype)
            image = np.tile(image[..., None], (1, 1, 3))  # Make 3-channel
            if horizontal:
                montage[
                    border_size : border_size + image_height,
                    offset : offset + image_width,
                ] = image
                offset += image_width + border_size
            else:
                montage[
                    offset : offset + image_height,
                    border_size : border_size + image_width,
                ] = image
                offset += image_height + border_size

    return montage


def get_montage_shape(
    image_shape: tuple[int, int],
    n_images: int,
    border_size: int = 1,
    horizontal: bool = True,
) -> tuple[int, int, int]:
    """
    Determine the size of the montage based on the image shape and number of images.
    :param image_shape: tuple of height, width of the base images going into the montage.
    :param n_images: how many images are going into the montage, including composite.
    :param border_size: width of the border between images.
    :param horizontal: whether to stack images horizontally or vertically.
    :return: tuple of the height, width, and channels (always 3) of the montage.
    """
    if len(image_shape) != 2:
        raise ValueError("Image shape must be a tuple of height, width.")
    if image_shape[0] < 1 or image_shape[1] < 1:
        raise ValueError("Image shape must be positive.")
    if not isinstance(n_images, int) or n_images < 1:
        raise ValueError("Number of images must be a positive integer.")

    # Determine the grid layout: a single row or a single column
    if horizontal:
        n_rows = 1
        n_cols = n_images
    else:
        n_rows = n_images
        n_cols = 1

    # Determine the montage size: images plus borders on all sides
    image_height, image_width = image_shape
    montage_height = n_rows * image_height + (n_rows + 1) * border_size
    montage_width = n_cols * image_width + (n_cols + 1) * border_size

    return montage_height, montage_width, 3  # 3 for RGB


def scale_bit_depth(
    image: np.ndarray, dtype: np.dtype, real_bits: int = None
) -> np.ndarray:
    """
    Converts the image to the desired bit depth, factoring in real bit depth.
    :param image: numpy array representing the image.
    :param dtype: the desired dtype of the image.
    :param real_bits: the actual bit depth of the image, such as from a 14-bit camera.
    :return: numpy array representing the image with the new dtype.
    """
    if not isinstance(image, np.ndarray):
        raise ValueError("Image must be a numpy array.")
    if not np.issubdtype(image.dtype, np.unsignedinteger) and not np.issubdtype(
        image.dtype, np.floating
    ):
        raise ValueError("Input image dtype must be an unsigned integer or float.")
    if np.issubdtype(image.dtype, np.floating) and (
        np.min(image) < 0 or np.max(image) > 1
    ):
        raise ValueError("Image values must be between 0 and 1.")
    if not np.issubdtype(dtype, np.unsignedinteger) and not np.issubdtype(
        dtype, np.floating
    ):
        raise ValueError("Output dtype must be an unsigned integer or float.")

    # First, determine the scaling required for the real bit depth
    scale = 1
    if real_bits is not None and np.issubdtype(image.dtype, np.unsignedinteger):
        dtype_bit_depth = np.iinfo(image.dtype).bits
        if real_bits > dtype_bit_depth:
            raise ValueError("Real bits must be less than or equal to image bit depth")
        elif real_bits < dtype_bit_depth:
            # We should scale up the values to the new bit depth;
            # an n-bit image can hold values of at most 2**n - 1
            if np.max(image) >= 2**real_bits:
                raise ValueError("Image values exceed real bit depth; already scaled?")
            scale = np.iinfo(image.dtype).max / (2**real_bits - 1)

    # Minimum is 0 (unsigned) or validated above (float); determine the max
    if np.issubdtype(image.dtype, np.unsignedinteger):
        in_max = np.iinfo(image.dtype).max
    else:
        in_max = 1.0
    if np.issubdtype(dtype, np.unsignedinteger):
        out_max = np.iinfo(dtype).max
    else:
        out_max = 1.0

    # Scale the image to the new bit depth
    scale = scale * out_max / in_max
    image = (image * scale).astype(dtype)
    return image
def extract_mask_info( mask: numpy.ndarray, images: list[numpy.ndarray] = None, image_labels: list[str] = None, properties: list[str] = None) -> pandas.core.frame.DataFrame:

Extracts events from a mask. Originated from @vishnu

Parameters
  • mask: mask to extract events from
  • images: list of intensity images to extract from
  • image_labels: list of labels for images
  • properties: list of properties to extract in addition to the defaults (label, centroid); see https://scikit-image.org/docs/stable/api/skimage.measure.html#skimage.measure.regionprops for additional properties.
Returns

pd.DataFrame with columns id, x, y, plus any requested extra properties, or an empty DataFrame
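
A minimal usage sketch (not from the library itself): the labeled mask, the random intensity arrays, and the "dapi"/"cy5" labels below are synthetic placeholders.

import numpy as np
from csi_images.csi_images import extract_mask_info

# Synthetic labeled mask with two events and two matching intensity channels
mask = np.zeros((100, 100), dtype=np.uint16)
mask[10:20, 10:20] = 1
mask[50:70, 40:60] = 2
dapi = np.random.randint(0, 65535, (100, 100), dtype=np.uint16)
cy5 = np.random.randint(0, 65535, (100, 100), dtype=np.uint16)

info = extract_mask_info(
    mask,
    images=[dapi, cy5],
    image_labels=["dapi", "cy5"],
    properties=["intensity_mean", "area"],
)
# Columns like: id, y, x, dapi_intensity_mean, cy5_intensity_mean, area
print(info)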

def make_rgb( images: list[numpy.ndarray], colors: list[tuple[float, float, float]]) -> numpy.ndarray:

Combine multiple channels into a single RGB image.

Parameters
  • images: list of numpy arrays representing the channels.
  • colors: list of RGB tuples for each channel.
Returns

numpy array representing the RGB image, with the same dtype as the inputs.
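
A short sketch of combining two channels into a composite; the random uint16 arrays and the blue/red color choices are illustrative placeholders.

import numpy as np
from csi_images.csi_images import make_rgb

# Two synthetic uint16 channels standing in for real fluorescence images
dapi = np.random.randint(0, 65535, (64, 64), dtype=np.uint16)
cy5 = np.random.randint(0, 65535, (64, 64), dtype=np.uint16)

# Render the first channel in blue and the second in red; tuples act as per-channel gains
rgb = make_rgb([dapi, cy5], [(0.0, 0.0, 1.0), (1.0, 0.0, 0.0)])
print(rgb.shape, rgb.dtype)  # (64, 64, 3) uint16
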
def make_montage( images: list[numpy.ndarray], order: list[int] = None, composites: dict[int, tuple[float, float, float]] = None, border_size: int = 1, horizontal: bool = True, dtype=numpy.uint8) -> numpy.ndarray:

Combine multiple images into a single montage based on order. Can include a composite (always first).

Parameters
  • images: list of numpy arrays representing the images.
  • order: list of indices for the images going into the montage.
  • composites: dictionary of indices and RGB tuples for a composite.
  • border_size: width of the border between images.
  • horizontal: whether to stack images horizontally or vertically.
  • dtype: the dtype of the output montage.
Returns

numpy array representing the montage.
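
A sketch of building a horizontal montage with a composite tile first; the three random channels and the color assignments are placeholders.

import numpy as np
from csi_images.csi_images import make_montage

# Three synthetic uint16 channels standing in for real fluorescence images
channels = [
    np.random.randint(0, 65535, (32, 32), dtype=np.uint16) for _ in range(3)
]

# Composite tile (channel 0 in blue, channel 1 in red) first,
# then channels 0-2 as grayscale tiles, separated by 2-pixel borders
montage = make_montage(
    channels,
    order=[0, 1, 2],
    composites={0: (0.0, 0.0, 1.0), 1: (1.0, 0.0, 0.0)},
    border_size=2,
    horizontal=True,
)
print(montage.shape, montage.dtype)  # (36, 138, 3) uint8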

def get_montage_shape( image_shape: tuple[int, int], n_images: int, border_size: int = 1, horizontal: bool = True) -> tuple[int, int, int]:

Determine the size of the montage based on the image shape and number of images.

Parameters
  • image_shape: tuple of height, width of the base images going into the montage.
  • n_images: how many images are going into the montage, including composite.
  • border_size: width of the border between images.
  • horizontal: whether to stack images horizontally or vertically.
Returns

tuple of the height, width, and channels (always 3) of the montage.
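
A quick geometry check, using the same tile size, count, and border width as the montage sketch above.

from csi_images.csi_images import get_montage_shape

# Geometry for 4 tiles of 32x32 in a single row with 2-pixel borders
height, width, channels = get_montage_shape((32, 32), 4, border_size=2, horizontal=True)
print(height, width, channels)  # 36 138 3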

def scale_bit_depth( image: numpy.ndarray, dtype: numpy.dtype, real_bits: int = None) -> numpy.ndarray:

Converts the image to the desired bit depth, factoring in real bit depth.

Parameters
  • image: numpy array representing the image.
  • dtype: the desired dtype of the image.
  • real_bits: the actual bit depth of the image, such as from a 14-bit camera.
Returns

numpy array representing the image with the new dtype.
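
A sketch of rescaling a frame from a hypothetical 14-bit camera stored in a uint16 container; the random frame is a placeholder.

import numpy as np
from csi_images.csi_images import scale_bit_depth

# Synthetic frame from a hypothetical 14-bit camera, stored in a uint16 container
frame = np.random.randint(0, 2**14, (64, 64), dtype=np.uint16)

# Stretch the 14 real bits to the full uint16 range
full_range = scale_bit_depth(frame, np.uint16, real_bits=14)

# Or convert to uint8 (e.g. for display), still accounting for the 14 real bits
preview = scale_bit_depth(frame, np.uint8, real_bits=14)
print(full_range.dtype, preview.dtype)  # uint16 uint8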