import numpy as np
import pytest

from numpy.testing import assert_allclose
from numpy.testing import assert_array_equal

from scipy import ndimage as ndi

import bigfish.stack as stack

# Note: these functions are excerpts. The small uint8 test image 'y' and the
# helper 'from_distance_to_instances' are defined elsewhere in the package
# and are not redefined here.


def test_log_filter():
    # float64
    y_float64 = stack.cast_img_float64(y)
    filtered_y_float64 = stack.log_filter(y_float64, 2)
    expected_y_float64 = np.array(
        [[0., 0., 0.02995949, 0.06212277, 0.07584532],
         [0., 0., 0.02581818, 0.05134284, 0.06123539],
         [0., 0., 0.01196859, 0.0253716, 0.02853162],
         [0., 0., 0., 0., 0.],
         [0., 0., 0., 0., 0.]],
        dtype=np.float64)
    assert_allclose(filtered_y_float64, expected_y_float64, rtol=1e-6)
    assert filtered_y_float64.dtype == np.float64

    # float32
    y_float32 = stack.cast_img_float32(y)
    filtered_y = stack.log_filter(y_float32, 2)
    expected_y = stack.cast_img_float32(expected_y_float64)
    assert_allclose(filtered_y, expected_y, rtol=1e-6)
    assert filtered_y.dtype == np.float32

    # uint8
    filtered_y = stack.log_filter(y, 2)
    expected_y = stack.cast_img_uint8(expected_y_float64)
    assert_array_equal(filtered_y, expected_y)
    assert filtered_y.dtype == np.uint8

    # uint16
    y_uint16 = stack.cast_img_uint16(y)
    filtered_y = stack.log_filter(y_uint16, 2)
    expected_y = stack.cast_img_uint16(expected_y_float64)
    assert_array_equal(filtered_y, expected_y)
    assert filtered_y.dtype == np.uint16
# NOTE: the parametrize decorator is assumed; the dtype list is inferred from
# the branching below and may differ from the original test file.
@pytest.mark.parametrize("dtype", [
    np.uint8, np.int8, np.uint16, np.int16, np.float16,
    np.uint32, np.int32, np.float32, np.float64])
def test_cast_uint16(dtype):
    # from integer to np.uint16
    if np.issubdtype(dtype, np.integer):
        x = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
        tensor = np.array(x).reshape((3, 3)).astype(dtype)
        tensor[2, 2] = np.iinfo(dtype).max

    # from float to np.uint16
    else:
        x = [[0.1, 0.2, 0.3], [0.4, 0.5, 0.6], [0.7, 0.8, 1.0]]
        tensor = np.array(x).reshape((3, 3)).astype(dtype)

    # cast in uint16
    if dtype in [np.uint8, np.int8, np.uint16, np.int16, np.float16]:
        tensor_uint16 = stack.cast_img_uint16(tensor)
    else:
        with pytest.warns(UserWarning):
            tensor_uint16 = stack.cast_img_uint16(tensor)
    assert tensor_uint16.dtype == np.uint16
def test_gaussian_filter():
    # float64
    y_float64 = stack.cast_img_float64(y)
    filtered_y_float64 = stack.gaussian_filter(y_float64, 2)
    expected_y_float64 = np.array(
        [[0.08928096, 0.1573019, 0.22897881, 0.28086597, 0.3001061],
         [0.08668051, 0.14896399, 0.21282558, 0.25752308, 0.27253406],
         [0.07634613, 0.12664142, 0.17574502, 0.20765944, 0.2155001],
         [0.05890843, 0.09356377, 0.12493327, 0.1427122, 0.14374558],
         [0.03878372, 0.05873308, 0.07492625, 0.08201409, 0.07939603]],
        dtype=np.float64)
    assert_allclose(filtered_y_float64, expected_y_float64, rtol=1e-6)
    assert filtered_y_float64.dtype == np.float64

    # float32
    y_float32 = stack.cast_img_float32(y)
    filtered_y = stack.gaussian_filter(y_float32, 2)
    expected_y = stack.cast_img_float32(expected_y_float64)
    assert_allclose(filtered_y, expected_y, rtol=1e-6)
    assert filtered_y.dtype == np.float32

    # uint8
    with pytest.raises(ValueError):
        stack.gaussian_filter(y, 2, allow_negative=True)
    filtered_y = stack.gaussian_filter(y, 2)
    expected_y = stack.cast_img_uint8(expected_y_float64)
    assert_array_equal(filtered_y, expected_y)
    assert filtered_y.dtype == np.uint8

    # uint16
    y_uint16 = stack.cast_img_uint16(y)
    with pytest.raises(ValueError):
        stack.gaussian_filter(y_uint16, 2, allow_negative=True)
    filtered_y = stack.gaussian_filter(y_uint16, 2)
    expected_y = stack.cast_img_uint16(expected_y_float64)
    assert_array_equal(filtered_y, expected_y)
    assert filtered_y.dtype == np.uint16
def test_background_removal_gaussian():
    # float64
    y_float64 = stack.cast_img_float64(y)
    filtered_y_float64 = stack.remove_background_gaussian(y_float64, 2)
    expected_y_float64 = np.array(
        [[0., 0., 0.01415845, 0.36227129, 0.],
         [0., 0., 0.25776265, 0.66404555, 0.43726986],
         [0., 0., 0.11052949, 0.59626213, 0.],
         [0., 0.42016172, 0., 0., 0.],
         [0., 0., 0., 0., 0.]],
        dtype=np.float64)
    assert_allclose(filtered_y_float64, expected_y_float64, rtol=1e-6)
    assert filtered_y_float64.dtype == np.float64

    # float32
    y_float32 = stack.cast_img_float32(y)
    filtered_y = stack.remove_background_gaussian(y_float32, 2)
    expected_y = stack.cast_img_float32(expected_y_float64)
    assert_allclose(filtered_y, expected_y, rtol=1e-6)
    assert filtered_y.dtype == np.float32

    # uint8
    with pytest.raises(ValueError):
        stack.gaussian_filter(y, 2, allow_negative=True)
    filtered_y = stack.remove_background_gaussian(y, 2)
    expected_y = stack.cast_img_uint8(expected_y_float64)
    assert_array_equal(filtered_y, expected_y)
    assert filtered_y.dtype == np.uint8

    # uint16
    y_uint16 = stack.cast_img_uint16(y)
    with pytest.raises(ValueError):
        stack.gaussian_filter(y_uint16, 2, allow_negative=True)
    filtered_y = stack.remove_background_gaussian(y_uint16, 2)
    expected_y = stack.cast_img_uint16(expected_y_float64)
    assert_array_equal(filtered_y, expected_y)
    assert filtered_y.dtype == np.uint16
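
# Illustrative consistency check (not part of the original test suite): the
# expected values above are consistent with a background removal that
# subtracts the Gaussian blur from the image and clips negative differences
# to zero. This sketch assumes that behaviour for float images only.
def _check_background_removal_relation(image_float64, sigma=2):
    blurred = stack.gaussian_filter(image_float64, sigma)
    expected = np.clip(image_float64 - blurred, 0, None)
    observed = stack.remove_background_gaussian(image_float64, sigma)
    assert_allclose(observed, expected, rtol=1e-6)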
# NOTE: the parametrize decorator is assumed; the dtype list is illustrative
# and restricted to types for which the assertions below clearly hold.
@pytest.mark.parametrize("dtype", [
    np.uint8, np.uint16, np.float32, np.float64])
def test_cast_uint16(dtype):
    # from integer to np.uint16
    if np.issubdtype(dtype, np.integer):
        x = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
        tensor = np.array(x).reshape((3, 3)).astype(dtype)
        tensor[2, 2] = np.iinfo(dtype).max

    # from float to np.uint16
    else:
        x = [[0.1, 0.2, 0.3], [0.4, 0.5, 0.6], [0.7, 0.8, 1.0]]
        tensor = np.array(x).reshape((3, 3)).astype(dtype)

    # cast in uint16
    tensor_uint16 = stack.cast_img_uint16(tensor)
    assert tensor_uint16.dtype == np.uint16

    # check value
    assert tensor_uint16.max() == 65535
    if dtype == np.uint16:
        assert_array_equal(tensor_uint16, tensor)
def get_watershed_relief(image, nuc_label, alpha):
    """Build a representation of cells as a watershed relief.

    In a watershed algorithm we consider cells as watersheds to be flooded.
    The watershed relief is inversely proportional to both the pixel
    intensity and the closeness to nuclei. Pixels with a high intensity or
    close to labelled nuclei have a low watershed relief value, so they are
    flooded first. Flooding the watersheds propagates the nuclei labels
    through potential cytoplasm areas. The lines separating the watersheds
    give the final segmentation of the cells.

    Parameters
    ----------
    image : np.ndarray, np.uint
        Cells image with shape (z, y, x) or (y, x).
    nuc_label : np.ndarray, np.int64
        Result of the nuclei segmentation with shape (y, x) and nuclei
        instances labelled.
    alpha : float or int
        Weight of the pixel intensity values to compute the relief.

    Returns
    -------
    watershed_relief : np.ndarray, np.uint16
        Watershed representation of cells with shape (y, x).

    """
    # check parameters
    stack.check_array(image, ndim=[2, 3], dtype=[np.uint8, np.uint16])
    stack.check_array(nuc_label, ndim=2, dtype=np.int64)
    stack.check_parameter(alpha=(int, float))

    # use pixel intensity of the cells image
    if alpha == 1:
        # if a 3-d image is provided we sum its pixel values
        image = stack.cast_img_float64(image)
        if image.ndim == 3:
            image = image.sum(axis=0)
        # rescale image
        image = stack.rescale(image)
        # build watershed relief
        watershed_relief = image.max() - image
        watershed_relief[nuc_label > 0] = 0
        watershed_relief = np.true_divide(
            watershed_relief, watershed_relief.max(), dtype=np.float64)
        watershed_relief = stack.cast_img_uint16(
            watershed_relief, catch_warning=True)

    # use distance from the nuclei
    elif alpha == 0:
        # build watershed relief
        nuc_mask = nuc_label > 0
        watershed_relief = ndi.distance_transform_edt(~nuc_mask)
        watershed_relief = np.true_divide(
            watershed_relief, watershed_relief.max(), dtype=np.float64)
        watershed_relief = stack.cast_img_uint16(
            watershed_relief, catch_warning=True)

    # use a combination of both previous methods
    elif 0 < alpha < 1:
        # if a 3-d image is provided we sum its pixel values
        image = stack.cast_img_float64(image)
        if image.ndim == 3:
            image = image.sum(axis=0)
        # rescale image
        image = stack.rescale(image)
        # build watershed relief
        relief_pixel = image.max() - image
        relief_pixel[nuc_label > 0] = 0
        relief_pixel = np.true_divide(
            relief_pixel, relief_pixel.max(), dtype=np.float64)
        nuc_mask = nuc_label > 0
        relief_distance = ndi.distance_transform_edt(~nuc_mask)
        relief_distance = np.true_divide(
            relief_distance, relief_distance.max(), dtype=np.float64)
        watershed_relief = alpha * relief_pixel + (1 - alpha) * relief_distance
        watershed_relief = stack.cast_img_uint16(
            watershed_relief, catch_warning=True)

    else:
        raise ValueError("Parameter 'alpha' is wrong. It must be between 0 "
                         "and 1. Currently 'alpha' is {0}.".format(alpha))

    return watershed_relief
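
# Illustrative usage sketch (not part of the original module): it assumes a
# 2-d np.uint16 cell image ('cell_image') and an np.int64 nucleus label image
# ('nuc_label') produced by earlier pipeline steps, and hands the relief
# directly to scikit-image's watershed; the surrounding package may expose
# its own wrapper for this step.
def _example_watershed_segmentation(cell_image, nuc_label):
    from skimage.segmentation import watershed

    relief = get_watershed_relief(cell_image, nuc_label, alpha=0.9)

    # restrict the flooding to a rough foreground mask (hypothetical threshold)
    cell_mask = cell_image > np.percentile(cell_image, 50)
    cell_label = watershed(relief, markers=nuc_label, mask=cell_mask)
    return cell_label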
def build_cyt_relief(image_projected, nuc_labelled, mask_cyt, alpha=0.8):
    """Compute a 2-d representation of the cytoplasm to be used by a
    watershed algorithm.

    Cells are represented as watersheds, with low values at their center and
    maximum values at their borders. The equation used is:

        relief = alpha * relief_pixel + (1 - alpha) * relief_distance

    - 'relief_pixel' exploits the differences in pixel intensity values.
    - 'relief_distance' uses the distance from the nuclei.

    Parameters
    ----------
    image_projected : np.ndarray, np.uint
        Projected image of the cytoplasm with shape (y, x).
    nuc_labelled : np.ndarray, np.uint, np.int64 or bool
        Result of the nuclei segmentation with shape (y, x).
    mask_cyt : np.ndarray, bool
        Binary mask of the cytoplasm with shape (y, x).
    alpha : float or int
        Weight of the pixel intensity values used to compute the relief.
        Values of 0 and 1 return 'relief_distance' and 'relief_pixel'
        respectively.

    Returns
    -------
    relief : np.ndarray, np.uint
        Relief image of the cytoplasm with shape (y, x).

    """
    # check parameters
    stack.check_array(image_projected, ndim=2, dtype=[np.uint8, np.uint16])
    stack.check_array(nuc_labelled,
                      ndim=2,
                      dtype=[np.uint8, np.uint16, np.int64, bool])
    stack.check_array(mask_cyt, ndim=2, dtype=[bool])
    stack.check_parameter(alpha=(float, int))

    # use pixel intensity of the cytoplasm channel to compute the seed
    if alpha == 1:
        relief = image_projected.copy()
        max_intensity = np.iinfo(image_projected.dtype).max
        relief = max_intensity - relief
        relief[nuc_labelled > 0] = 0
        relief[mask_cyt == 0] = max_intensity
        relief = stack.rescale(relief)

    # use distance from the nuclei
    elif alpha == 0:
        binary_mask_nuc = nuc_labelled > 0
        relief = ndi.distance_transform_edt(~binary_mask_nuc)
        relief[mask_cyt == 0] = relief.max()
        relief = np.true_divide(relief, relief.max(), dtype=np.float32)
        if image_projected.dtype == np.uint8:
            relief = stack.cast_img_uint8(relief)
        else:
            relief = stack.cast_img_uint16(relief)

    # use both previous methods
    elif 0 < alpha < 1:
        relief_pixel = image_projected.copy()
        max_intensity = np.iinfo(image_projected.dtype).max
        relief_pixel = max_intensity - relief_pixel
        relief_pixel[nuc_labelled > 0] = 0
        relief_pixel[mask_cyt == 0] = max_intensity
        relief_pixel = stack.rescale(relief_pixel)
        relief_pixel = stack.cast_img_float32(relief_pixel)
        binary_mask_nuc = nuc_labelled > 0
        relief_distance = ndi.distance_transform_edt(~binary_mask_nuc)
        relief_distance[mask_cyt == 0] = relief_distance.max()
        relief_distance = np.true_divide(relief_distance,
                                         relief_distance.max(),
                                         dtype=np.float32)
        relief = alpha * relief_pixel + (1 - alpha) * relief_distance
        if image_projected.dtype == np.uint8:
            relief = stack.cast_img_uint8(relief)
        else:
            relief = stack.cast_img_uint16(relief)

    else:
        raise ValueError("Parameter 'alpha' is wrong. It must be between 0 "
                         "and 1. Currently 'alpha' is {0}.".format(alpha))

    return relief
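
# Tiny illustrative computation of the blending step alone (not part of the
# original module): it mirrors the '0 < alpha < 1' branch of build_cyt_relief
# on toy float32 arrays, before the final cast back to an unsigned dtype.
_alpha = 0.8
_relief_pixel = np.array([[0.0, 0.5], [1.0, 0.25]], dtype=np.float32)
_relief_distance = np.array([[1.0, 0.5], [0.0, 0.75]], dtype=np.float32)

# weighted combination: alpha=0.8 keeps the result close to the
# intensity-based relief -> approximately [[0.2, 0.5], [0.8, 0.35]]
_relief = _alpha * _relief_pixel + (1 - _alpha) * _relief_distance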
def apply_unet_distance_double(model, nuc, cell, nuc_label, target_size=None,
                               test_time_augmentation=False):
    """Segment cells with a pretrained model that predicts a distance map,
    then use this map with a watershed algorithm.

    Parameters
    ----------
    model : ``tensorflow.keras.model`` object
        Pretrained Unet model that predicts distance to edges and cell
        surface.
    nuc : np.ndarray, np.uint
        Original nucleus image with shape (y, x).
    cell : np.ndarray, np.uint
        Original cell image to segment with shape (y, x).
    nuc_label : np.ndarray, np.int64
        Labelled nucleus image. Each nucleus is characterized by the same
        pixel value.
    target_size : int
        Resize image before segmentation. A square image is resized to
        `target_size`. A rectangular image is resized so that its smaller
        dimension equals `target_size`.
    test_time_augmentation : bool
        Apply test time augmentation or not. The image is augmented 8 times
        and the final segmentation is the average result over these
        augmentations.

    Returns
    -------
    cell_label_pred : np.ndarray, np.int64
        Labelled cell image. Each cell is characterized by the same pixel
        value.

    """
    # check parameters
    stack.check_parameter(
        target_size=(int, type(None)),
        test_time_augmentation=bool)
    stack.check_array(nuc, ndim=2, dtype=[np.uint8, np.uint16])
    stack.check_array(cell, ndim=2, dtype=[np.uint8, np.uint16])
    stack.check_array(nuc_label, ndim=2, dtype=np.int64)

    # get original shape
    height, width = cell.shape

    # resize cell image if necessary
    if target_size is None:
        # keep the original shape
        nuc_to_process = nuc.copy()
        cell_to_process = cell.copy()
        nuc_label_to_process = nuc_label.copy()
        new_height, new_width = height, width
    else:
        # target size should be multiple of 16
        target_size -= 16
        # we resize the images below the target size
        ratio = target_size / min(height, width)
        new_height = int(np.round(height * ratio))
        new_width = int(np.round(width * ratio))
        new_shape = (new_height, new_width)
        nuc_to_process = stack.resize_image(nuc, new_shape, "bilinear")
        cell_to_process = stack.resize_image(cell, new_shape, "bilinear")
        nuc_label_to_process = nuc_label.copy()

    # get padding margin to make the shape a multiple of 16
    marge_padding = stack.get_marge_padding(new_height, new_width, x=16)
    top, bottom = marge_padding[0]
    left, right = marge_padding[1]
    nuc_to_process = np.pad(
        nuc_to_process, pad_width=marge_padding, mode='symmetric')
    cell_to_process = np.pad(
        cell_to_process, pad_width=marge_padding, mode='symmetric')

    # standardize and cast cell image
    nuc_to_process = stack.compute_image_standardization(nuc_to_process)
    nuc_to_process = nuc_to_process.astype(np.float32)
    cell_to_process = stack.compute_image_standardization(cell_to_process)
    cell_to_process = cell_to_process.astype(np.float32)

    # augment images
    if test_time_augmentation:
        nuc_to_process = stack.augment_8_times(nuc_to_process)
        cell_to_process = stack.augment_8_times(cell_to_process)
        n_augmentations = 8
    else:
        nuc_to_process = [nuc_to_process]
        cell_to_process = [cell_to_process]
        n_augmentations = 1

    # loop over augmentations
    predictions_cell = []
    predictions_distance = []
    for i in range(n_augmentations):

        # get images
        nuc_to_process_ = nuc_to_process[i]
        nuc_to_process_ = nuc_to_process_[np.newaxis, :, :, np.newaxis]
        cell_to_process_ = cell_to_process[i]
        cell_to_process_ = cell_to_process_[np.newaxis, :, :, np.newaxis]

        # make predictions
        prediction = model.predict([nuc_to_process_, cell_to_process_])

        # remove padding
        if i in [0, 1, 2, 6]:
            prediction_cell = prediction[1][0, top:-bottom, left:-right, 0]
            prediction_distance = prediction[2][0, top:-bottom, left:-right, 0]
        else:
            prediction_cell = prediction[1][0, left:-right, top:-bottom, 0]
            prediction_distance = prediction[2][0, left:-right, top:-bottom, 0]

        # resize predictions back, taking into account a potential
        # deformation from the image augmentation
        if target_size is not None:
            if i in [0, 1, 2, 6]:
                prediction_cell = stack.resize_image(
                    prediction_cell, (height, width), "bilinear")
                prediction_distance = stack.resize_image(
                    prediction_distance, (height, width), "bilinear")
            else:
                prediction_cell = stack.resize_image(
                    prediction_cell, (width, height), "bilinear")
                prediction_distance = stack.resize_image(
                    prediction_distance, (width, height), "bilinear")

        # store predictions
        predictions_cell.append(prediction_cell)
        predictions_distance.append(prediction_distance)

    # reverse the image augmentation
    if test_time_augmentation:
        predictions_cell = stack.augment_8_times_reversed(predictions_cell)
        predictions_distance = stack.augment_8_times_reversed(
            predictions_distance)
        mean_prediction_cell = np.mean(predictions_cell, axis=0)
        mean_prediction_distance = np.mean(predictions_distance, axis=0)
    else:
        mean_prediction_cell = predictions_cell[0]
        mean_prediction_distance = predictions_distance[0]

    # invert and format the distance map
    min_ = mean_prediction_distance.min()
    max_ = max(1, mean_prediction_distance.max())
    mean_prediction_distance -= min_
    mean_prediction_distance /= max_
    mean_prediction_distance = 1 - mean_prediction_distance
    mean_prediction_distance = np.clip(mean_prediction_distance, 0, 1)
    mean_prediction_distance = stack.cast_img_uint16(mean_prediction_distance)

    # postprocess predictions
    _, cell_label_pred = from_distance_to_instances(
        label_x_nuc=nuc_label_to_process,
        label_2_cell=mean_prediction_cell,
        label_distance=mean_prediction_distance,
        compute_nuc_label=False)

    return cell_label_pred
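
# Hypothetical usage sketch (not part of the original module): 'nuc_image'
# and 'cell_image' are assumed 2-d np.uint16 arrays, 'nuc_label' an np.int64
# label image, and 'model' a compatible double-input Keras model loaded
# elsewhere in the package; none of these are defined in this excerpt.
def _example_cell_segmentation(model, nuc_image, cell_image, nuc_label):
    cell_label = apply_unet_distance_double(
        model=model,
        nuc=nuc_image,
        cell=cell_image,
        nuc_label=nuc_label,
        target_size=256,
        test_time_augmentation=True)
    # each segmented cell shares a unique positive integer in 'cell_label'
    return cell_label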