Example No. 1
    def slice_pre(data_slice):

        data_normalized = normalize(data_slice)
        data_oryg = data_normalized.copy()
        data_hist = exposure.equalize_adapthist(data_normalized,
                                                clip_limit=0.1)

        data_median = filters.median(data_hist, selem=disk(3))
        data_median = exposure.adjust_gamma(data_median, gamma=10)

        thresholds = filters.threshold_multiotsu(data_median)
        regions = np.digitize(data_normalized, bins=thresholds)

        data_normalized[regions != 2] = 0

        data_pre = exposure.equalize_hist(data_normalized)

        thresholds = filters.threshold_multiotsu(data_pre)

        regions = np.digitize(data_pre, bins=thresholds)
        data_mask = label2rgb(regions)
        data_mask = rgb2gray(data_mask)
        data_mask[data_mask != np.max(data_mask)] = 0
        data_mask[data_mask == np.max(data_mask)] = 1
        data_mask = closing(data_mask, selem=disk(3)).astype(int)
        data_mask = remove_small_holes(data_mask, area_threshold=300)

        data_eq = exposure.equalize_adapthist(data_oryg, clip_limit=0.15)
        data_preprocessed = data_eq * data_mask
        data_preprocessed = opening(data_preprocessed, selem=disk(3))

        return data_preprocessed, data_mask
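The snippet above depends on an externally defined normalize helper. As a rough, self-contained sketch of its central masking step (contrast enhancement, multi-Otsu, keep only the brightest class), assuming normalize simply rescales intensities to [0, 1]:

import numpy as np
from skimage import data, exposure, filters

img = data.camera().astype(float)
img = (img - img.min()) / (img.max() - img.min() + 1e-12)  # assumed stand-in for normalize()

img_eq = exposure.equalize_adapthist(img, clip_limit=0.1)
thresholds = filters.threshold_multiotsu(img_eq)   # default: 3 classes, 2 thresholds
regions = np.digitize(img_eq, bins=thresholds)     # labels 0, 1, 2
mask = regions == 2                                # keep only the brightest class
masked = img * mask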
Example No. 2
def detect_people(img, img_path):
    # local_logger.info(img.shape)
    # First, predict if image has people
    prediction = model(img, training=False).numpy()
    # local_logger.info(prediction)

    # Second, if the image has people then do the localization
    if prediction[0][0] > prediction[0][1]:
        flat_img = get_flat_img(img)
        sum_mat = get_2d_sum_mat(model_weights, flat_img)
        # replace all negative values with 0
        no_neg_sum_mat = sum_mat.copy()
        no_neg_sum_mat[no_neg_sum_mat < 0] = 0

        # Using Otsu threshold to locate people
        # sum_mat = sum_mat.astype('float32')
        # max = np.max(sum_mat)
        # min = np.min(sum_mat)
        # raw_th = max - 0.4*(max - min)
        # otsu_th, otsu_mat = cv2.threshold(sum_mat, raw_th, max, cv2.THRESH_TOZERO+cv2.THRESH_OTSU)
        # local_logger.info(max, min, otsu_th)
        # draw_img_plot(img_path, otsu_mat, otsu_th, PLOT_DIR)

        # thresh = threshold_otsu(sum_mat)
        thresh_arr = threshold_multiotsu(no_neg_sum_mat, classes=3)
        thresh = thresh_arr[-1]
        # local_logger.info('Otsu threshold value: ' + repr(thresh))
        # Considering threshold_multiotsu for better localization?

        draw_img_plot(img_path, sum_mat, thresh, plot_path)
        return 1
    else:
        return 0
Example No. 3
def extract_foreground(img):
    """
    Extracts the single largest object from grayscale image img.
    Returns a boolean mask and a skimage RegionProperty for that object.
    """
    thresholds = threshold_multiotsu(img, classes=3)
    true_fg = np.digitize(img, [thresholds[0]])

    mask_edge = get_edge_mask(true_fg, 100, 20)
    masked_img = extract_mask(img, mask_edge)
    new_true_fg = new_edge_threshold(masked_img, mask_edge, true_fg)

    new_mask_edge = get_edge_mask(new_true_fg, 50, 10)
    new_masked_img = extract_mask(img, new_mask_edge)
    last_true_fg = new_edge_threshold(
        new_masked_img,
        new_mask_edge,
        new_true_fg,
    )

    labels = label(last_true_fg)
    props = regionprops(labels, img)

    areas = np.asarray([prop.area for prop in props])
    ind = areas.argmax()

    prop = props[ind]
    mask = binary_fill_holes(labels == prop.label)

    return mask, prop
Example No. 4
    def segment(self, img):
        """
        Applies Otsu's Multi-Tresholding to a Image

        Parameters
        ----------
        img: np.ndarray
            The image to perform segmentation.

        Returns
        -------
        str
            The path where the segmented image was saved.
        """

        # Apply Gaussian Blur to smooth the image (5x5 kernel)
        img = filters.gaussian(img)
        # Apply otsu's global thresholding method
        thresholds = filters.threshold_multiotsu(img,
                                                 classes=self.n_thresholds)
        # Use the found thresholds to segment image
        img = np.digitize(img, bins=thresholds)

        # Scale the image
        img = scale_img(img)

        return img
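For reference, the same pipeline (Gaussian smoothing, multi-Otsu, digitize) can be sketched directly against scikit-image on a sample image; scale_img in the snippet above is an external helper and is omitted here:

import numpy as np
from skimage import data, filters

img = filters.gaussian(data.camera())                      # smooth
thresholds = filters.threshold_multiotsu(img, classes=3)   # two thresholds
segmented = np.digitize(img, bins=thresholds)              # integer labels 0..2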
Example No. 5
def extract_foreground_biofilms(img, area_threshold=9000):
    thresholds = threshold_multiotsu(img, classes=3)
    true_fg = np.digitize(img, [thresholds[0]])

    mask_edge = get_edge_mask(true_fg, 100, 20)
    masked_img = extract_mask(img, mask_edge)
    new_true_fg = new_edge_threshold(masked_img, mask_edge, true_fg)

    new_mask_edge = get_edge_mask(new_true_fg, 50, 10)
    new_masked_img = extract_mask(img, new_mask_edge)
    last_true_fg = new_edge_threshold(
        new_masked_img,
        new_mask_edge,
        new_true_fg,
    )

    labels = label(last_true_fg)
    props = regionprops(labels, img)

    areas = np.asarray([prop.area for prop in props])
    mask = np.zeros_like(labels)
    inds = np.arange(len(areas))[areas > area_threshold]
    for i, ind in enumerate(inds):
        prop = props[ind]
        mask[binary_fill_holes(labels == prop.label)] = i + 1

    return mask
Example No. 6
def transform_image(original, classes=3, cmap_result='gray', filename=None):
    thresholds = threshold_multiotsu(original, classes=classes)
    regions = np.digitize(original, bins=thresholds)

    fig, ax = plt.subplots(nrows=1, ncols=3, figsize=(10, 3.5))

    # Plotting the original image.
    ax[0].imshow(original, cmap='gray')
    ax[0].set_title('Grayscale')
    ax[0].axis('off')

    # Plotting the histogram and the two thresholds obtained from
    # multi-Otsu.
    ax[1].hist(original.ravel(), bins=255)
    ax[1].set_title('Histogram')
    for thresh in thresholds:
        ax[1].axvline(thresh, color='r')

    # Plotting the Multi Otsu result.
    ax[2].imshow(regions, cmap=cmap_result)
    ax[2].set_title(str(classes) + ' gray levels')
    ax[2].axis('off')

    plt.subplots_adjust()
    plt.savefig('./img/preprocess_image_' + filename + '_' + str(classes) + 'classes.png', transparent=True)
    plt.show()
Example No. 7
def extract_foreground_biofilms(img, area_threshold=9000):
    """
    Extracts the single largest object from grayscale image img.
    Returns a boolean mask and a skimage RegionProperty for that object.
    """
    thresholds = threshold_multiotsu(img, classes=3)
    true_fg = np.digitize(img, [thresholds[0]])

    mask_edge = get_edge_mask(true_fg, 100, 20)
    masked_img = extract_mask(img, mask_edge)
    new_true_fg = new_edge_threshold(masked_img, mask_edge, true_fg)

    new_mask_edge = get_edge_mask(new_true_fg, 50, 10)
    new_masked_img = extract_mask(img, new_mask_edge)
    last_true_fg = new_edge_threshold(
        new_masked_img,
        new_mask_edge,
        new_true_fg,
    )

    labels = label(last_true_fg)
    props = regionprops(labels, img)

    areas = np.asarray([prop.area for prop in props])
    mask = np.zeros_like(labels)
    inds = np.arange(len(areas))[areas > area_threshold]
    for i, ind in enumerate(inds):
        prop = props[ind]
        mask[binary_fill_holes(labels == prop.label)] = i + 1

    return mask
Example No. 8
    def _binarise(self, arr):
        threshes = filters.threshold_multiotsu(arr, classes=2)
        # if type(threshes) is np.ndarray:  # Some thresholds return multiple levels - reduce to 2
        #     threshes = threshes[0]
        binarised = arr > threshes

        return binarised
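Note that with classes=2, threshold_multiotsu returns a one-element array, so the comparison above broadcasts over the image, and the result should be essentially the same as plain threshold_otsu. A quick sketch to illustrate (not part of the original):

import numpy as np
from skimage import data, filters

arr = data.coins()
t_multi = filters.threshold_multiotsu(arr, classes=2)   # array with a single threshold
t_single = filters.threshold_otsu(arr)
binarised = arr > t_multi                               # broadcasts to a boolean image
print(t_multi[0], t_single)                             # typically (nearly) identical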
Example No. 9
def cloud_mask_otsu(image):
    image = np.array(image)
    thresh = threshold_multiotsu(image, 4)
    max_thresh = max(thresh)
    mask = closing(image > max_thresh, square(10))
    del max_thresh, thresh
    return np.array(mask)
Example No. 10
def segmentation(image, arrow_bb):
    """
    returns a binary image 
                    - Input : RGB image
                    - Output : binary image
    """

    # crop robot from image
    minr_a, minc_a, maxr_a, maxc_a = arrow_bb
    test_im = image.copy()
    test_im[minr_a-30:maxr_a+50, minc_a-30:maxc_a+50] = 255
    
    # Change exposure to get a brighter image
    # (note: bright_im is not used further below; gray is computed from test_im)
    bright_im = skimage.exposure.adjust_gamma(test_im, gamma=3/5, gain=1)

    gray = skimage.color.rgb2gray(test_im)
    
    # Contrast stretching
    a, b = np.percentile(gray, (1, 70))
    img_rescale = exposure.rescale_intensity(gray, in_range=(a, b))
    
    # Multi thresholds to get digits and operators at once
    thresholds = threshold_multiotsu(img_rescale)
    
    # Using the threshold values, we generate the three regions.
    regions = np.digitize(gray, bins=thresholds)
    
    # Binarization, discriminate background from foreground
    binarized = regions == 0
    
    # Morphological operation
    close = morphology.binary_dilation(binarized, skimage.morphology.selem.rectangle(1, 3))
    
    return close
Example No. 11
    def _generate_otsu_tick_points(self) -> List[float]:
        """
        Determine the Otsu threshold tick points.
        """
        vals = filters.threshold_multiotsu(self.image,
                                           classes=self.view.num_materials)
        return self._normalise_tick_values(vals.tolist())
Example No. 12
def area_extract(array, coordinates, otsu_bins=20, expand_distance=5):
    h_upper, w_upper = array.shape
    labeled_area = np.zeros_like(array)
    # Extracting the available area from the given boxes
    for lbl, (x, y, w, h, _) in enumerate(coordinates):
        # Decide whether all the area inside the box is available
        # Note: the 'lbl' used here is only a pixel label; it is unrelated to the
        # 'label' used for object annotation during object recognition.
        xmin = np.clip(x - w / 2, 0, None).astype(int)
        xmax = np.clip(x + w / 2, 0, w_upper).astype(int)
        ymin = np.clip(y - h / 2, 0, None).astype(int)
        ymax = np.clip(y + h / 2, 0, h_upper).astype(int)

        # Extract available areas from boxes
        strict_area = array[ymin:ymax, xmin:xmax]
        otsu_thres = threshold_otsu(strict_area, nbins=otsu_bins)

        # Labeling available areas
        y_index, x_index = (strict_area >= otsu_thres).nonzero()
        labeled_area[y_index + ymin, x_index + xmin] = lbl + 1

    # Expanding the extracted areas
    expanded_labels = expand_labels(labeled_area, distance=expand_distance)
    # Re-threshold the expanded areas using multi-Otsu thresholding
    for lbl in range(1, np.max(expanded_labels).astype(int) + 1):
        lbl_area = (array - np.min(array)) * (expanded_labels == lbl)
        regions = np.digitize(lbl_area, bins=threshold_multiotsu(lbl_area))
        expanded_labels[regions == 1] = -lbl
        expanded_labels[regions == 2] = lbl
    return expanded_labels
Example No. 13
    def extractdisease(img):
        # extract the disease spots
        lab = cv2.cvtColor(img, cv2.COLOR_BGR2LAB)
        L, A, B = cv2.split(lab)
        thresholds = threshold_multiotsu(A)
        # Using the threshold values, we generate the three regions.
        regions = np.digitize(A, bins=thresholds)
        T = list(thresholds)

        def th(array):
            # keep only pixels at or above the upper threshold
            temp_img = np.where(array >= T[1], 255, 0).astype(np.uint8)
            return temp_img

        mask = th(A)
        result = cv2.bitwise_and(img, img, mask=mask)
        return result
Example No. 14
    def otsu(image, n_classes):
        try:
            thresholds = threshold_multiotsu(image, classes=n_classes)
        except ValueError:
            # If the image is constant, Otsu thresholding cannot be applied
            thresholds = np.ones(1)

        return np.digitize(image, bins=thresholds).astype('uint8')
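A small sketch of how that ValueError fallback plays out on a constant image (the exact exception message depends on the scikit-image version):

import numpy as np
from skimage.filters import threshold_multiotsu

flat = np.full((32, 32), 7, dtype=np.uint8)
try:
    thresholds = threshold_multiotsu(flat, classes=3)
except ValueError:
    thresholds = np.ones(1)                      # same fallback as above
labels = np.digitize(flat, bins=thresholds)      # every pixel lands in class 1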
Example No. 15
def threshold_multiotsu(arr1d, classes):
    """
    TypeError: ndarray() missing required argument 'shape' (pos 1)
    :param arr1d:
    :return:
    """
    import skimage.filters as sf
    thresh = sf.threshold_multiotsu(arr1d, classes, nbins=256)
    return thresh
Example No. 16
def extract_mesh(fname, vol, nmaterials=2, pitch=1.0):
    """
    vol (torch.tensor)
    """
    vol = (vol - vol.min()) / (vol.max() - vol.min() + 1e-12)

    if nmaterials == 2:
        thresh = threshold_otsu(vol)
        voln = skimage.restoration.denoise_tv_chambolle(vol, 0.0001)

        verts, faces, normals, values = measure.marching_cubes_lewiner(
            voln, level=thresh, allow_degenerate=False)
        mesh = trimesh.Trimesh(verts, faces)
        obj_ = trimesh.exchange.obj.export_obj(mesh)
        with open(fname, "w") as f:
            f.write(obj_)

    else:
        #from sklearn.cluster import KMeans
        #kmeans = KMeans(n_clusters=3).fit(vol.reshape([-1,1]))
        #c = sorted(kmeans.cluster_centers_[:,0])
        #print("centroids of Kmeans:", c)

        thresholds = threshold_multiotsu(vol, nmaterials)
        # approximate per-material intensity centres from the multi-Otsu
        # class boundaries (standing in for the k-means centroids above)
        bounds = [float(vol.min())] + list(thresholds) + [float(vol.max())]
        c = [(bounds[i] + bounds[i + 1]) / 2. for i in range(nmaterials)]

        f = open(fname, 'w')
        vert_start = 0

        f.write(f"# attenuations (multi-Otsu class centres): {str(c)}\n")

        for i in range(nmaterials - 1):
            f.write(f"o object{i} {i+1} {i}\n")

            thresh = (c[i] + c[i + 1]) / 2.
            # extract the region between material i and material i + 1
            print(f"extract a material with a threshold {thresh}")
            new_vol = vol.copy()
            new_vol[new_vol < c[i]] = 0.
            new_vol[new_vol >= c[i + 1]] = 1.
            new_vol = (new_vol - c[i]) / (c[i + 1] - c[i] + 1e-8)

            mesh = ops.matrix_to_marching_cubes(matrix=new_vol, pitch=pitch)

            #mesh.export(fname[:-4]+str(i)+".obj")

            for vert in mesh.vertices:
                f.write('v %f %f %f\n' % tuple(vert))

            for face in mesh.faces:
                f.write('f %d %d %d\n' % tuple(face + 1 + vert_start))

            vert_start += mesh.vertices.shape[0]
            print(vert_start)

        f.close()
        return
Example No. 17
def get_forms(image):
    """
    Function that highlight the regions of interest.
    
    Input : - image --> image to labelize
    
    Outputs : - shapes --> matrix with binarized image
              - forms --> dictionnary with forms info with 'key' : [#pix, form_number, form region]
    """

    # Dictionary to store information about the different shapes
    forms = dict()

    # Conversion to grayscale
    gray = skimage.color.rgb2gray(image)

    # Otsu to find the shapes
    thresholds = threshold_multiotsu(gray, classes=2)
    thresh_background = thresholds[0] * 1.38

    ##### Shapes detection (cutting the background) #####

    shapes = np.zeros(gray.shape)
    for i in range(gray.shape[0]):
        for j in range(gray.shape[1]):
            if gray[i][j] < thresh_background:
                shapes[i][j] = 255

    classes = np.zeros(shapes.shape)
    k = 0
    for i in range(shapes.shape[0]):
        for j in range(shapes.shape[1]):
            if shapes[i][j] == 255 and classes[i][j] == 0:  # When a new shape is encountered
                k += 1
                region = region_growing(shapes, [i, j], 10)
                pix = 0
                color = 0
                for r in region:  # Iterate over the pixels of the shape
                    pix += 1  # Count the number of pixels inside the shape
                    color += gray[r[0]][r[1]]
                    classes[r[0]][r[1]] = k  # Separate the different shapes by labelling

                forms['Shape', k] = [pix, k, region]

                # Discard regions with more than 500 pixels or with 3 or fewer
                if forms['Shape', k][0] > 500 or forms['Shape', k][0] <= 3:
                    w = np.where(classes == forms['Shape', k][1])
                    shapes[w[0], w[1]] = 0
                    del forms['Shape', k]

    return shapes, forms
Example No. 18
    def get_mask(channel: str = 'lab') -> List[np.ndarray]:
        """Compute a segmentation mask.
        If channel = 'lab', the image is first converted to the LAB colour space.
        Otherwise, the red, green or blue channel is used to compute the optimal
        threshold(s) separating the different layers of the image."""
        # RGB to LAB conversion
        # more details here: https://en.wikipedia.org/wiki/CIELAB_color_space
        # and here for the conversion method:
        # https://docs.opencv.org/3.4/de/d25/imgproc_color_conversions.html#color_convert_rgb_lab
        new_img = img.copy()
        mask = np.ones((img.shape[:2]))
        # apply Otsu's method to the image in the LAB colour space,
        # or to the red, green or blue channel of the RGB space.
        if channel == 'A (LAB)':
            lab = cv2.cvtColor(new_img, cv2.COLOR_BGR2LAB)[..., 1]
            _t = threshold_multiotsu(lab, classes)[0]
        elif channel == 'rouge (RGB)':
            lab = new_img[..., 0]
            _t = threshold_multiotsu(lab, classes)[1]
        elif channel == 'vert (RGB)':
            lab = new_img[..., 1]
            _t = threshold_multiotsu(lab, classes)[1]
        elif channel == 'bleu (RGB)':
            lab = new_img[..., 2]
            _t = threshold_multiotsu(lab, classes)[1]
        # build the segmentation mask
        if channel == 'A (LAB)':
            mask = 1 - (lab < _t) * 1
        else:
            mask = (lab < _t) * 1
            lab = 1 - lab
        # background pixels are encoded in RGB as (255, 255, 255)
        new_img[np.where(mask == 0)] = 255
        if smooth:
            mask = binary_closing(mask, iterations=15)
            mask = binary_opening(mask, iterations=10)
            if fill_holes:
                mask = binary_fill_holes(mask)
            new_img[np.where(mask == 0)] = 255
        return lab, mask, new_img
Example No. 19
    def get_master_mask(self, sigma=1, kernel_size=5, mask_ext=5, nuclear_ext=2, multi_otsu_nucleus_mask=True):
        """ Whole cell mask building by Ca dye channel data with Otsu thresholding.
        Filters greater element of draft Otsu mask and return master mask array.

        mask_ext - otsu mask extension value in px

        """
        trun = lambda k, sd: (((k - 1)/2)-0.5)/sd  # calculate truncate value for gaussian fliter according to sigma value and kernel size

        self.detection_img = filters.gaussian(np.mean(self.ca_series, axis=0), sigma=sigma, truncate=trun(kernel_size, sigma))

        # multi Otsu mask for nucleus detection
        self.multi_otsu_nucleus_mask = multi_otsu_nucleus_mask
        if self.multi_otsu_nucleus_mask:
            multi_otsu = filters.threshold_multiotsu(self.detection_img, classes=3)
            self.element_label = np.digitize(self.detection_img, bins=multi_otsu)  # 1 - cell elements, 2 - intracellular and nuclear elements

            # get the largest multi-Otsu cellular element
            cell_element = (self.element_label == 1) | (self.element_label == 2)
            cell_element_label = measure.label(cell_element)
            cell_element_area = {element.area : element.label for element in measure.regionprops(cell_element_label)}
            cell_border_mask = cell_element_label == cell_element_area[max(cell_element_area.keys())]
            self.cell_distances, _ = distance_transform_edt(~cell_border_mask, return_indices=True)
            self.cell_mask = self.cell_distances <= mask_ext

            # get the largest multi-Otsu intracellular element
            nuclear_element = self.element_label == 2
            nuclear_element_label = measure.label(nuclear_element)
            nuclear_element_area = {element.area : element.label for element in measure.regionprops(nuclear_element_label)}
            nuclear_element_border = nuclear_element_label == nuclear_element_area[max(nuclear_element_area.keys())]
            self.nuclear_distances, _ = distance_transform_edt(~nuclear_element_border, return_indices=True)
            self.nuclear_mask = self.nuclear_distances <= nuclear_ext

            self.master_mask = np.copy(self.cell_mask)
            self.master_mask[self.nuclear_mask] = 0
        else:
            # please, DON'T use this option!
            otsu = filters.threshold_otsu(self.detection_img)
            draft_mask = self.detection_img > otsu
            self.element_label, self.element_num = measure.label(draft_mask, return_num=True)
            logging.info(f'{self.element_num} Otsu mask elements detected')

            detection_label = np.copy(self.element_label)
            element_area = {element.area : element.label for element in measure.regionprops(detection_label)}
            self.master_mask = detection_label == element_area[max(element_area.keys())]

            # mask expansion
            self.cell_distances, _ = distance_transform_edt(~self.master_mask, return_indices=True)
            self.master_mask = self.cell_distances <= mask_ext

        self.total_byte_prot_img = filters.gaussian(util.img_as_ubyte(np.mean(self.prot_series, axis=0)/np.max(np.abs(np.mean(self.prot_series, axis=0)))), sigma=sigma, truncate=trun(kernel_size, sigma))
        self.total_mask_ctrl_img = label2rgb(self.master_mask, image=self.total_byte_prot_img, colors=['blue'], alpha=0.2)
Example No. 20
    def get_mask(self):
        """
        Gets the binary mask of detected nuclei, as well as the labeled image
        """
        # compute threshold (to detect nuclei)
        #         threshold = filters.threshold_minimum(filt_img) #nuclei are among the darkest parts of the image
        thresholds = filters.threshold_multiotsu(self.gray_img,
                                                 classes=self.numClasses)
        threshold = thresholds[0]
        # binarize
        self.binary_img = self.gray_img < threshold
        # label
        self.labeled_img = measure.label(self.binary_img, background=0)
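The idea in this snippet, taking the lowest multi-Otsu threshold as the cut-off for the darkest class and then labelling connected components, can be sketched on a stock image as follows (whether nuclei are dark or bright depends on the imaging modality, so the comparison direction is only illustrative):

import numpy as np
from skimage import data, filters, measure

gray = data.coins()
thresholds = filters.threshold_multiotsu(gray, classes=3)
binary = gray < thresholds[0]                  # darkest class only
labeled = measure.label(binary, background=0)
print(labeled.max(), "connected components")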
Example No. 21
def _do_locate_reference_cell(
    image: Image, reference_area: int, scale: float = 0.1,
) -> Polygon:
    """Perform localization of reference cell"""

    # filter + binarize
    image_f = transform.rescale(image.data, scale)

    thresh = filters.threshold_multiotsu(image_f, classes=5)[0]
    image_t = image_f > thresh

    # find regions
    labeled = morphology.label(image_t)
    regions = measure.regionprops(labeled)

    # drop areas that are not approximately square
    regions = [
        r
        for r in regions
        if np.abs(r.bbox[2] - r.bbox[0]) / np.abs(r.bbox[3] - r.bbox[1]) > 0.8
        and np.abs(r.bbox[2] - r.bbox[0]) / np.abs(r.bbox[3] - r.bbox[1]) < 1.8
    ]

    # convert to Polygon
    tmp: List[Polygon] = []
    for r in regions:
        bbox = [int(r.bbox[i] * 1 / scale) for i in range(4)]
        y0, x0 = max(0, bbox[0]), max(0, bbox[1])
        y1, x1 = (
            min(image.shape[0], bbox[2]),
            min(image.shape[1], bbox[3]),
        )
        box = Polygon.from_bounds(x0, y0, x1, y1)
        tmp.append(box)

    # drop boxes that intersect with others
    regions = [
        r for r in tmp if not np.any([x.intersects(r) and x is not r for x in tmp])
    ]

    if len(regions) == 0:
        return None

    # process regions
    area_dev = [np.abs((r.area - reference_area) / reference_area) for r in regions]
    min_dev_idx = np.argmin(area_dev)

    if area_dev[min_dev_idx] < 2.0:
        return affinity.scale(regions[min_dev_idx], xfact=0.5, yfact=0.5)
    else:
        return None
Example No. 22
def SegmentByOtsu(Image):
    from skimage.filters import threshold_multiotsu
    IM = Image.copy()

    if (len(IM.shape) == 3):
        IM = cv2.cvtColor(IM, cv2.COLOR_RGB2GRAY)

    thresh = threshold_multiotsu(IM)
    Mask0 = (IM > thresh[0])
    Mask1 = (IM > thresh[1])
    Mask0 = Mask0.astype(int)
    Mask1 = Mask1.astype(int)

    return Mask0, Mask1, thresh
Example No. 23
    def otsu(self, image, num_classes=None):
        '''
        image: str or array
        '''
        if num_classes is None:
            num_classes = self.classes
        # Read image if it is a string
        if type(image) == str:
            image = imread(image)

        thresholds = threshold_multiotsu(image, classes=num_classes)
        segm = np.digitize(image, bins=thresholds)

        return segm
Example No. 24
def MultiThreshold(imageName, thresholdValues=None, classCount=3):

    base_path = "./images"

    # Setting the font size for all plots.
    matplotlib.rcParams['font.size'] = 9

    # The input image.
    image = cv.imread(f"{base_path}/{imageName}", 0)

    # Applying multi-Otsu thresholding with the requested number of
    # classes (three by default).

    thresholds = threshold_multiotsu(image, classCount)

    if thresholdValues:
        if isinstance(thresholdValues, list):
            thresholds = thresholdValues
        else:
            print("Threshold Values Must Be List!!!")
            print("The Program Continues With Multi Otsu Method!!!")

    # Using the threshold values, we generate the three regions.
    regions = np.digitize(image, bins=thresholds)

    fig, ax = plt.subplots(nrows=1, ncols=3, figsize=(15, 5))

    # Plotting the original image.
    ax[0].imshow(image, cmap='gray')
    ax[0].set_title('Original')
    ax[0].axis('off')

    # Plotting the histogram and the two thresholds obtained from
    # multi-Otsu.
    ax[1].hist(image.ravel(), bins=255)
    ax[1].set_title('Histogram')
    for thresh in thresholds:
        ax[1].axvline(thresh, color='r')

    # Plotting the Multi Otsu result.
    ax[2].imshow(
        regions,
        cmap='jet',
    )
    ax[2].set_title('Multi-Otsu result')
    ax[2].axis('off')

    plt.subplots_adjust()

    plt.show()
Example No. 25
def threshold(index):
    '''
    Calculates the shoreline threshold value from a water index image and plots a histogram
    Yarran Doherty & Kilian Vos 2020
    '''

    # Convert to 1d array and remove nan values
    vec_im = np.copy(index)
    vec = vec_im.reshape(vec_im.shape[0] * vec_im.shape[1])
    vec = vec[~np.isnan(vec)]

    # Perform multi otsu thresholding
    t_otsu = filters.threshold_multiotsu(vec)

    # Plot histogram
    plt.ioff()

    # create figure
    fig = plt.figure()
    fig.tight_layout()
    fig.set_size_inches([8, 8])
    ax = fig.add_subplot(111)

    # Set labels
    ax.set_title('NDWI Pixel Value Histogram Thresholding', fontsize=10)
    ax.set_xlabel('NDWI Pixel Value', fontsize=10)
    #ax.set_ylabel("Pixel Count", fontsize= 10)
    ax.set_ylabel("Pixel Class PDF", fontsize=10)
    ax.axes.yaxis.set_ticks([])

    # Plot threshold value(s)
    ax.axvline(x=t_otsu[0], color='k', linestyle='--', label='Class Threshold')
    ax.axvline(x=t_otsu[1],
               color='k',
               linestyle='--',
               label='Shoreline Threshold')

    # Add legend
    ax.legend(bbox_to_anchor=(1, 1),
              loc='lower right',
              framealpha=1,
              fontsize=8)  #, fontsize = 'xx-small')

    # Plot histogram
    ax.hist(vec, 150, color='blue', alpha=0.8, density=True)

    plt.show()

    return t_otsu
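A usage sketch with a synthetic three-population index image (assumes the snippet's filters/plt imports are in place; the populations and NaNs below are made up for illustration):

import numpy as np

rng = np.random.default_rng(0)
ndwi = np.concatenate([rng.normal(-0.4, 0.05, 3000),
                       rng.normal(0.0, 0.05, 3000),
                       rng.normal(0.5, 0.05, 3000)]).reshape(100, 90)
ndwi[0, :5] = np.nan                    # a few missing pixels
t_class, t_shoreline = threshold(ndwi)  # two thresholds from multi-Otsu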
Example No. 26
def get_model_freqs(probas, n_classes=3):
    freqs = []

    pbar = tqdm.tqdm(total=len(probas))
    for prob in probas:
        thr = threshold_multiotsu(prob, classes=n_classes)[0]

        freqs.append((prob >= thr).sum() / prob.size)

        pbar.set_description('[obsrv]')
        pbar.update(1)

    pbar.close()

    return freqs
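A minimal call sketch, using random maps in place of real model probability outputs (assumes the snippet's tqdm and threshold_multiotsu imports are available):

import numpy as np

rng = np.random.default_rng(42)
probas = [rng.random((64, 64)) for _ in range(4)]   # stand-ins for probability maps
freqs = get_model_freqs(probas, n_classes=3)        # fraction of pixels above the first threshold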
Example No. 27
def pick_Threshold(listClasses):
    from skimage.filters import threshold_multiotsu
    listImage = []
    multi_threshold = []
    [listImage.extend(classList) for classList in listClasses]

    for image_path in listImage:
        img = process_img(image_path)
        image_gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
        image_dilation = IncreaseDescreaseMask(image_gray, 3)
        image_erosion = IncreaseDescreaseMask(image_gray, -3)
        image_re_dilation = IncreaseDescreaseMask(image_erosion, 6)
        image_compare = image_re_dilation - image_dilation
        multi_threshold.append(threshold_multiotsu(image_compare))
    return np.mean(np.array(multi_threshold), axis=0).tolist()
Example No. 28
def GetFeature_Differential_GrayInfo_Mean(image_color, multi_thresholds):
    from skimage.filters import threshold_multiotsu
    image_gray = cv2.cvtColor(image_color, cv2.COLOR_RGB2GRAY)

    image_dilation = IncreaseDescreaseMask(image_gray, 6)
    image_erosion = IncreaseDescreaseMask(image_gray, -3)
    image_re_dilation = IncreaseDescreaseMask(image_erosion, 9)

    # image_compare_1 = image_dilation - image_re_dilation
    image_compare = image_re_dilation - image_dilation
    threshold_compare = threshold_multiotsu(image_compare)
    image_differential = (image_compare > multi_thresholds[0]) * (
        image_compare < multi_thresholds[1])

    return [np.mean(image_differential), np.mean(threshold_compare)]
Example No. 29
def thresholded(input_image: np.ndarray,
                method: str,
                binarize: bool = False) -> np.ndarray:
    # https://scikit-image.org/docs/dev/api/skimage.filters.html#skimage.filters.threshold_isodata
    foreground_image_mask = input_image.copy().astype(np.float32)
    if method == 'otsu':
        input_image_threshold = skimagefilters.threshold_otsu(
            foreground_image_mask)
    elif method == 'multi':
        input_image_threshold = skimagefilters.threshold_multiotsu(
            foreground_image_mask, classes=4)
    elif method == 'adaptive':
        input_image_threshold = skimagefilters.threshold_local(
            foreground_image_mask,
            block_size=3,
            method='mean'  # 'gaussian', 'median'
        )
    elif method == 'li':
        input_image_threshold = skimagefilters.threshold_li(
            foreground_image_mask)
    elif method == 'triangle':
        input_image_threshold = skimagefilters.threshold_triangle(
            foreground_image_mask)
    elif method == 'mean':
        input_image_threshold = skimagefilters.threshold_mean(
            foreground_image_mask)
    else:
        raise RuntimeError(f"threshold method {method} is not supported.")
    if method == 'multi':
        input_image_threshold = input_image_threshold.tolist()
        max_val = np.max(input_image)
        input_image_threshold = [0] + input_image_threshold + [max_val]
        num_segments = len(input_image_threshold)
        for segment in range(1, num_segments):
            prev_thresh = input_image_threshold[segment - 1]
            curr_thresh = input_image_threshold[segment]
            foreground_image_mask[(foreground_image_mask >= prev_thresh) & (
                foreground_image_mask < curr_thresh)] = round(prev_thresh)
        highest_thresh = input_image_threshold[-1]
        foreground_image_mask[foreground_image_mask >= highest_thresh] = round(
            highest_thresh)
    else:
        foreground_image_mask[
            foreground_image_mask < input_image_threshold] = 0.0
        if binarize:
            foreground_image_mask[foreground_image_mask > 0.0] = 1.0
    # foreground_image_mask = foreground_image_mask.astype(np.uint8)
    return foreground_image_mask
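A usage sketch for the two branches (assumes skimagefilters is skimage.filters imported under that alias, as in the snippet):

from skimage import data

img = data.camera()
multi_seg = thresholded(img, method='multi')                 # quantised to a handful of intensity levels
fg_binary = thresholded(img, method='otsu', binarize=True)   # 0/1 foreground mask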
Example No. 30
def auto_thresh_obs(
    adata,
    obs_cols=["arcsinh_n_genes_by_counts"],
    methods=["multiotsu"],
    directions=["above"],
):
    """
    automated thresholding on metrics in adata.obs

    Parameters:
        adata (anndata.AnnData): object containing unfiltered scRNA-seq data
        obs_cols (list of str): name of column(s) to threshold from adata.obs
        methods (list of str): one of 'otsu', 'multiotsu', 'li', or 'mean'
        directions (list of str): 'below' or 'above', indicating which direction to keep

    Returns:
        thresholds (dict): keys are obs_cols and values are dictionaries with
        "thresh" : threshold results & "direction" : direction to keep for training
    """
    # convert to lists before looping
    if isinstance(obs_cols, str):
        obs_cols = [obs_cols]
    if isinstance(methods, str):
        methods = [methods]
    if isinstance(directions, str):
        directions = [directions]
    # initiate output dictionary
    thresholds = dict.fromkeys(obs_cols)
    # add thresholds as subkey
    for i in range(len(obs_cols)):
        thresholds[obs_cols[i]] = {}  # initiate empty dict
        tmp = np.array(adata.obs[obs_cols[i]])  # grab values to threshold
        if methods[i] == "multiotsu":
            thresholds[obs_cols[i]]["thresh"] = threshold_multiotsu(tmp)
        elif methods[i] == "otsu":
            thresholds[obs_cols[i]]["thresh"] = threshold_otsu(tmp)
        elif methods[i] == "li":
            thresholds[obs_cols[i]]["thresh"] = threshold_li(tmp)
        elif methods[i] == "mean":
            thresholds[obs_cols[i]]["thresh"] = threshold_mean(tmp)
        else:
            raise ValueError(
                "Please provide a valid threshold method ('otsu', 'multiotsu', 'li', 'mean')."
            )
        # add direction for thresholding as subkey
        thresholds[obs_cols[i]]["direction"] = directions[i]

    return thresholds
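A call sketch, assuming an anndata.AnnData object adata whose .obs already contains the named QC column (not constructed here):

thresholds = auto_thresh_obs(
    adata,
    obs_cols=["arcsinh_n_genes_by_counts"],
    methods=["multiotsu"],
    directions=["above"],
)
print(thresholds["arcsinh_n_genes_by_counts"]["thresh"])      # array of multi-Otsu thresholds
print(thresholds["arcsinh_n_genes_by_counts"]["direction"])   # "above"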