Example #1
def compositeThreshold(gray, mode='com'):
    if mode == 'otsu':
        otsu = threshold_otsu(gray)
        otsu_bin = gray > otsu
        otsu_bin = otsu_bin.astype(np.uint8) * 255
        return otsu_bin
    elif mode == 'yen':
        yen = threshold_yen(gray)
        yen_bin = gray > yen
        yen_bin = yen_bin.astype(np.uint8) * 255
        return yen_bin
    elif mode == 'li':
        li = threshold_li(gray)
        li_bin = gray > li
        li_bin = li_bin.astype(np.uint8) * 255
        return li_bin
    elif mode == 'niblack':
        niblack = threshold_niblack(gray, window_size=13, k=0.8)
        niblack_bin = gray > niblack
        niblack_bin = niblack_bin.astype(np.uint8) * 255
        return niblack_bin
    elif mode == 'sauvola':
        sauvola = threshold_sauvola(gray, window_size=13)
        sauvola_bin = gray > sauvola
        sauvola_bin = sauvola_bin.astype(np.uint8) * 255
        return sauvola_bin
    elif mode == 'com':
        li = threshold_li(gray)
        li_bin = gray > li
        li_bin = li_bin.astype(np.uint8) * 255
        otsu = threshold_otsu(gray)
        otsu_bin = gray > otsu
        otsu_bin = otsu_bin.astype(np.uint8) * 255
        yen = threshold_yen(gray)
        yen_bin = gray > yen
        yen_bin = yen_bin.astype(np.uint8) * 255
        return cv2.min(cv2.min(otsu_bin, li_bin), yen_bin)
    elif mode == "niblack-multi":
        thr = np.full(gray.shape, 255, dtype=np.uint8)
        for k in np.linspace(-0.8, 0.2, 5):  #(-1.8,0.2,5)
            thresh_niblack = threshold_niblack(gray, window_size=25, k=k)
            binary_niblack = gray > thresh_niblack
            binary_niblack = binary_niblack.astype(np.uint8) * 255
            showResult("binary_niblack", binary_niblack)
            thr = cv2.min(thr, binary_niblack)
        return thr
    else:
        sauvola = threshold_sauvola(gray, window_size=25, k=0.25)
        sauvola_bin = gray > sauvola
        sauvola_bin = sauvola_bin.astype(np.uint8) * 255
        niblack = threshold_niblack(gray, window_size=25, k=0.25)
        niblack_bin = gray > niblack
        niblack_bin = niblack_bin.astype(np.uint8) * 255
        return cv2.max(sauvola_bin, niblack_bin)
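# Usage note: the example above assumes the following imports and a
# showResult() display helper; this preamble is a sketch (only the helper's
# name comes from the code above, its body is an assumption).
import cv2
import numpy as np
from skimage.filters import (threshold_otsu, threshold_yen, threshold_li,
                             threshold_niblack, threshold_sauvola)

def showResult(title, img):
    # minimal display helper used by the 'niblack-multi' branch
    cv2.imshow(title, img)
    cv2.waitKey(0)
    cv2.destroyAllWindows()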
def cell_detect(frame):
    thresh = threshold_yen(frame)
    binary = frame >= thresh
    closed = binary_closing(binary)
    # dilated = dilation(binary, square(5))
    label_img = label(closed)
    cellLocs = regionprops(label_img)
    cellLocs = [p for p in cellLocs if p.area > 100]
    return cellLocs
Example #3
def hysteresis(image):
    high = filters.threshold_yen(image)
    low = high * 0.9
    high_t = (image > high).astype(int)  # mask at the high threshold (unused below)
    low_t = (image > low).astype(int)    # mask at the low threshold (unused below)
    binary = filters.apply_hysteresis_threshold(image, low, high)
    return binary
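# A quick way to exercise hysteresis() on a bundled sample image (a sketch;
# data.camera() is just a stand-in input).
from skimage import data, filters

camera = data.camera().astype(float)
camera_mask = hysteresis(camera)   # boolean mask of pixels kept by hysteresis
print(camera_mask.shape, camera_mask.mean())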
Example #4
def threshold(image, algorithm='otsu', nbins=256):
    """Computes thresholding value according to specified algorithm.

    Args:
        image (array-like): Input image. Is converted to grayscale if RGB.

        algorithm (str, {'otsu', 'li', 'yen'}): Thresholding algorithm to
            determine threshold value. Defaults to 'otsu'.

        nbins (int): The number of bins used to calculate histogram as input to
            thresholding algorithms. Defaults to 256 bins.

    Returns:
        thresh (float): Thresholding value.

    """

    _image = _check_image(image)

    if algorithm == 'otsu':
        thresh = filters.threshold_otsu(_image, nbins=nbins)

    elif algorithm == 'li':
        thresh = filters.threshold_li(_image)

    elif algorithm == 'yen':
        thresh = filters.threshold_yen(_image, nbins=nbins)

    else:
        raise ValueError('Algorithm {} not available'.format(algorithm))

    return thresh
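# Since _check_image() is project-internal, here is a standalone sanity check
# against scikit-image directly (a sketch on a bundled grayscale image).
from skimage import data, filters
from skimage.color import rgb2gray

sample = rgb2gray(data.astronaut())
for name, fn in [('otsu', filters.threshold_otsu),
                 ('li', filters.threshold_li),
                 ('yen', filters.threshold_yen)]:
    print(name, fn(sample))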
Example #5
def calculate_masked_stats():
    plate_no = "59798"
    parsed = get_plate_files(plate_no)
    for w in ['w2']:
        files = list(filter(lambda f: f.wave == w[1], parsed))  # materialise so len() and slicing work on Python 3
        # accum = np.zeros((2160, 2160), dtype=np.uint32)
        # files = filter(lambda x: 's1' not in x and 's7' not in x, all_files)
        nof = len(files)
        for i, frame in enumerate(files[0:5], 1):
            LogHelper.logText(frame.fullpath)
            img = imread(frame.fullpath)
            t = filters.threshold_yen(img)
            b1 = img > t
            b2 = binary_erosion(b1, square(2))
            b3 = binary_dilation(b2, square(10))
            b4 = binary_closing(b3, square(3))
            imm = np.ma.masked_where(b4, img)
            mn, mx = np.percentile(imm, (1, 99))
            LogHelper.logText(
                '%3d of %d, %4d-%4d-%4d-%5d, %.0f-%.0f'
                % (i, nof, imm.min(), mn, mx, imm.max(), imm.mean(), imm.std())
            )
            im2 = imm.filled(int(imm.mean()))
            out_name = "{0}\\{5}-{1}{2}-{3}-{4}.tif".format(ROOT_DIR, frame.row, frame.column, frame.site, LogHelper.init_ts, frame.experiment)
            imsave(out_name, im2)
Example #6
def get_img_thr(img, ratio=10): 
    thr = threshold_yen(img)
    background = img[img<thr]
    if thr < ratio*np.mean(background):
        print("thr:{0:.5f}, ratio*mean:{1:.5f}".format(thr, ratio * np.mean(background)))
        return 0
    return thr
Example #7
def autothreshold(data, threshold_type='otsu', z=2.3264, set_max_ceiling=True):
    if threshold_type.endswith('_p'):
        data = data[data > 0]
    else:
        data = data[data != 0]
    if (threshold_type == 'otsu') or (threshold_type == 'otsu_p'):
        lthres = filters.threshold_otsu(data)
        uthres = data[data > lthres].mean() + (z * data[data > lthres].std())
        # Otsu N (1979) A threshold selection method from gray-level histograms. IEEE Trans. Sys., Man., Cyber. 9: 62-66.
    elif (threshold_type == 'li') or (threshold_type == 'li_p'):
        lthres = filters.threshold_li(data)
        uthres = data[data > lthres].mean() + (z * data[data > lthres].std())
        # Li C.H. and Lee C.K. (1993) Minimum Cross Entropy Thresholding Pattern Recognition, 26(4): 617-625
    elif (threshold_type == 'yen') or (threshold_type == 'yen_p'):
        lthres = filters.threshold_yen(data)
        uthres = data[data > lthres].mean() + (z * data[data > lthres].std())
        # Yen J.C., Chang F.J., and Chang S. (1995) A New Criterion for Automatic Multilevel Thresholding IEEE Trans. on Image Processing, 4(3): 370-378.
    elif threshold_type == 'zscore_p':
        lthres = data.mean() - (z * data.std())
        uthres = data.mean() + (z * data.std())
        if lthres < 0:
            lthres = 0.001
    else:
        lthres = data.mean() - (z * data.std())
        uthres = data.mean() + (z * data.std())
    if set_max_ceiling:  # for the rare case when uthres is larger than the max value
        if uthres > data.max():
            uthres = data.max()
    return lthres, uthres
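# Minimal sanity check for autothreshold() on synthetic data (a sketch; the
# distribution below is arbitrary and `filters` is assumed to be imported as
# the function above requires).
import numpy as np

rng = np.random.default_rng(0)
sample_data = np.abs(rng.normal(loc=1.0, scale=0.5, size=10000))
lthres, uthres = autothreshold(sample_data, threshold_type='otsu', z=2.3264)
print(lthres, uthres)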
def psIImask(img, mode='thresh'):
    ''' 
    Input:
    img = greyscale image
    mode = type of thresholding to perform. Currently only 'thresh' is available
    '''

    # pcv.plot_image(img)
    if mode == 'thresh':

        # this entropy based technique seems to work well when algae is present
        algaethresh = filters.threshold_yen(image=img)
        threshy = pcv.threshold.binary(img, algaethresh, 255, 'light')
        # mask = pcv.dilate(threshy, 2, 1)
        mask = pcv.fill(threshy, 150)
        mask = pcv.erode(mask, 2, 1)
        mask = pcv.fill(mask, 45)
        # mask = pcv.dilate(mask, 2,1)
        final_mask = mask  # pcv.fill(mask, 270)

    else:
        pcv.fatal_error(
            'mode must be "thresh" (default) or an object of class pd.DataFrame'
        )

    return final_mask
def show_thresholds():
    images = f['images']
    filenames = f['filenames']
    for i in range(21, 40):
        image = images[i]
        thresholds = {
            "thresh_yen": skfilt.threshold_yen(image),
            "thresh_otsu": skfilt.threshold_otsu(image),
            "thresh_li": skfilt.threshold_li(image),
            "thresh_iso": skfilt.threshold_isodata(image)
        }
        name = filenames[i]
        fig = plt.figure(i)
        arr = np.asarray(image)
        ax = fig.add_subplot(3, 2, 1)
        ax.imshow(arr, cmap='gray')
        ax.set_title(name)
        for i_plt, thresh_type in enumerate(thresholds.keys()):
            thresh_val = thresholds[thresh_type]
            ax1 = fig.add_subplot(3, 2, i_plt + 2)
            ax1.imshow(image > thresh_val,
                       interpolation='nearest',
                       cmap='gray')
            ax1.set_title(thresh_type +
                          ": {}".format(np.round(thresholds[thresh_type])))
        plt.show()
def threshold_yen_filter(
    folder
):  # iterate through folders, assembling feature, label, and classname data objects
    class_id = 0
    features = []
    labels = np.array([])
    classnames = []
    for root, dirs, filenames in os.walk(folder):
        for d in sorted(dirs):
            #print("Reading data from", d)
            classnames.append(
                d)  # use the folder name as the class name for this label
            files = os.listdir(os.path.join(root, d))
            for f in files:
                imgFile = os.path.join(root, d, f)  # Load the image file
                img = plt.imread(imgFile)
                img = cv2.resize(
                    img,
                    (128,
                     128))  # Resizing all the images to insure proper reading
                img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
                thresh = threshold_yen(img)
                # binarise with the Yen threshold; the flattened mask becomes the feature vector
                binary = (img > thresh).astype(np.uint8)

                features.append(binary.ravel())
                labels = np.append(
                    labels, class_id)  # Add it to the numpy array of labels
            class_id += 1

    features = np.array(
        features)  # Convert the list of features into a numpy array
    return features, labels, classnames
Example #11
def binarize_imageset(image_set):

    imgbin_otsu, imgbin_yen, imgbin_li, imgbin_iso, imgbin_mlss = [
        [] for _ in range(5)
    ]

    for idx, img in enumerate(image_set):
        # Filtering
        #img = ndimage.median_filter(img, size=(7, 7))
        img = denoise_tv_chambolle(img, weight=0.05)
        #img = rescale_intensity(img, in_range=(0, 0.5))
        # Otsu
        imgbin_otsu.append(binarize_imageset_aux(img < threshold_otsu(img)))
        # Yen
        imgbin_yen.append(binarize_imageset_aux(img < threshold_yen(img)))
        # Li
        imgbin_li.append(binarize_imageset_aux(img < threshold_li(img)))
        # ISODATA
        imgbin_iso.append(binarize_imageset_aux(img < threshold_isodata(img)))
        # MLSS
        aux = pd.read_csv('auto_count/mlss/imgbin_mlss' + str(idx + 1) +
                          '.csv')
        imgbin_mlss.append(binarize_imageset_aux(np.asarray(aux,
                                                            dtype='bool')))

    return imgbin_otsu, imgbin_yen, imgbin_li, imgbin_iso, imgbin_mlss
Example #12
    def apply(self, matrix):
        binary = []

        if self.func == 'global':
            value = 0
            if self.method == 'otsu':
                value = threshold_otsu(matrix)
            if self.method == 'isodata':
                value = threshold_isodata(matrix)
            if self.method == 'yen':
                value = threshold_yen(matrix)
            if self.method == 'median':
                value = numpy.median(matrix)
            if self.method == 'kmeans':
                aa = numpy.array(matrix).reshape(-1)
                aa.shape = (aa.shape[0], 1)
                cc = k_means(aa, 5)
                ccc = cc[0].reshape(-1)
                ccc.sort()
                value = ccc[len(ccc) - 2]
            binary = matrix > value

        if self.func == 'adaptive':
            binary = threshold_adaptive(matrix, 127, self.method)

        return binary.astype('float')
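# Note: threshold_adaptive has been removed from recent scikit-image releases;
# threshold_local is the replacement and returns a per-pixel threshold surface
# rather than a binary image, so the comparison must be done explicitly.
# A sketch of the 'adaptive' branch under that assumption (the 127 block size
# is carried over from the call above).
from skimage.filters import threshold_local

def adaptive_binary(matrix, block_size=127, method='gaussian'):
    # threshold_local returns an array of per-pixel thresholds
    local_thresh = threshold_local(matrix, block_size, method=method)
    return (matrix > local_thresh).astype('float')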
def get_background_and_foreground(channel):
    # Binarise the image
    thresh = threshold_yen(channel)
    mask = channel > thresh
    binarized_channel = channel.copy().astype('uint8')
    binarized_channel[mask] = 255
    binarized_channel[~mask] = 0
    # Remove noise
    kernel = np.ones((3, 3), np.uint8)
    opening = cv2.morphologyEx(
        binarized_channel, cv2.MORPH_OPEN, kernel, iterations=3)
    # Area that is certainly not part of a cell
    sure_bg = cv2.dilate(opening, kernel, iterations=3)
    # Area that is certainly inside the cell
    # almost_sure_fg = cv2.erode(opening, kernel, iterations=1)
    dist_transform = cv2.distanceTransform(
        opening, cv2.DIST_L2, cv2.DIST_MASK_PRECISE)
    _, sure_fg = cv2.threshold(
        dist_transform, 0.1 * dist_transform.max(),
        255, cv2.THRESH_BINARY)
    # Area we are not certain about
    sure_fg = np.uint8(sure_fg)
    unknown = cv2.subtract(sure_bg, sure_fg)

    return sure_bg, sure_fg, unknown
Example #14
def to_binary_image(image):
    """
        Convert the image to a binary image using Yen's threshold.
    """
    threshold_value = threshold_yen(image)
    binary_car_image = image < threshold_value
    return binary_car_image
def gray_to_binary_with_thresh_method(image, is_gray, thresh_method):
    ### Set image to grayscale if needed
    if is_gray == 0:
        ### Convert to grayscale
        grayscale = rgb2gray(image)
    elif is_gray == 1:
        ### Does nothing to the image, already grayscale
        grayscale = image
    ### Determine threshold method to use according to parameter thresh_method
    if thresh_method == 'yen':
        ### Calculate threshold according to Yen's method
        thresh = threshold_yen(grayscale)
    elif thresh_method == 'isodata':
        ### Calculate threshold according to the isodata method
        thresh = threshold_isodata(grayscale)
    else:
        ### Fail early instead of hitting a NameError on 'thresh' below
        raise ValueError('Invalid threshold method! Please choose isodata or yen.')

    ### Generate binary image
    binary = grayscale > thresh
    ### Show thresholded image
    plt.imshow(binary, cmap='gray')
    plt.show()
    cv2.waitKey(0)
    cv2.destroyAllWindows()
    ### Return binary image
    return binary
Example #16
def psIImask(img, mode='thresh'):
    # pcv.plot_image(img)
    if mode == 'thresh':

        # this entropy based technique seems to work well when algae is present
        algaethresh = filters.threshold_yen(image=img)
        threshy = pcv.threshold.binary(img, algaethresh, 255, 'light')
        # mask = pcv.dilate(threshy, 2, 1)
        mask = pcv.fill(threshy, 250)
        mask = pcv.erode(mask, 2, 1)
        mask = pcv.fill(mask, 100)
        final_mask = mask  # pcv.fill(mask, 270)

    elif isinstance(mode, pd.DataFrame):
        rownum = mode.imageid.values.argmax()
        imgdf = mode.iloc[[1, rownum]]
        fm = cv2.imread(imgdf.filename[0])
        fmp = cv2.imread(imgdf.filename[1])
        npq = np.float32(np.divide(fm, fmp, where=fmp != 0) - 1)
        npq = np.ma.array(fmp, mask=fmp < 200)
        plt.imshow(npq)
        # pcv.plot_image(npq)

        final_mask = np.zeros_like(img)

    else:
        pcv.fatal_error(
            'mode must be "thresh" (default) or an object of class pd.DataFrame'
        )

    return final_mask
Example #17
def plot_bi(filename):
    image = DatToMatrix(filename)
    # dst =filters.threshold_local(data,11,'median')
    thresh = filters.threshold_yen(image)
    print("threshold:", thresh)
    dst = (image <= thresh) * 1.0
    plt.figure('thresh', figsize=(11, 11))
    plt.subplot(121)
    plt.title('original image')
    plt.imshow(image, plt.cm.gray)
    plt.subplot(122)
    plt.title('binary image')
    plt.imshow(dst, plt.cm.gray)

    dst1 = sm.erosion(image, sm.square(1))
    dst2 = sm.erosion(image, sm.square(5))
    dst3 = sm.erosion(image, sm.square(6))
    dst4 = sm.erosion(image, sm.square(3))
    plt.figure('morphology', figsize=(11, 11))
    plt.subplot(141)
    plt.title('morphological image_filter1*1')
    plt.imshow(dst1, plt.cm.gray)
    plt.subplot(142)
    plt.title('morphological image_filter5*5')
    plt.imshow(dst2, plt.cm.gray)
    plt.subplot(143)
    plt.title('morphological image_filter6*6')
    plt.imshow(dst3, plt.cm.gray)
    plt.subplot(144)
    plt.title('morphological image_filter3*3')
    plt.imshow(dst4, plt.cm.gray)

    plt.show()
Example #18
    def __call__(self, img):
        # img is a numpy rgb image
        grey_img = rgb2grey(img)
        t1 = filters.threshold_minimum(grey_img)
        t2 = filters.threshold_yen(grey_img)

        img1 = mark_boundaries(img, (grey_img > t1), color=(1, 0, 0))
        img1 = mark_boundaries(img1, (grey_img > t2), color=(1, 0, 0))
        img2 = mark_boundaries(img, grey_img < 0)
        img = ((img1 + img2) / 2)

        #img = mark_boundaries(img, quickshift(img_as_float(img), kernel_size =5, max_dist = 10, ratio = 1.0))

        #img = mark_boundaries(img, slic(img_as_float(img), n_segments=10))
        #fimg = rgb2grey(img)
        #t = filters.threshold_otsu(fimg)
        #img = mark_boundaries(img, (fimg > t).astype(np.uint8), color=(1,0,0))
        #img  = mark_boundaries(img, (fimg - filters.threshold_niblack(fimg)< 0).astype(np.uint8), color=(1,0,0))

        #img_gray = rgb2grey(img)
        #img_gray = img[:, :, 1]
        # morphological opening (size tuned on training data)
        #circle7 = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (7, 7))
        #img_open = cv2.morphologyEx(img_gray, cv2.MORPH_OPEN, circle7)
        # Otsu thresholding
        #img_th = cv2.threshold(img_open, 0, 255, cv2.THRESH_OTSU)[1]
        # Invert the image in case the objects of interest are in the dark side
        #if (np.sum(img_th == 255) > np.sum(img_th == 0)):
        #    img_th = cv2.bitwise_not(img_th)
        # second morphological opening (on binary image this time)
        #bin_open = cv2.morphologyEx(img_th, cv2.MORPH_OPEN, circle7)
        # connected components
        #img = mark_boundaries(img,cv2.connectedComponents(bin_open)[1], color=(1,0,0))

        return (img * 255).astype(np.uint8)
def thresholding(image, threshold):
	if isinstance(threshold,int) or isinstance(threshold,float):
		thresh = threshold
	elif isinstance(threshold,str):
		# Assume it's OK to use the same threshold for each layer.
		parsestr = threshold.split(" ")
		parsestr = [i.lower() for i in parsestr]
		parsestr = set(parsestr)
		if "otsu" in parsestr:
			if "global" in parsestr:
				ind = np.argmax([np.mean(i) for i in image])
				thresh = filters.threshold_otsu(image[ind])
				print(thresh, ind)
			elif "local" in parsestr:
				radius = int(threshold.split(" ")[2])  # parsestr is a set, so re-split the original string for the radius
				mask = morphology.disk(radius)
				thresh = filters.rank.otsu(image,mask)
		if "li" in parsestr:
			thresh = filters.threshold_li(image)
		if "yen" in parsestr:
			thresh = filters.threshold_yen(image)

	threshinds = image<thresh
	binimg =  np.ones(image.shape)
	binimg[threshinds] = 0
	return binimg, thresh
Example #20
def experiment_yen_threshold(image=None):
    images = image_generator()
    if image is not None:
        img_path = os.path.join(BASE_DIR, "data/imgs/")
        fn = os.path.join(img_path, image)
        im = load_images([fn])[0].astype("int32")
        images = itertools.chain([(fn, im)], images)

    for fn, im in images:
        imgs = [im]
        titles = ["Original"]
        print("computing yen threshold")
        yen = threshold_yen(im)
        channels = threshold_by_channel(im, yen)
        titles.append("Yen Thresholded")
        imgs.append(channels[0])

        for cp in [175, 200, 225, 250]:
            titles.append("Cutpoint %d" % cp)
            print("cutting")
            rtn = np.zeros(im.shape)
            rtn[np.average(im, axis=2, weights=[0.7, 0.1, 0.2]) > cp, 0] = 255
            for i in range(1, 3):
                rtn[:, :, i] = rtn[:, :, 0]

            imgs.append(rtn)

        print("plotting")
        plot_images(imgs, titles=titles, suptitle=fn.split("/")[-1])
Example #21
    def get_vert_lines(self, sz_filt):

        img = self.CleanedImage

        img -= np.min(img)
        img /= np.max(img)

        # Smooth image
        if sz_filt > 0:
            img = filters.median(img, morphology.disk(sz_filt))

        # Use vertical Sobel filter (since we know the lines are always the same orientation)
        sb = filters.sobel_v(img)

        # Normalize data between 0 and 1
        sb -= np.min(sb)
        sb /= np.max(sb)

        # Find Yen threshold
        thr = filters.threshold_yen(sb)

        thr_img = sb < thr
        if np.sum(thr_img) > np.sum(~thr_img):
            thr_img = ~thr_img

        thr_img = morphology.binary_dilation(thr_img, morphology.square(4))
        thr_img = morphology.binary_erosion(thr_img, morphology.square(4))

        return thr_img
Example #22
def count_ships(path, seg, date_mask):
    struct = disk(2)

    seg = binary_closing(seg, struct).astype(bool)  # np.bool was removed from recent NumPy
    im = imread(path)
    im = rgba2rgb(im)
    grayscale = rgb2gray(im)

    thresh = threshold_yen(grayscale)
    yen = grayscale > thresh

    eroded = binary_erosion(seg, struct).astype(bool)

    grayscale = grayscale > 0.6

    grayscale[(eroded == 0) | (date_mask == 1) | (yen == 0)] = 0

    grayscale[date_mask == 1] = 0
    grayscale = binary_fill_holes(grayscale)
    grayscale = clear_border(grayscale)
    l = label(grayscale)
    for region in regionprops(l):
        if region.area > 100 or region.extent < 0.2:
            grayscale = flood_fill(grayscale, tuple(region.coords[0, :]), 0)

    label_image, num = label(grayscale, return_num=True)
    return num, grayscale
Example #23
def skimage_cropping(filepath, target_height, target_width):
    # First crop
    img = imread(filepath)
    image_crop = img[160:160 + 880, 580:580 + 820]

    # Masking/closing pixel regions and labeling pixels
    thresh = threshold_yen(image_crop)
    img_closing = closing(image_crop > thresh, square(3))
    img_label = sk_label(img_closing)

    # Search for biggest area and extract centroid
    max_area = 0
    for region in regionprops(img_label):
        if region.area > max_area:
            max_area = region.area
            biggest = region
    center = biggest.centroid

    # Draw square bounding box around centroid
    square_side = 300
    step = square_side / 2
    min_row, max_row, min_col, max_col = max([0, int(center[0]-step)]),\
                                         int(center[0]+step), \
                                         max([0, int(center[1]-step)]),\
                                         int(center[1]+step)

    # Crop and resize image to square bounding box
    image_square = image_crop[min_row:max_row, min_col:max_col]
    image_resize = resize(image_square, [target_height, target_width],
                          preserve_range=True).astype(np.uint8)

    return image_resize
Example #24
def clean_region(region_bw, I_sub):
    vals = I_sub.ravel()
    bw_idx = region_bw.ravel().astype('bool')
    vals = vals[bw_idx]
    T = filters.threshold_yen(vals)
    new_bw = (I_sub < T) * region_bw
    # try find contours
    return (new_bw)
Example #25
def attempt2(img):
    pimg = img
    pimg = exposure.adjust_sigmoid(pimg, gain=100)
    thr = filters.thresholding.threshold_otsu(pimg)
    pimg = pimg > thr
    num_white = np.sum(pimg)
    num_black = np.prod(pimg.shape) - num_white
    if num_white > num_black:
        pimg = 1 - pimg
    pimg = morphology.skeletonize(pimg)

    lines = transform.probabilistic_hough_line(pimg)

    directions = np.array([angle(point1, point2) for point1, point2 in lines])
    directions = np.where(directions < 0, directions + 180, directions)

    hist = np.histogram(directions, range=[0, 180], bins=180)
    sort_indexes = np.argsort(hist[0])
    hist = hist[0][sort_indexes], hist[1][sort_indexes]

    a1, a2 = hist[1][-2:]

    rot_pimg1 = transform.rotate(pimg, a1 - 90, resize=True)
    rot_pimg2 = transform.rotate(pimg, a2 - 90, resize=True)
    width = 3
    kernel1 = np.pad(np.ones([rot_pimg1.shape[0], width]),
                     1,
                     mode='constant',
                     constant_values=-1)
    kernel2 = np.pad(np.ones([rot_pimg2.shape[0], width]),
                     1,
                     mode='constant',
                     constant_values=-1)
    corr1 = ndimage.correlate(rot_pimg1, kernel1, mode='constant')
    corr2 = ndimage.correlate(rot_pimg2, kernel2, mode='constant')
    corr_rot1 = cut(transform.rotate(corr1, 90 - a1, resize=True), pimg.shape)
    corr_rot2 = cut(transform.rotate(corr2, 90 - a2, resize=True), pimg.shape)

    binary1 = np.where(corr_rot1 > filters.threshold_yen(corr1), 1, 0)
    binary2 = np.where(corr_rot2 > filters.threshold_yen(corr2), 1, 0)
    selem = np.ones((10, 10))
    binary_dilated1 = morphology.dilation(binary1, selem=selem)
    binary_dilated2 = morphology.dilation(binary2, selem=selem)

    mask = np.logical_or(binary_dilated1, binary_dilated2)
    return (1 - mask) * pimg
Example #26
def restaurar(img):
    yen_threshold = threshold_yen(img) + 15
    bright = rescale_intensity(img, (0, yen_threshold), (0, 255))
    # cv.imshow('bright', bright)

    bright = filtro_non_local_mean(bright)
    bright = filtro_mediana(bright, 3)
    return bright
def EnhanceContrast(img):
    img = cv.cvtColor(img, cv.COLOR_RGB2GRAY)
    yen_threshold = threshold_yen(img)
    bright = rescale_intensity(img, (0, yen_threshold), (0, 255))
    # increase line width
    kernel = np.ones((3, 3), np.uint8)
    imgMorph = cv.erode(bright, kernel, iterations=1)
    return imgMorph
Example #28
def make_tr_mask(tr_cropped):
    tr_threshold = threshold_yen(tr_cropped)
    tr_mask = tr_cropped > tr_threshold
    if len(np.unique(tr_mask)) != 1:
        tr_clean = remove_small_objects(tr_mask, min_size=5)
    else:
        tr_clean = np.zeros(tr_mask.shape, dtype=bool)
    return tr_clean
def extract_roi(subjects, data_folder, out_folder, th_method='otsu'):
    print(f'Building model ...')
    stable_model = np.zeros(shape=(256, 256, 256))
    conversion_model = np.zeros_like(stable_model)
    n_stables = n_converters = 0  # Reset counters
    affine = np.eye(4)  # Default affine

    for subject in tqdm(subjects, ncols=150, desc='Subjects', unit='subj'):
        # Load subject volume
        mag, angle, affine, label = load_subject_volume(subject_id=subject,
                                                        data_folder=data_folder,
                                                        space='edge')
        if mag is None:
            continue  # Ignore subject

        # Add up to model
        if label == 'MCInc':
            stable_model += mag
            n_stables += 1
        elif label == 'MCIc':
            conversion_model += mag
            n_converters += 1
    # Compute average models
    stable_model /= n_stables
    conversion_model /= n_converters
    difference_map = np.abs(stable_model - conversion_model)

    # Thresholding
    if th_method == 'otsu':
        threshold = filters.threshold_otsu(difference_map[np.where(difference_map > 0)])
    else:
        threshold = filters.threshold_yen(difference_map)

    # Raw ROI and eroded
    roi_mask = (difference_map >= threshold).astype(np.int8)
    roi_mask_closed = ndi.binary_erosion(roi_mask).astype(np.int8)

    models_dict = {
        'MCInc': stable_model,
        'MCIc': conversion_model,
        'diff': difference_map,
        f'{th_method}_mask': roi_mask,
        'eroded_mask': roi_mask_closed
    }

    # Create output folder
    os.makedirs(out_folder, exist_ok=True)

    # Create and save models as NIFTI images
    print(f'Saving models ...')
    for label, vol in models_dict.items():
        nii_model = nb.Nifti1Image(vol, affine)
        nb.save(nii_model, join(out_folder, f'{label.lower()}_model.nii.gz'))
    print(f'\t- Stable subjects: {n_stables}')
    print(f'\t- Converter subjects: {n_converters}')
    print(f'\t- Threshold: {threshold:.2f} ({th_method})')
    print(f'\t- Stats: {describe(stable_model.ravel())}')
    return roi_mask, difference_map
Example #30
    def transform(self, images):
        binary_images = []

        for i in range(len(images)):
            images[i] = np.array(images[i]).reshape(self.shape)
            thresh_yen = threshold_yen(images[i])
            binary_images.append((images[i] > thresh_yen).ravel())

        return binary_images
Example #31
def gt_transform(im):
    im = rgb2gray(im)
    if (GT_TRANSFORM == 'img_as_bool'):
        return img_as_bool(im)
    elif (GT_TRANSFORM == 'threshold_yen'):
        return im > threshold_yen(im)
    else:
        raise NotImplementedError(
            '`{}` transform for GT not implemented.'.format(GT_TRANSFORM))
Example #32
def auto_brightness_and_contrast(image: np.ndarray,
                                 percent_factor=300) -> np.ndarray:
    """
    See:
    https://stackoverflow.com/questions/56905592/automatic-contrast-and-brightness-adjustment-of-a-color-photo-of-a-sheet-of-pape
    """

    yen_threshold = threshold_yen(image) * percent_factor / 100
    return rescale_intensity(image, (0, yen_threshold), (0, image.max()))
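# Usage sketch on a bundled image (assumes the module already imports
# threshold_yen and rescale_intensity, as the function above requires).
from skimage import data

camera = data.camera()
brightened = auto_brightness_and_contrast(camera, percent_factor=300)
print(camera.max(), brightened.max())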
Example #33
    def run(self, image):
        if not self.high:
            self.high = filters.threshold_yen(image)
        else:
            self.high = self.high * image.max()
        low = self.high * (1 - self.expansion)
        self.mask = filters.apply_hysteresis_threshold(image, low, self.high)
        self.image = self.apply_mask(image, self.mask)
        return self.image
Example #34
def experiement_yen_regions_bounding_rectangle(file=None, dilation_iterations=40, num_regions=5, radius=2):
    from skimage.transform import warp
    from skimage.transform import SimilarityTransform

    images = image_generator()
    images = add_image_to_image_generator(images, file)
    n_points = 8 * radius

    for fn, im in images:
        image_arrays = []
        histograms = []
        image_titles = []
        hist_titles = []

        image_arrays.append(im)
        image_titles.append("original image")

        yen = threshold_yen(im)
        yen_channels = threshold_by_channel(im, yen)
        image_titles.append("Yen Thresholded Channel 1")
        image_arrays.append(yen_channels[0])

        binary_image = yen_channels[0][:, :, 0] > 0
        structure = calculate_binary_opening_structure(binary_image)
        binary_image = binary_opening(binary_image, structure=structure)
        binary_image = binary_dilation(binary_image, iterations=dilation_iterations)
        regions, labels = extract_largest_regions(binary_image, num_regions=num_regions)

        for label in labels:
            print("Cropping region %d" % label)
            region = np.zeros(yen_channels[0].shape[:-1])
            region[regions == label] = yen_channels[0][regions == label, 0]

            # convex hull
            verts = convex_hull_mask(region > 0, mask=False)
            corners, rot, angle = minimum_bounding_rectangle(verts)

            yen_crop = rotate_crop_gray_image_from_mbr(region, corners, rot, angle)
            image_titles.append("Yen Region %d Crop" % label)
            image_arrays.append(yen_crop)

            rgb_crop = rotate_crop_rgb_image_from_mbr(im, corners, rot, angle)
            image_titles.append("RGB Crop %d" % label)
            image_arrays.append(rgb_crop)

        plot_images_and_histograms(
            image_arrays=image_arrays, image_titles=image_titles, image_suptitle=fn.split("/")[-1]
        )
Example #35
def experiment_yen_mask_rescale(file=None):
    images = image_generator()
    if file is not None:
        images = add_image_to_image_generator(images, file)

    claheizer = cv2.createCLAHE()

    for fn, im in images:
        image_arrays = []
        titles = []

        # plot original image
        titles.append("Original")
        image_arrays.append(im)

        # thresholded
        yen = threshold_yen(im)
        thresholded = threshold_by_channel(im, yen)[0]
        titles.append("Yen Thresholded")
        image_arrays.append(thresholded)

        # masked and equalized
        masked = im.copy()
        masked[thresholded <= 0] = 0

        titles.append("Masked")
        image_arrays.append(masked)

        # equalize
        from skimage.util import img_as_ubyte

        equalized = masked.copy()
        for chan in range(1, 3):
            img = masked[:, :, chan].astype("uint8")
            rng = (0, 175) if chan == 1 else (175, 255)
            rescaled = exposure.rescale_intensity(img, in_range=rng)
            equalized[:, :, chan] = rescaled
            break

        titles.append("Equalized")
        image_arrays.append(equalized)

        # plot
        subplot_images(image_arrays, titles=titles, show_plot=True, suptitle=fn.split("/")[-1])
Example #36
def temp_filter_method_adaptive_thresholding(imageFile):
    img = data.imread(imageFile, as_grey=True)

    global_thresh = threshold_yen(img)
    # True False binary matrix represent color value of the img using global thresholding
    binary_global = img > global_thresh

    block_size = 40

    # True False binary matrix represent color value of the img using adaptive thresholding
    binary_adaptive = threshold_adaptive(img, block_size, offset=0)

    # 0 1 binary matrix
    img_bin_global = clear_border(img_as_uint(binary_global))

    # 0 1 binary matrix 
    img_bin_adaptive = clear_border(img_as_uint(binary_adaptive))


    bin_pos_mat = ocr.binary_matrix_to_position(binary_adaptive)

    np.savetxt("test.txt", bin_pos_mat)  # pass fmt="%.5f" to round values to 5 decimals
def CANNY(rgbImage):
    image = color.rgb2gray(rgbImage)

    image = image[100:-100, 100:-100]
    image = transform.resize(image, (1000, 1000))

    edges_mask = canny(image)
    image[edges_mask] = 0.0

    val = filters.threshold_yen(image)
    mask = image <= val

    regions = list(regionprops(label(mask)))
    if len(regions) == 0:
        return image
    biggest_region = max(regions, key=lambda x: x.area)

    minr, minc, maxr, maxc = biggest_region.bbox
    image = image[minr:maxr, minc:maxc]

    image = transform.resize(image, (100, 100))

    return image
Example #38
def experiment_thresholding(votes_min=3):
    from skimage.filters import threshold_otsu
    from skimage.filters import threshold_li
    from skimage.filters import threshold_yen
    from skimage.filters import threshold_adaptive
    from scipy.ndimage import median_filter

    images = image_generator()

    for fn, im in images:
        print("inspecting image: ", fn)
        print("computing otsu threshold")
        otsu = threshold_otsu(im)
        otsu_ch1 = np.zeros(im.shape)
        otsu_ch2 = np.zeros(im.shape)
        otsu_ch3 = np.zeros(im.shape)
        otsu_ch1[im[:, :, 0] > otsu, 0] = 255
        otsu_ch2[im[:, :, 1] > otsu, 1] = 255
        otsu_ch3[im[:, :, 2] > otsu, 2] = 255
        otsu_ch1 = smallest_partition(otsu_ch1, 0)
        otsu_ch2 = smallest_partition(otsu_ch2, 1)
        otsu_ch3 = smallest_partition(otsu_ch3, 2)

        print("computing yen threshold")
        yen = threshold_yen(im)
        yen_ch1 = np.zeros(im.shape)
        yen_ch2 = np.zeros(im.shape)
        yen_ch3 = np.zeros(im.shape)
        yen_ch1[im[:, :, 0] > yen, 0] = 255
        yen_ch2[im[:, :, 1] > yen, 1] = 255
        yen_ch3[im[:, :, 2] > yen, 2] = 255
        yen_ch1 = smallest_partition(yen_ch1, 0)
        yen_ch2 = smallest_partition(yen_ch2, 1)
        yen_ch3 = smallest_partition(yen_ch3, 2)

        print("computing li threshold")
        li = threshold_li(im)
        li_ch1 = np.zeros(im.shape)
        li_ch2 = np.zeros(im.shape)
        li_ch3 = np.zeros(im.shape)
        li_ch1[im[:, :, 0] > li, 0] = 255
        li_ch2[im[:, :, 1] > li, 1] = 255
        li_ch3[im[:, :, 2] > li, 2] = 255
        li_ch1 = smallest_partition(li_ch1, 0)
        li_ch2 = smallest_partition(li_ch2, 1)
        li_ch3 = smallest_partition(li_ch3, 2)

        print("computing average threshold")
        av_ch1 = np.zeros(im.shape)
        av_ch2 = np.zeros(im.shape)
        av_ch3 = np.zeros(im.shape)
        votes1 = otsu_ch1 + yen_ch1 + li_ch1
        votes2 = otsu_ch2 + yen_ch2 + li_ch2
        votes3 = otsu_ch3 + yen_ch3 + li_ch3
        av_ch1[votes1[:, :, 0] >= (255 * votes_min), 0] = 255
        av_ch2[votes2[:, :, 1] >= (255 * votes_min), 1] = 255
        av_ch3[votes3[:, :, 2] >= (255 * votes_min), 2] = 255

        thresholded_images = [
            otsu_ch1,
            otsu_ch2,
            otsu_ch3,
            yen_ch1,
            yen_ch2,
            yen_ch3,
            li_ch1,
            li_ch2,
            li_ch3,
            av_ch1,
            av_ch2,
            av_ch3,
        ]

        print("filtering out specks")
        for idx, im in enumerate(thresholded_images):
            thresholded_images[idx] = median_filter(im, size=3)

        titles = [
            "Channel 1 Otsu",
            "Channel 2 Otsu",
            "Channel 3 Otsu",
            "Channel 1 Yen",
            "Channel 2 Yen",
            "Channel 3 Yen",
            "Channel 1 Li",
            "Channel 2 Li",
            "Channel 3 Li",
            "Channel 1 Avg",
            "Channel 2 Avg",
            "Channel 3 Avg",
        ]

        print("plotting")
        plot_images(thresholded_images, titles=titles, suptitle=fn.split("/")[-1])
    def get_threshold(self, image):
        return Threshold.color_in_range(image, threshold_yen(image))
    def create_slicemap_config(self):
        filter = self.filter_name
        slicemaps_paths = []
        for i in range(0, self.slicemaps_number):
            slicemaps_paths.append(
                os.path.join(
                    self.relative_path_to_slicemaps_dir,
                    self.slicemap_name_format.format(
                        i,
                        self.row_col[0],
                        self.row_col[1],
                        int(self.slicemap_size[0]),
                        int(self.slicemap_size[1]),
                        self.filter_name,
                    ),
                )
            )

        rows_cols = self.row_col
        slicemap_size = self.slicemap_size
        slices_range = self.slices_range
        original_slice_size = self.original_slice_size
        volume_size = [
            self.area_of_slice[1][0] - self.area_of_slice[0][0],
            self.area_of_slice[1][1] - self.area_of_slice[0][1],
            self.slices_range[1] - self.slices_range[0],
        ]
        area_of_slice = [self.area_of_slice[0], self.area_of_slice[1]]

        slice_path_to_middle_slice = self.slices_path_list[int((self.slices_range[0] + self.slices_range[1]) / 2)]
        middle_slice = Image.open(slice_path_to_middle_slice)
        middle_slice_numpy_array = numpy.array(middle_slice)
        threshold_otsu_index = threshold_otsu(middle_slice_numpy_array) / 255.0
        threshold_isodata_index = threshold_isodata(middle_slice_numpy_array) / 255.0
        threshold_yen_index = threshold_yen(middle_slice_numpy_array) / 255.0
        threshold_li_index = threshold_li(middle_slice_numpy_array) / 255.0

        print("******************************************************************")
        print("Slicemap size:                   {0},{1} px".format(self.slicemap_size[0], self.slicemap_size[1]))
        print("Number of slicemaps:             {0}".format(self.slicemaps_number))
        print("Rows and cols:                   {0},{1}".format(rows_cols[0], rows_cols[1]))
        print("Number of slices:                {0}".format(self.number_of_slices))
        print(
            "Original slice size:             {0},{1} px".format(
                int(self.original_slice_size[0]), int(self.original_slice_size[1])
            )
        )
        print(
            "Slicemap slice size:             {0},{1} px".format(
                int(self.slicemap_slice_size[0]), int(self.slicemap_slice_size[1])
            )
        )
        print(
            "Proportional slicemap slice size: {0},{1} px".format(
                self.proposional_slicemap_slice_size[0], self.proposional_slicemap_slice_size[1]
            )
        )
        print("Volume proportions:              {0},{1},{2}".format(volume_size[0], volume_size[1], volume_size[2]))
        print("Interpolation filter name:       {0}".format(self.filter_name))
        print("Number of rows and cols:         {0},{1}".format(self.row_col[0], self.row_col[1]))
        print(
            "Global path to slicemaps:        {0}".format(
                os.path.join(
                    self.global_path_to_slicemaps_dir,
                    self.slicemap_name_format.format(
                        "n",
                        self.row_col[0],
                        self.row_col[1],
                        int(self.slicemap_size[0]),
                        int(self.slicemap_size[1]),
                        self.filter_name,
                    ),
                )
            )
        )
        print(
            "Relative path to slicemaps:      {0}".format(
                os.path.join(
                    self.relative_path_to_slicemaps_dir,
                    self.slicemap_name_format.format(
                        "n",
                        self.row_col[0],
                        self.row_col[1],
                        int(self.slicemap_size[0]),
                        int(self.slicemap_size[1]),
                        self.filter_name,
                    ),
                )
            )
        )
        print("Path to slices:                  {0}".format(self.path_to_slices))
        print("Name of first slice:             {0}".format(self.files_list[0]))
        print(
            "Name of first slicemaps:         {0}".format(
                self.slicemap_name_format.format(
                    0,
                    self.row_col[0],
                    self.row_col[1],
                    int(self.slicemap_size[0]),
                    int(self.slicemap_size[1]),
                    self.filter_name,
                )
            )
        )
        print("Area_of_slice:                   {0},{1} px".format(self.area_of_slice[0], self.area_of_slice[1]))
        print("threshold_otsu_index:            {0}".format(threshold_otsu_index))
        print("threshold_isodata_index:         {0}".format(threshold_isodata_index))
        print("threshold_yen_index:             {0}".format(threshold_yen_index))
        print("threshold_li_index:              {0}".format(threshold_li_index))

        data = {}
        data["filter"] = self.filter_name
        data["slicemaps_paths"] = []
        for i in range(0, self.slicemaps_number):
            data["slicemaps_paths"].append(
                os.path.join(
                    self.relative_path_to_slicemaps_dir,
                    self.slicemap_name_format.format(
                        i,
                        self.row_col[0],
                        self.row_col[1],
                        int(self.slicemap_size[0]),
                        int(self.slicemap_size[1]),
                        self.filter_name,
                    ),
                )
            )

        data["row_col"] = rows_cols
        data["slicemap_size"] = slicemap_size
        data["slices_range"] = slices_range
        data["original_slice_size"] = original_slice_size
        data["slicemap_slice_size"] = self.slicemap_slice_size
        data["volume_size"] = volume_size
        data["area_of_slice"] = area_of_slice
        data["threshold_indexes"] = {
            "otsu": threshold_otsu_index,
            "isodata": threshold_isodata_index,
            "yen": threshold_yen_index,
            "li": threshold_li_index,
        }

        jsonString = json.dumps(data)

        print("************ CONFIG BEGIN ************ ")
        print(jsonString)
        print("************ CONFIG END ************** ")

        if not (os.path.exists(self.path_to_configs_dir)):
            os.makedirs(self.path_to_configs_dir)

        config_file = open(self.path_to_config, "w")
        config_file.write(jsonString)
        config_file.close()
Example #41
def experiment_yen_distort_reds(file=None):
    images = image_generator()
    if file is not None:
        images = add_image_to_image_generator(images, file)

    for fn, im in images:
        image_arrays = []
        titles = []

        # plot original image
        titles.append("Original")
        image_arrays.append(im)

        yen = threshold_yen(im)
        thresholded = threshold_by_channel(im, yen)[0]
        titles.append("Yen Thresholded")
        image_arrays.append(thresholded)

        # equalize
        equalized = np.zeros_like(im)
        for chan in range(3):
            equalized[:, :, chan] = (exposure.equalize_hist(im[:, :, chan]) * 255).astype("uint8")

        titles.append("Equalized")
        image_arrays.append(equalized)

        # experiments point to a reasonable threshold of 10 or 20
        thresh = 140  # TUNE: 160
        reds_ycbcr = extract_brightest_reds(equalized, thresh=thresh, verbose=False)
        titles.append("Reds Thresholded YCBCR %d" % thresh)
        image_arrays.append(reds_ycbcr)

        filled_ycbcr = reds_ycbcr.copy()
        mask = ndimage.binary_fill_holes((reds_ycbcr[:, :, 0] > 0))
        # mask = ndimage.binary_fill_holes((np.amax(reds_ycbcr, axis=2) > 0))
        filled_ycbcr[mask, :] = np.array([255, 0, 0])

        titles.append("Filled YCBCR Reds")
        image_arrays.append(filled_ycbcr)

        smoothed_ycbcr = reds_ycbcr.copy()
        smoothed_ycbcr[:, :, 0] = binary_closing(smoothed_ycbcr[:, :, 0] > 0, iterations=3) * 255
        mask = np.amax(smoothed_ycbcr, axis=2) > 0
        structure = calculate_binary_opening_structure(mask, weight=1, hollow=False)
        mask = binary_opening(mask, structure=structure, iterations=1)
        smoothed_ycbcr[mask <= 0, :] = 0

        titles.append("Smoothed YCBCR Reds")
        image_arrays.append(smoothed_ycbcr)

        # plot
        subplot_images(image_arrays, titles=titles, show_plot=True, suptitle=fn.split("/")[-1])
        continue  # everything below this point is unreachable in the current version

        # segmented image
        segmented = segment_image(smoothed_ycbcr, n_segments=3, compactness=100, sigma=2)
        titles.append("Segmented")
        image_arrays.append(segmented)

        titles.append("Least Gray Bright Region")
        image_arrays.append(bright)

        d, m, b = extract_colors_grays(segmented, n_grays=6)
        grays = select_colors(segmented, [d, m, b])

        titles.append("Gray Regions of Varied Intensity")
        image_arrays.append(grays)

        quantized = quantize_colors(
            segmented,
            n_colors=3,
            n_samples=1000,
            max_iter=300,
            n_init=10,
            n_jobs=1,
            random_state=SEED,
            verbose=True,
            split=False,
        )

        titles.append("Quantized Image %d Colors" % 3)
        image_arrays.append(quantized)

        quantized = quantize_colors(
            segmented,
            n_colors=4,
            n_samples=1000,
            max_iter=300,
            n_init=10,
            n_jobs=1,
            random_state=SEED,
            verbose=True,
            split=False,
        )

        titles.append("Quantized Image %d Colors" % 4)
        image_arrays.append(quantized)

        quantized = quantize_colors(
            segmented,
            n_colors=5,
            n_samples=1000,
            max_iter=300,
            n_init=10,
            n_jobs=1,
            random_state=SEED,
            verbose=True,
            split=False,
        )

        titles.append("Quantized Image %d Colors" % 5)
        image_arrays.append(quantized)

        subplot_images(image_arrays, titles=titles, show_plot=True, suptitle=fn.split("/")[-1])
print(im.shape, im.dtype)

binThrAd = filters.threshold_adaptive(im,21, method='median', offset=0, mode='reflect', param=None)

thrIso = filters.threshold_isodata(im, nbins=256, return_all=False)
binIso = im > thrIso
tiIso = "Iso %d" % thrIso
print(thrIso)

thrLi = filters.threshold_li(im)
binLi = im > thrLi
tiLi = "Li %d" % thrLi

thrOtsu = filters.threshold_otsu(im, nbins=256)
binOtsu = im > thrOtsu
tiOtsu = "Otsu %d" % thrOtsu

thrYen = filters.threshold_yen(im, nbins=256)
binYen = im > thrYen
tiYen = "Yen %d" % thrYen

# cImg = displayImg(binYen, name='coins-binYen')
# cImg.show()

fPan = displayAll([im, binThrAd, binIso, binLi, binOtsu, binYen], ["Original", "Adaptive", tiIso, tiLi, tiOtsu, tiYen], fSize=12)
fPan.show()



Example #43
    #image = data.imread("/Users/exequiel/projects/roots/python/processing/1.15.AVI/selected/frame-54.tiff")

    block_size = 35
    gray = rgb2gray(image)
    global_thresh = threshold_otsu(gray)
    print(global_thresh)
    global_thresh = 0.65
    for t in np.linspace(global_thresh,1.0,20):
        binary_global = gray > t
        #binary_global = gray > 0.90

        print("threshold_otsu", threshold_otsu(gray))
        print("threshold_isodata", threshold_isodata(gray))
        print("threshold_li", threshold_li(gray))
        print("threshold_yen", threshold_yen(gray))




        label_img = label(binary_global, connectivity=binary_global.ndim)
        props = regionprops(label_img)
        boxes = []
        for pp in props:
            minr, minc, maxr, maxc = pp.bbox
            boxes += [(minc, minr, maxc - minc, maxr - minr)]


        valid_boxes = filter_valid_boxes(boxes,6,40,14,24,300,900)
        if len(valid_boxes) > 0:
            fig, ax = plt.subplots()
Example #44
def segmentation(image, method='otsu'):

    if (method == 'region'):
        sobel_image = filters.sobel(image)
        
        markers = sobel_image < sobel_image.max() * 0.1
        markers = ndi.label(markers)[0]

        labels = watershed(sobel_image, markers)
    
    elif (method == 'edge'):
        edges = canny(image,3)
        fill = ndi.binary_fill_holes(edges)
        labels = remove_small_objects(fill,10)
    
    elif (method == 'self_design'):
        width = 100
        scale = 0.72
        [m, n] = image.shape
        thre = np.zeros((m,n))
        
        for i in range(0,n):
            ind_s = max(0,int(np.ceil(i-width/2)))
            ind_e = min(n-1,ind_s+width)
            current_image  = image[0:m-1, ind_s:ind_e]
            thre[0:m-1, i] = filters.threshold_otsu(current_image)*0.8
        labels = (image - thre) >=0    
    
    elif (method == 'thre_cons'):
        global_thre = image.max() * 0.3
        labels = image > global_thre

    elif (method == 'global_otsu'):
        global_thre = filters.threshold_otsu(image)
        labels = image > global_thre
    
    elif (method == 'local_otsu'):
        selem=disk(80)
        local_otsu = filters.rank.otsu(image, selem)
        labels = image > (np.true_divide(local_otsu,255))
    
    elif (method == 'yen'):
        global_thre = filters.threshold_yen(image)
        labels = image > (global_thre*2.5)
    
    elif (method == 'li'):
        global_thre = filters.threshold_li(image)
        labels = image > global_thre
    
    elif (method == 'isodata'):     
        global_thre = filters.threshold_isodata(image)
        labels = image > global_thre
    
    elif (method == 'adaptive'):
        block_size = 100
        image = np.true_divide(image,image.max()+np.spacing(1)) * 255
        labels = filters.threshold_adaptive(image, block_size, offset=10)

    elif (method == 'R_Walker'):
        data = image + 0.35 * np.random.randn(*image.shape)
        markers = np.zeros(data.shape, dtype=np.uint)
        markers[data < -0.3] = 1
        markers[data > 1.3] = 2
        labels = random_walker(data, markers, beta=10, mode='cg_mg')
    
    return labels
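# Usage sketch for the 'global_otsu' branch (the sample image and smoothing
# are stand-ins; the other branches need the extra imports referenced inside
# segmentation(), e.g. ndi, watershed, canny, disk, random_walker).
from skimage import data, filters

coins = filters.gaussian(data.coins() / 255.0, sigma=1)
coin_mask = segmentation(coins, method='global_otsu')
print(coin_mask.dtype, coin_mask.mean())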
Example #45
def processImage(captchaPath, report=None, backgroundLayerPath=None, canny_sigma=0.1, corner_min_dist=2, debug = False):
	"""
		Processes the given image and returns a number of feature points.
		These feature points are presumably corner points of the tetragon.
	"""
	image = io.imread(captchaPath, True);
	
	image_without_bg = None
	
# remove background, if available
	if backgroundLayerPath is not None:
		bgl = io.imread(backgroundLayerPath, True)
		image_without_bg = (image - bgl)
	else:
		image_without_bg = image

# applying threshold operators
	thresh = threshold_otsu(image_without_bg)
	thrs_image = image_without_bg > thresh

	thresh_li = threshold_li(image_without_bg)
	thrs_image_li = image_without_bg > thresh_li

	thresh_yen = threshold_yen(image_without_bg)
	thrs_image_yen = image_without_bg > thresh_yen

#	thrs_image = thrs_image * thrs_image_li * thrs_image_yen
	thrs_image = image_without_bg * thrs_image
	thrs_image = thrs_image * thrs_image_li
	thrs_image = thrs_image * thrs_image_yen

#	thrs_image = ndi.gaussian_filter(thrs_image, 0.8)
# take the gray image
	gray_result = rgb2gray(thrs_image)

# find edges
#	canny_result = feature.canny(gray_result, sigma=0.5, low_threshold=1.8)
	canny_result = gray_result 

# store image
	processedImageName = captchaPath[0:len(captchaPath)-4] + "_processed.png"
	io.imsave(processedImageName, canny_result.clip(-1, 1))
	if report is not None:
		report.setProcessedImage(processedImageName)

	coords = feature.corner_peaks(feature.corner_harris(canny_result), min_distance=corner_min_dist)

	if debug:
		fig, axes = plt.subplots(nrows=6, figsize=(8, 3))
		ax0, ax1, ax2, ax3, ax4, ax5 = axes

		ax0.imshow(image, cmap=plt.cm.gray)
		ax0.set_title('Original image')
		ax0.axis('off')

		ax1.imshow(image_without_bg, cmap=plt.cm.gray)
		ax1.set_title('Image without background')
		ax1.axis('off')

		ax2.imshow(thrs_image, cmap=plt.cm.gray)
		ax2.set_title('Thresholded image')
		ax2.axis('off')

		ax3.imshow(gray_result, cmap=plt.cm.gray)
		ax3.set_title('After RGB -> Gray')
		ax3.axis('off')

		ax4.imshow(canny_result, cmap=plt.cm.gray)
		ax4.set_title('After Canny')
		ax4.axis('off')

		ax5.imshow(canny_result, cmap=plt.cm.gray)
		ax5.plot(coords[:, 1], coords[:, 0], '+r', markersize=15)
		ax5.set_title('Detected Features')
		ax5.axis('off')

		plt.show()

	return coords
def init_centroids(first_image_path, master_flat, master_dark, target_centroid,
                   max_number_stars=10, min_flux=0.2, plots=False):

    first_image = np.median([(fits.getdata(path) - master_dark)/master_flat
                             for path in first_image_path], axis=0)

    tophat_kernel = Tophat2DKernel(5)
    convolution = convolve_fft(first_image, tophat_kernel, fftn=fft2, ifftn=ifft2)

    convolution -= np.median(convolution)

    mad = mad_std(convolution)

    convolution[convolution < -5*mad] = 0.0

    from skimage.filters import threshold_yen
    from skimage.measure import label, regionprops

    thresh = threshold_yen(convolution)/4 # Use /4 for planet c, /2 for planet b
    #thresh = threshold_otsu(convolution)/15

    masked = np.ones_like(convolution)
    masked[convolution <= thresh] = 0

    label_image = label(masked)

    plt.figure()
    plt.imshow(label_image, origin='lower', cmap=plt.cm.viridis)
    plt.show()

    # regions = regionprops(label_image, convolution)
    regions = regionprops(label_image, first_image)

    # reject regions near to edge of detector
    buffer_pixels = 50
    regions = [region for region in regions
               if ((region.weighted_centroid[0] > buffer_pixels and
                   region.weighted_centroid[0] < label_image.shape[0] - buffer_pixels)
               and (region.weighted_centroid[1] > buffer_pixels and
                    region.weighted_centroid[1] < label_image.shape[1] - buffer_pixels))]

    #centroids = [region.weighted_centroid for region in regions]
    #intensities = [region.mean_intensity for region in regions]

    target_intensity = regions[0].mean_intensity
    target_diameter = regions[0].equivalent_diameter
    #  and region.equivalent_diameter > 0.8 * target_diameter
    centroids = [region.weighted_centroid for region in regions
                 if min_flux * target_intensity < region.mean_intensity]
    # intensities = [region.mean_intensity for region in regions
    #                if min_flux * target_intensity < region.mean_intensity]
#    centroids = np.array(centroids)[np.argsort(intensities)[::-1]]

    distances = [np.sqrt((target_centroid[0] - d[0])**2 +
                         (target_centroid[1] - d[1])**2) for d in centroids]

    centroids = np.array(centroids)[np.argsort(distances)]

    positions = np.vstack([[y for x, y in centroids], [x for x, y in centroids]])

    if plots:
        apertures = CircularAperture(positions, r=12.)
        apertures.plot(color='r', lw=2, alpha=1)
        plt.imshow(first_image, vmin=np.percentile(first_image, 0.01),
                   vmax=np.percentile(first_image, 99.9), cmap=plt.cm.viridis,
                   origin='lower')
        plt.scatter(positions[0, 0], positions[1, 0], s=150, marker='x')

        plt.show()
    return positions
Beispiel #47
0
def experiment_yen_reds_fit_region(file=None):
    images = image_generator()
    if file is not None:
        images = add_image_to_image_generator(images, file)

    for fn, im in images:
        print(fn)
        image_arrays = []
        titles = []

        # plot original image
        titles.append("Original")
        image_arrays.append(im)

        # threshold
        yen = threshold_yen(im)
        thresholded = threshold_by_channel(im, yen)[0]
        titles.append("Yen Thresholded")
        image_arrays.append(thresholded)

        # equalize each channel's histogram before extracting reds
        equalized = np.zeros_like(im, dtype="uint8")
        for chan in range(3):
            equalized[:, :, chan] = (exposure.equalize_hist(im[:, :, chan]) * 255).astype("uint8")

        # reds
        thresh = 160
        reds = extract_brightest_reds(equalized, thresh=thresh, verbose=False)
        titles.append("Reds Thresholded YCBCR %d" % thresh)
        image_arrays.append(reds)

        # encode
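        # label map for template matching: 0 = background, 2 = pixels above the
        # Yen threshold, 1 = bright reds (reds overwrite overlapping Yen pixels)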
        encoded = np.zeros_like(im[:, :, 0], dtype="uint8")
        encoded[thresholded[:, :, 0] > 0] = 2
        encoded[np.amax(reds, axis=2) > 0] = 1
        encoded = encoded.astype("uint8")
        titles.append("Encoded")
        image_arrays.append(encoded)

        print("fitting reference frame")
        best_loc, best_val = None, 0
        for x in range(100, 1000, 100):
            print("x = ", x)
            for y in range(3 * x // 2, 1500, 100):
                print("y = ", y)
                for h in range(y // 3 - 1, y, y // 3):
                    for z in range(x // 3 - 1, x, x // 3):
                        for i in range(1, 4):
                            theta = i * np.pi / 3
                            reference = draw_reference_frame(x, y, z, h, theta)
                            res = cv2.matchTemplate(encoded, reference, cv2.TM_SQDIFF)
                            _, max_val, _, max_loc = cv2.minMaxLoc(res)
                            if max_val > best_val:
                                best_val = max_val
                                best_loc = max_loc
                                print(x, y, z, h, np.degrees(theta))
                                print(max_val, best_loc)
                                print("\n")

        # plot
        subplot_images(
            image_arrays,
            titles=titles,
            # show_plot=True,
            save_plot=True,
            suptitle="crop-" + fn.split("/")[-1],
        )
Beispiel #48
0
def experiment_yen_distort_reds_entropy_blues(file=None):
    images = image_generator()
    if file is not None:
        images = add_image_to_image_generator(images, file)

    for fn, im in images:
        image_arrays = []
        titles = []

        # plot original image
        titles.append("Original")
        image_arrays.append(im)

        # threshold
        yen = threshold_yen(im)
        thresholded = threshold_by_channel(im, yen)[0]
        titles.append("Yen Thresholded")
        image_arrays.append(thresholded)

        # equalize each channel's histogram
        equalized = np.zeros_like(im, dtype="uint8")
        for chan in range(3):
            equalized[:, :, chan] = (exposure.equalize_hist(im[:, :, chan]) * 255).astype("uint8")

        # entropy
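        # local entropy of the first (red) channel of the equalized image over a
        # radius-3 disk: flat areas score low, textured areas score high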
        ent = skimage.filters.rank.entropy(equalized[:, :, 0], skimage.morphology.disk(3))
        titles.append("Entropy")
        image_arrays.append(ent)

        # threshold the entropy map: low-entropy (flat) pixels become the "blues" mask
        thresh = 1.5
        blues = np.zeros_like(im, dtype="uint8")
        blues[ent <= thresh] = 255
        titles.append("Entropy Blues Thresholded %.1f" % thresh)
        image_arrays.append(blues)

        # reds
        thresh = 160
        reds = extract_brightest_reds(equalized, thresh=thresh, verbose=False)
        titles.append("Reds Thresholded YCBCR %d" % thresh)
        image_arrays.append(reds)

        # union
        union = np.zeros_like(im)
        union[thresholded > 0] = 255
        union[reds > 0] = 255
        union[blues > 0] = 255
        titles.append("Union")
        image_arrays.append(union)

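        # for each mask: clean it with a binary opening and dilation, keep only
        # the largest connected regions, and crop the image to their convex hull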
        for name, color_image in zip(["yen", "blues", "reds", "union"], [thresholded, blues, reds, union]):
            binary_image = color_image[:, :, 0] > 0
            structure = calculate_binary_opening_structure(binary_image)
            binary_image = binary_opening(binary_image, structure=structure)
            binary_image = binary_dilation(binary_image, iterations=3)
            if name == "yen":
                regions, labels = extract_largest_regions(binary_image, num_regions=2)
            else:
                regions, labels = extract_largest_regions(binary_image, num_regions=3)

            mask = convex_hull_mask(regions > 0, mask=True)
            print(mask.shape)
            crop = np.zeros_like(im)
            crop[mask > 0, :] = im[mask > 0, :]
            titles.append(name.title() + " Region")
            image_arrays.append(crop)

        # plot
        subplot_images(
            image_arrays,
            titles=titles,
            # show_plot=True,
            save_plot=True,
            suptitle="crop-" + fn.split("/")[-1],
        )
Beispiel #49
0
def yen_func(img_slice):
    return filters.threshold_yen(img_slice)
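A minimal usage sketch for yen_func (the 3D stack below is synthetic and purely illustrative): compute one Yen threshold per 2D slice, then binarize each slice against its own threshold.

import numpy as np
from skimage import filters

def binarize_stack(stack):
    # one Yen threshold per slice, computed with yen_func above
    thresholds = [yen_func(img_slice) for img_slice in stack]
    return np.stack([img_slice > t for img_slice, t in zip(stack, thresholds)])

binary_stack = binarize_stack(np.random.rand(10, 64, 64))  # synthetic demo stack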
Beispiel #50
0
# imports assumed by this snippet (written against an older scikit-image API)
import os
from skimage.io import imread, imshow
from skimage.filters import (threshold_isodata, threshold_otsu, threshold_li,
                             threshold_yen, threshold_adaptive)

img_orig = '/home/andrea/Desktop/20160713_NCP_GO_Talos_121.jpg'
img_lowpass = '******'
out_dir = '/home/andrea/Desktop/test'
if not os.path.isdir(out_dir):
    os.makedirs(out_dir)
os.chdir(out_dir)
# print('Processing original')
# image_orig = imread(img_orig, as_grey=True, flatten = True)
# thresh_orig = threshold_isodata(image_orig)
# binary_orig = image_orig > thresh_orig
print('Processing lowpass isodata')
image = imread(img_lowpass, as_grey=True, flatten=True)
thresh_isodata = threshold_isodata(image)
thresh_otsu = threshold_otsu(image)
thresh_li = threshold_li(image)
thresh_yen = threshold_yen(image)
thresh_adaptive = threshold_adaptive(image, 3)  # old skimage API: returns a binary image, not threshold values
binary_isodata = image > thresh_isodata
binary_otsu = image > thresh_otsu
binary_li = image > thresh_li
binary_adaptive = image > thresh_adaptive
binary_yen = image > thresh_yen
# edges = canny(image_orig/255.)
# fill_image = ndi.binary_fill_holes(edges)
imshow(binary_isodata)
imshow(binary_otsu)
imshow(binary_yen)
imshow(binary_li)
imshow(binary_adaptive)

Beispiel #51
0
from pylab import imread, imshow, figure, show, subplot, plot, scatter
from scipy.cluster.vq import kmeans, vq
from skimage import data, img_as_uint, img_as_float, color, filters  # color/filters are used by yen_mask below
from skimage.external.tifffile import imsave
from skimage.filters import threshold_otsu, threshold_adaptive, threshold_yen
from skimage.segmentation import clear_border
import ocr

imageFile = '../pics/14.png'
image = imread(imageFile)
img = data.imread(imageFile, as_grey=True)

global_thresh = threshold_yen(img)
# True/False binary mask of the image from global (Yen) thresholding
binary_global = img > global_thresh

block_size = 40

# True/False binary mask of the image from adaptive (local) thresholding
binary_adaptive = threshold_adaptive(img, block_size, offset=10)

# integer mask (global threshold) with objects touching the image border removed
img_bin_global = clear_border(img_as_uint(binary_global))

# integer mask (adaptive threshold) with objects touching the image border removed
img_bin_adaptive = clear_border(img_as_uint(binary_adaptive))

def yen_mask(rgbImage):
    gray_image = color.rgb2gray(rgbImage)
    val = filters.threshold_yen(gray_image)
    return gray_image <= val
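A minimal usage sketch for yen_mask (the file name is hypothetical): load an RGB image, build the mask, and keep only the pixels at or below the Yen threshold.

from skimage import io
import numpy as np

rgb = io.imread('example.png')           # hypothetical RGB input image
mask = yen_mask(rgb)                     # True where the gray value <= Yen threshold
dark_only = rgb * mask[..., np.newaxis]  # zero out everything above the threshold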