import cv2 as cv
import numpy as np
from skimage.segmentation import chan_vese


def detect_fluid_contours(input_image, max_rpe_row, min_ilm_row):
    # apply_median_filter, apply_k_mean_clustering and apply_canny_edge are
    # project-specific helpers assumed to be defined elsewhere.
    md_img = apply_median_filter(input_image)
    gray = cv.cvtColor(md_img, cv.COLOR_BGR2GRAY)

    chv = chan_vese(gray,
                    mu=0.25,
                    lambda1=1,
                    lambda2=1,
                    tol=1e-3,
                    max_iter=200,
                    dt=0.5,
                    init_level_set="checkerboard",
                    extended_output=True)

    # With extended_output=True, chv[1] is the final (signed) level set.
    data = chv[1].copy()

    im_max = 255
    # Rescale to roughly 0-1, then to 0-255; values with |phi| larger than
    # phi.max() would wrap around in the uint8 cast below.
    data = abs(data.astype(np.float64) / data.max())
    data = im_max * data
    ls_img = data.astype(np.uint8)

    temp = apply_k_mean_clustering(ls_img)
    temp = apply_canny_edge(temp)
    # out_img2, areas = find_ret_contours(temp, max_rpe_row, min_ilm_row)

    return temp
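
# A self-contained sketch of the core step above (illustrative only): run
# Chan-Vese on a sample grayscale image and rescale the returned level set
# to uint8. The apply_* helpers in detect_fluid_contours are project-specific
# and not reproduced here; the min-max rescaling is one possible choice.
import numpy as np
from skimage import data as skdata, img_as_float
from skimage.segmentation import chan_vese

sample = img_as_float(skdata.camera())
seg, phi, energies = chan_vese(sample, mu=0.25, tol=1e-3, dt=0.5,
                               init_level_set="checkerboard",
                               extended_output=True)
phi_u8 = (255 * (phi - phi.min()) / (phi.max() - phi.min())).astype(np.uint8)
print(seg.shape, phi_u8.dtype)
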
def bytescale(data, cmin=None, cmax=None, high=255, low=0):
    """Byte-scale an image: linearly map values from [cmin, cmax] to
    [low, high] and return the result as uint8.

    Reimplementation of the removed ``scipy.misc.bytescale``.
    """
    if data.dtype == np.uint8:  # uint8 input is returned unchanged
        return data

    if high > 255:
        raise ValueError("`high` should be less than or equal to 255.")
    if low < 0:
        raise ValueError("`low` should be greater than or equal to 0.")
    if high < low:
        raise ValueError(
            "`high` should be greater than or equal to `low`.")

    if cmin is None:
        cmin = data.min()
    if cmax is None:
        cmax = data.max()

    cscale = cmax - cmin
    if cscale < 0:
        raise ValueError("`cmax` should be larger than `cmin`.")
    elif cscale == 0:
        cscale = 1

    scale = float(high - low) / cscale
    bytedata = (data - cmin) * scale + low
    # Clip to [low, high]; adding 0.5 before the uint8 cast rounds to the
    # nearest integer instead of truncating.
    return (bytedata.clip(low, high) + 0.5).astype(np.uint8)
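
# Quick usage check for bytescale (illustrative values): map a float array
# onto the requested uint8 range.
import numpy as np

arr = np.array([[-1.5, 0.0], [0.5, 3.0]])
print(bytescale(arr))                     # scaled to 0..255
print(bytescale(arr, high=200, low=100))  # scaled to 100..200
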
Example #3
from scipy import ndimage as ndi
from skimage import feature, filters, morphology, segmentation, util


def myfilter(self, data):
    """Apply the filter selected in self.filter to a grayscale image."""
    if self.filter == 'sobel':
        return util.img_as_int(filters.sobel(data))
    elif self.filter == 'otsu':
        thresh = filters.threshold_otsu(data)
        return util.img_as_ubyte(data > thresh)
    elif self.filter == '阈值分割':  # "threshold segmentation": manual threshold
        thresh = self.thresholdvalue * data.max() / 100.0
        return util.img_as_ubyte(data > thresh)
    elif self.filter == 'canny edge':
        temp = util.img_as_ubyte(
            feature.canny(data, low_threshold=30, high_threshold=40))
        return temp
    elif self.filter == 'watershed':
        # Smoothed image as the mask, labelled Canny edges as the markers.
        mask = util.img_as_ubyte(filters.gaussian(data, 0.4))
        markers = feature.canny(data, low_threshold=30, high_threshold=40)
        markers = ndi.label(markers)[0]
        idata = filters.rank.gradient(data, morphology.disk(1))  # computed but unused
        # watershed lives in skimage.segmentation in current scikit-image.
        temp = segmentation.watershed(data, markers, mask=mask)
        # hsv = color.convert_colorspace(temp, 'L', 'RGB')
        # io.imshow(hsv)
        return temp
    elif self.filter == 'test':
        data = util.img_as_ubyte(filters.median(data, morphology.disk(2)))
        return data
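
# A minimal usage sketch, assuming myfilter belongs to a viewer class that
# keeps the selected filter name in self.filter and a percentage in
# self.thresholdvalue. FilterDemo below is a hypothetical stand-in.
from skimage import data as skdata


class FilterDemo:
    def __init__(self, name, thresholdvalue=50):
        self.filter = name
        self.thresholdvalue = thresholdvalue

    apply = myfilter  # reuse the function above as a method


result = FilterDemo('otsu').apply(skdata.camera())
print(result.dtype, result.shape)
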
Example #4
def vis_square(data, padsize=1, padval=0):
    # Normalize to 0-1 without in-place integer division.
    data = (data - data.min()) / (data.max() - data.min())

    # Tile the first dimension onto a roughly square n x n grid.
    n = int(np.ceil(np.sqrt(data.shape[0])))
    print("grid size n = %d, ndim = %d" % (n, data.ndim))
    padding = ((0, n**2 - data.shape[0]), (0, padsize), (0, padsize)) + ((0, 0),) * (data.ndim - 3)
    data = np.pad(data, padding, mode='constant', constant_values=(padval, padval))
    data = data.reshape((n, n) + data.shape[1:]).transpose((0, 2, 1, 3) + tuple(range(4, data.ndim + 1)))
    data = data.reshape((n * data.shape[1], n * data.shape[3]) + data.shape[4:])
    return data  # return the tiled mosaic so the caller can display it
Example #5
def moments(data):
    """Return (height, x, y, width_x, width_y), the parameters of a 2D
    Gaussian estimated from the moments of the distribution."""
    total = data.sum()
    X, Y = np.indices(data.shape)
    x = (X * data).sum() / total
    y = (Y * data).sum() / total
    # Width along each axis: weighted standard deviation of the profile
    # through the centroid, measured about that axis' centroid.
    col = data[:, int(y)]
    width_x = np.sqrt(np.abs((np.arange(col.size) - x)**2 * col).sum() / col.sum())
    row = data[int(x), :]
    width_y = np.sqrt(np.abs((np.arange(row.size) - y)**2 * row).sum() / row.sum())
    height = data.max()
    return height, x, y, width_x, width_y
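
# Sanity check on a synthetic 2D Gaussian (arbitrary illustrative parameters);
# the recovered values should be close to the ones used to build it.
import numpy as np

X, Y = np.indices((101, 101))
gauss = 3.0 * np.exp(-((X - 40)**2 / (2 * 6.0**2)
                       + (Y - 55)**2 / (2 * 9.0**2)))
print(moments(gauss))  # roughly (3.0, 40, 55, 6.0, 9.0)
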
Example #6
import matplotlib.pyplot as plt
import numpy as np


def vis_square(data):
    """Tile an array of shape (n, h, w) or (n, h, w, 3) into an approximately
    square grid of images and display it."""
    data = (data - data.min()) / (data.max() - data.min())  # normalize to 0-1

    n = int(np.ceil(np.sqrt(data.shape[0])))
    padding = (((0, n**2 - data.shape[0]), (0, 1),
                (0, 1))  # add some space between filters
               + ((0, 0), ) * (data.ndim - 3)
               )  # don't pad the last dimension (if there is one)
    data = np.pad(data, padding, mode='constant',
                  constant_values=1)  # pad with ones (white)

    data = data.reshape((n, n) + data.shape[1:]).transpose(
        (0, 2, 1, 3) + tuple(range(4, data.ndim + 1)))
    data = data.reshape((n * data.shape[1], n * data.shape[3]) +
                        data.shape[4:])

    plt.imshow(data)
    plt.axis('off')
    plt.show()
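
# Example call on hypothetical data: tile 32 random 11x11 "filters" into a grid.
import numpy as np

vis_square(np.random.rand(32, 11, 11))
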
Example #7
n_plane, n_row, n_col, n_chan = data.shape

#####################################################################
# Let us consider only a slice (2D plane) of the data for now. More
# specifically, let us consider the slice located halfway in the stack.
# The `imshow` function can display both grayscale and RGB(A) 2D images.

_, ax = plt.subplots()
ax.imshow(data[n_plane // 2])

#####################################################################
# According to the warning message, the range of values is unexpected. The
# image rendering is clearly not satisfactory colour-wise.

vmin, vmax = data.min(), data.max()
print(f'range: ({vmin}, {vmax})')

#####################################################################
# We turn to `plotly`'s implementation of the `imshow` function, for it
# supports `value ranges
# <https://plotly.com/python/imshow/#defining-the-data-range-covered-by-the-color-range-with-zmin-and-zmax>`_
# beyond ``(0.0, 1.0)`` for floats and ``(0, 255)`` for integers.

fig = px.imshow(data[n_plane // 2], zmin=vmin, zmax=vmax)
plotly.io.show(fig)
# sphinx_gallery_thumbnail_number = 2

#####################################################################
# Here you go, *fluorescence* microscopy!
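
# An alternative sketch (reusing `data`, `vmin` and `vmax` from above):
# rescale the slice to [0, 1] manually so that matplotlib renders it without
# the range warning.
rescaled = (data[n_plane // 2].astype(float) - vmin) / (vmax - vmin)
_, ax2 = plt.subplots()
ax2.imshow(rescaled)
ax2.set_axis_off()
plt.show()
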
Example #8
def freedman_diaconis_bins(data):
    """Number of bins based on Freedman-Diaconis rule."""
    h = 2 * (np.percentile(data, 75) - np.percentile(data, 25)) / np.cbrt(
        len(data))
    return int(np.ceil((data.max() - data.min()) / h))
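
# Quick check on synthetic data (illustrative): for 1000 standard normal
# samples the Freedman-Diaconis rule typically suggests a bin count in the
# low tens.
import numpy as np

sample = np.random.standard_normal(1000)
print(freedman_diaconis_bins(sample))
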
Example #9
    plt.figure(figsize=(24, 8))
    for f in range(startframe, startframe + nframes):
        data = dataall[f, :, :]
        w, h = data.shape
        # Upsample the frame by `scale` via a zero-padded DFT.
        fim = fft2(data)
        data = np.real(
            _upsampled_dft(fim, (w * scale, h * scale),
                           upsample_factor=scale)[::-1, ::-1])
        data = gaussian(data, 1.)
        data = crop_data(data)
        data = (data - data.min()) / (data.max() - data.min())
        # data = data / data.max()
        print("max data = ", data.max())
        print("data shape =", data.shape)

        # Keep the best (lowest-error) cell configuration found so far.
        minerr = 1e12
        for i in range(1):
            cells, err = simulated_anneal(cells, data, nt=800 * len(cells))
            cells = split_cells(data, cells, minlen=5.)
            # cells, err = minimizer(cells, data)
            print("error = ", err)
            if err < minerr:
                minerr = err
                mincells = deepcopy(cells)
        plot_solution(mincells, data)
        # plt.savefig('simulated_annealing_frame%04d.png' % f)
        plt.savefig('newdata_frame%04d.png' % f)
Example #10
def compute_M(data):
    """Sparse matrix whose row v stores the flat indices where data == v."""
    cols = np.arange(data.size)
    return csr_matrix((cols, (data.ravel(), cols)),
                      shape=(data.max() + 1, data.size))
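
# A usage sketch following the common "group flat indices by value" recipe:
# row v of the matrix stores the positions at which `data` equals v.
import numpy as np
from scipy.sparse import csr_matrix

labels = np.array([[0, 1, 1],
                   [2, 0, 1]])
M = compute_M(labels)
for v in range(labels.max() + 1):
    print(v, np.unravel_index(M[v].data, labels.shape))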