def build_multispace(cnts):
    """Extract contour features (mean gradient intensity and circularity) and return them as a multivariable space."""
    # circularity = [4 * np.pi * np.divide(cv2.contourArea(c), cv2.arcLength(c, True) ** 2) for c in cnts]
    # mean_int = [np.mean(gg[np.ravel(c).reshape([len(c), 2])[:, 1], np.ravel(c).reshape([len(c), 2])[:, 0]]) for c in cnts]

    # Expose selected feature arrays at module level for reuse elsewhere.
    global circularity, cnts_area, approx
    circularity = np.zeros(len(cnts))
    mean_int = np.zeros(len(cnts))
    cnts_area = np.zeros(len(cnts))
    cnts_perim = np.zeros(len(cnts))
    approx = np.zeros(len(cnts))
    # `img` and `scale` come from the enclosing module scope; the gradient
    # image is resized to the (scaled) frame the contour coordinates live in.
    gg = gaussian_gradient_magnitude(img, sigma=1)
    gg = np.array(
        Image.fromarray(gg).resize(scale * np.array(np.transpose(img).shape)))
    for ii in range(len(cnts)):
        cnts_area[ii] = cv2.contourArea(cnts[ii])
        cnts_perim[ii] = cv2.arcLength(cnts[ii], True)
        approx[ii] = len(
            cv2.approxPolyDP(cnts[ii], 0.01 * cnts_perim[ii], True))
        circularity[ii] = 4 * np.pi * cnts_area[ii] / cnts_perim[ii] ** 2
        coor = cnts[ii].reshape((len(cnts[ii]), 2))
        mean_int[ii] = np.mean(gg[np.array(coor[:, 1]), np.array(coor[:, 0])])
    features = np.column_stack(
        (mean_int, circularity))  # Mean intensity, circularity
    multi_space = np.reshape(features, (len(cnts), 1, features.shape[1]))
    return multi_space
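The (N, 1, 2) float layout produced above matches the sample format cv2.kmeans accepts; a minimal clustering sketch, assuming the module-level img/scale that build_multispace reads are already set (the contour source and cluster count here are arbitrary):

# Hedged usage sketch: cluster contours in the (mean_int, circularity) space.
import cv2
import numpy as np

cnts, _ = cv2.findContours(binary, cv2.RETR_EXTERNAL,   # binary: hypothetical 8-bit mask
                           cv2.CHAIN_APPROX_SIMPLE)     # OpenCV >= 4 return signature
space = build_multispace(cnts).astype(np.float32)       # cv2.kmeans needs float32
criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 10, 1.0)
_, labels, centers = cv2.kmeans(space.reshape(-1, 2), 2, None,
                                criteria, 10, cv2.KMEANS_RANDOM_CENTERS)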
Example #2
def find_significant(self):
    def convert_to_polar(x_coord, y_coord):
        magnitude = np.sqrt(x_coord**2 + y_coord**2)
        # Note: the (x, y) arguments are swapped relative to the usual
        # arctan2(y, x) convention; convert_to_decart undoes the same swap,
        # so the round trip is self-consistent.
        angle = np.arctan2(x_coord, y_coord)
        return magnitude, angle

    def convert_to_decart(magnitude, angle):
        x_coord = magnitude * np.cos(angle)
        y_coord = magnitude * np.sin(angle)
        return x_coord, y_coord

    # Spectral-residual saliency: log-amplitude spectrum minus its local mean.
    f_i = np.fft.fft2(self.im)
    rho, phi = convert_to_polar(np.real(f_i), np.imag(f_i))
    l_f = np.log10(rho.clip(min=1e-9))
    h = 1 / 9 * np.array([[1, 1, 1], [1, 1, 1], [1, 1, 1]])  # 3x3 box filter
    h_l = convolve(l_f, h)
    r_p = np.exp(l_f - h_l)
    imag_part, real_part = convert_to_decart(r_p, phi)
    img_combined = np.fft.ifft2(real_part + 1j * imag_part)
    s_f, _ = convert_to_polar(np.real(img_combined), np.imag(img_combined))
    # sigma=(8, 0) smooths along axis 0 only; a zero per-axis sigma can yield
    # NaNs on recent SciPy versions, so a small positive value may be safer.
    s_f = gaussian_gradient_magnitude(s_f, (8, 0))
    s_f = s_f**2
    s_f = np.float32(s_f) / np.max(s_f)
    # s_f = np.flipud(s_f)
    # s_f = np.fliplr(s_f)
    th = 3 * np.mean(s_f)
    o_f = np.where(s_f > th, 1, 0)
    o_f = scipy.ndimage.binary_dilation(o_f).astype(o_f.dtype)
    o_f = scipy.ndimage.binary_erosion(o_f).astype(o_f.dtype)
    return o_f
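The hand-built 3x3 averaging kernel `h` above is simply a box filter; a minimal equivalence sketch (the random `l_f` stands in for the log-spectrum from the snippet):

# Hedged sketch: the 1/9 kernel equals scipy's uniform (box) filter.
from scipy.ndimage import uniform_filter, convolve
import numpy as np

l_f = np.random.rand(64, 64)           # stand-in log-spectrum
h = np.full((3, 3), 1 / 9)
assert np.allclose(convolve(l_f, h), uniform_filter(l_f, size=3))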
Example #3
def getGradientThresholdAndImage(image, center, d, res):
    gradient_img = flt.gaussian_gradient_magnitude(image, 1)
    # Slice indices must be ints; npy.ceil/npy.floor return floats.
    rows = slice(int(npy.ceil(center[0] - d / res[0])),
                 int(npy.floor(center[0] + d / res[0])))
    cols = slice(int(npy.ceil(center[1] - d / res[1])),
                 int(npy.floor(center[1] + d / res[1])))
    th = npy.max(gradient_img[rows, cols])
    return th * 0.75, gradient_img
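A minimal usage sketch, with `npy`/`flt` aliasing numpy and scipy.ndimage as in the snippet; the image, center, window half-width `d`, and pixel resolution `res` below are made-up values:

# Hedged usage sketch with synthetic data.
import numpy as npy
import scipy.ndimage as flt

image = npy.random.rand(128, 128)
th, grad = getGradientThresholdAndImage(image, center=(64, 64), d=10.0,
                                        res=(1.0, 1.0))
mask = grad > th   # pixels exceeding 75% of the local max gradient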
Example #4
def general_cc_var_num_channels(img, diff_order=0, mink_norm=1, sigma=1, mask_im=None, saturation_threshold=255,
                                dilation_size=3, clip_range=True):
    # img must have first dim color channel! img[c, x, y(, z, ...)]
    dim_img = len(img.shape[1:])
    if clip_range:
        minm = img.min()
        maxm = img.max()
    img_internal = np.array(img)
    if mask_im is None:
        mask_im = np.zeros(img_internal.shape[1:], dtype=bool)
    img_dil = deepcopy(img_internal)
    for c in range(img.shape[0]):
        img_dil[c] = grey_dilation(img_internal[c], tuple([dilation_size] * dim_img))
    mask_im = mask_im | np.any(img_dil >= saturation_threshold, axis=0)
    if sigma != 0:
        mask_im[:sigma, :] = 1
        mask_im[mask_im.shape[0] - sigma:, :] = 1
        mask_im[:, mask_im.shape[1] - sigma:] = 1
        mask_im[:, :sigma] = 1
        if dim_img == 3:
            mask_im[:, :, mask_im.shape[2] - sigma:] = 1
            mask_im[:, :, :sigma] = 1

    output_img = deepcopy(img_internal)  # assumes float input; the in-place division below would fail on integer arrays

    if diff_order == 0 and sigma != 0:
        for c in range(img_internal.shape[0]):
            img_internal[c] = gaussian_filter(img_internal[c], sigma, diff_order)
    elif diff_order == 1:
        for c in range(img_internal.shape[0]):
            img_internal[c] = gaussian_gradient_magnitude(img_internal[c], sigma)
    elif diff_order > 1:
        raise ValueError("diff_order can only be 0 or 1. 2 is not supported (ToDo, maybe)")

    img_internal = np.abs(img_internal)

    white_colors = []

    if mink_norm != -1:
        kleur = np.power(img_internal, mink_norm)
        for c in range(kleur.shape[0]):
            white_colors.append(np.power((kleur[c][mask_im != 1]).sum(), 1. / mink_norm))
    else:
        for c in range(img_internal.shape[0]):
            white_colors.append(np.max(img_internal[c][mask_im != 1]))

    som = np.sqrt(np.sum([i ** 2 for i in white_colors]))

    white_colors = [i / som for i in white_colors]

    for c in range(output_img.shape[0]):
        output_img[c] /= (white_colors[c] * np.sqrt(3.))

    if clip_range:
        output_img[output_img < minm] = minm
        output_img[output_img > maxm] = maxm
    return white_colors, output_img
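A usage sketch for the snippet above (the numpy, scipy.ndimage, and copy imports are assumed in scope); mink_norm=6 with sigma=1 gives a grey-edge-style illuminant estimate:

# Hedged usage sketch: estimate the illuminant of a random channel-first image.
import numpy as np

img = np.random.rand(3, 64, 64)                 # img[c, x, y], floats in [0, 1]
white, corrected = general_cc_var_num_channels(
    img, diff_order=1, mink_norm=6, sigma=1,
    saturation_threshold=0.95, clip_range=False)
print("estimated illuminant (unit norm):", white)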
Example #5
    def __call__(self, image):
        if image.ndim == 1:
            image = image.reshape(*self.tsize)
        left = 0.0 + image[:, 0]
        image[:, 0] = 0
        deriv = filters.gaussian_gradient_magnitude(image, self.dsigma, mode='constant')
        if self.spread > 0:
            # spread the edge response over a small neighborhood
            deriv = filters.maximum_filter(deriv, (self.spread, self.spread))
        deriv /= 1e-6 + amax(deriv)
        result = self.alpha * deriv + (1.0 - self.alpha) * image
        result[:, 0] = left
        return result
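The method depends on instance attributes that the snippet does not show; a minimal hedged host class (the attribute values are guesses):

# Hedged sketch of a host class for the __call__ method above.
class EdgeTarget:
    def __init__(self, tsize=(32, 32), dsigma=1.0, spread=2, alpha=0.5):
        self.tsize = tsize      # target shape used to un-flatten 1-D inputs
        self.dsigma = dsigma    # sigma for the gradient magnitude
        self.spread = spread    # max-filter footprint widening edge responses
        self.alpha = alpha      # edge/image blend weight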
Example #6
def multidim_gauss_grad(array, sigma=1):
    starttime = time.time()
    arrayofimages = [array]
    # Gradient magnitude at three increasing scales; sigma=0 is degenerate for
    # a Gaussian kernel, so the scales start at 1 * sigma. The filter returns
    # a new array, so no defensive deepcopy is needed.
    for i in range(1, 4):
        arrayofimages.append(
            filters.gaussian_gradient_magnitude(array, sigma=i * sigma))

    arrayofimages = list(map(normalize, arrayofimages))
    print("Multidimensional Gaussian Gradient Execution Time:\t" +
          str(time.time() - starttime))
    return arrayofimages
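The snippet relies on a `normalize` helper that is not shown; a plausible min-max version (an assumption, not the author's definition):

# Hedged sketch of the undefined `normalize` helper: min-max scaling to [0, 1].
import numpy as np

def normalize(image):
    lo, hi = image.min(), image.max()
    if hi == lo:                      # avoid division by zero on flat images
        return np.zeros_like(image, dtype=np.float64)
    return (image - lo) / (hi - lo)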
Example #8
def __extract_gaussian_gradient_magnitude(image, mask=slice(None), sigma=1, voxelspacing=None):
    """
    Internal, single-image version of @see gaussian_gradient_magnitude
    """
    # set voxel spacing
    if voxelspacing is None:
        voxelspacing = [1.] * image.ndim

    # determine gaussian kernel size in voxel units
    sigma = __create_structure_array(sigma, voxelspacing)

    return __extract_intensities(gaussian_gradient_magnitude(image, sigma), mask)
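The helpers `__create_structure_array` and `__extract_intensities` are not shown; the core idea, scaling sigma by voxel spacing and pulling masked intensities, can be sketched standalone (all names here are assumptions):

# Hedged standalone sketch of the same feature extraction.
import numpy as np
from scipy.ndimage import gaussian_gradient_magnitude

def gradient_magnitude_feature(image, mask, sigma=1.0, voxelspacing=None):
    if voxelspacing is None:
        voxelspacing = [1.0] * image.ndim
    # One sigma per axis, expressed in voxel units.
    sigma_voxels = [sigma / vs for vs in voxelspacing]
    return gaussian_gradient_magnitude(image, sigma_voxels)[mask]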
Example #9
def find_significant_region(file_name):
    # filter_h = np.array([[0.04, 0.12, 0.04], [0.12, 0.36, 0.12], [0.04, 0.12, 0.04]])
    img = Image.open(file_name)
    # Load pixels into a (width, height) array, matching img.getpixel((x, y)).
    channel = np.asarray(img, dtype=np.float64).T
    f_i = np.fft.fft2(channel)
    magnitude, angle = convert_to_polar(np.real(f_i), np.imag(f_i))
    l_f = np.log10(magnitude.clip(min=1e-9))
    h = 1 / 9 * np.array([[1, 1, 1], [1, 1, 1], [1, 1, 1]])
    h_l = convolve(l_f, h)
    r_p = np.exp(l_f - h_l)
    imag_part, real_part = convert_to_decart(r_p, angle)
    img_combined = np.fft.ifft2(real_part + 1j * imag_part)
    s_f, _ = convert_to_polar(np.real(img_combined), np.imag(img_combined))
    # sigma=(8, 0) smooths along axis 0 only; a zero per-axis sigma can yield
    # NaNs on recent SciPy versions.
    s_f = gaussian_gradient_magnitude(s_f, (8, 0))
    s_f = s_f ** 2
    s_f = np.float32(s_f) / np.max(s_f)
    s_f = np.flipud(s_f)
    s_f = np.fliplr(s_f)
    th = np.mean(s_f)
    o_f = np.where(s_f > th, 1.0, 0.0)
    o_f = scipy.ndimage.binary_erosion(o_f).astype(o_f.dtype)
    o_f = scipy.ndimage.binary_dilation(o_f).astype(o_f.dtype)
    # Disabled visualization (needs img2 = img.copy() and an output file name):
    # for i in range(s_f.shape[0]):
    #     for j in range(s_f.shape[1]):
    #         img2.putpixel((i, j), 0 if o_f[i, j] == 0 else 255)
    # img2.save(new_img_file_name)
    return o_f
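A short usage sketch (the input file name is hypothetical):

# Hedged usage sketch.
o_f = find_significant_region("photo.png")   # hypothetical grayscale input
coverage = o_f.mean()                        # fraction of salient pixels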
Example #10
def compute_eng_grad(img):
    """
    Computes the energy of an image using gradient magnitude.

    Args:
        img (n,m,4 numpy matrix): RGB image with an additional mask layer.

    Returns:
        n,m numpy matrix: Gradient energy map of the provided image
    """
    bw_img = rgb_to_gray(img)
    # eng = generic_gradient_magnitude(bw_img, sobel)  # unused Sobel variant
    eng = gaussian_gradient_magnitude(bw_img, 1)
    return normalize(eng)
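`rgb_to_gray` and `normalize` are defined elsewhere in that project; plausible stand-ins (assumptions, not the original helpers):

# Hedged sketches of the helpers the snippet depends on.
import numpy as np

def rgb_to_gray(img):
    # Luminance from the first three channels; any extra mask layer is ignored.
    return img[..., :3] @ np.array([0.299, 0.587, 0.114])

def normalize(eng):
    rng = eng.max() - eng.min()
    return (eng - eng.min()) / rng if rng else np.zeros_like(eng)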
Example #12
def image_search(asm_model, image, test_image, threshold=70):
    # image[0] = x, image[1] = y
    mean, var_matrix, principal_axis, comp_variance = asm_model

    # differentiate the image
    image_tmp = np.array(image, dtype=float)  # np.float was removed in NumPy 1.24
    sigma = 2
    image_diff = sigma * gaussian_gradient_magnitude(image_tmp, sigma=sigma)
    
    # normalizing of the differentiated image    
    image_diff = normalize_image(image_diff)

    # The landmarks within the model is the meanshape
    model_x = np.copy(mean)

    # normalize model_x to align the leaf in the image as close as possible
    a_x, a_y, lower_x, lower_y, upper_x, upper_y = normalize_model(model_x, image_diff, threshold, image)

    # rotate model_x
    rotation_matrix = build_matrix(a_x, a_y)
    model_x_rotated = scale_and_rotate(model_x, rotation_matrix)

    # find initial translation of model_x
    t_x = lower_x - model_x_rotated[0]
    t_y = lower_y - model_x_rotated[1]
    length = len(model_x) // 2
    t_vector = get_translation(t_x, t_y, length)

    # initial landmarks within the image
    image_x = model_x_rotated + t_vector

    # initial b vector
    b = np.zeros(len(principal_axis))

    # instantiate the approximation of dx
    approx_dx = np.zeros(len(model_x))

    # this loop should ideally run until the landmarks reach a fix point
    for i in range(50): # while True

        print('iteration ', i)

        # find dX -> the suggested changes in the image frame
        diff_image_x = adjustments_along_normal(image_x, image_diff, threshold)

        # all points are placed at the same coordinates
        if not diff_image_x.any():
            print('damn, all points are at the same coordinates')
            return (np.array(()), np.array(()))


        # align X to be as close to the new points as possible
        # alignment_parameters = a_x, a_y, t_x, t_y

        diff_alignment_parameters = aligner.solve_x(image_x+diff_image_x, image_x, var_matrix)

        diff_matrix = build_matrix(diff_alignment_parameters[0], diff_alignment_parameters[1])

        length = len(image_x) // 2
        diff_vector = get_translation(diff_alignment_parameters[2], diff_alignment_parameters[3], length)

        # y from eq. 19
        #y = image_x + diff_image_x - image_x_c
        y = scale_and_rotate(model_x + approx_dx, rotation_matrix) + diff_image_x - diff_vector

        # calculate new s, theta, t_x, t_y
        #alignment_parameters = np.dot(alignment_parameters, diff_alignment_parameters)
        rotation_matrix = np.dot(rotation_matrix, diff_matrix)
        t_vector = t_vector + diff_vector

        # suggested movements of the points in the model space
        # dx = M((s(1+ds))^-1, -(theta + dtheta)) [y] - x
        diff_model_x = scale_and_rotate(y, np.linalg.inv(rotation_matrix)) - model_x

        # apply the shape constraints and approximate new model parameter x + dx
        # 0: x + dx ~ x + P*(b+db) <- allowable shape
        # 1: db = P^t * dx
        # 2: x + dx ~ x + P*(b+P^t * dx)
        # 3: b = b + db = b + P^t * dx

        # note: the PCs are stored 'transposed', so invert the transposition

        # update b (3)
        db = np.dot(principal_axis, diff_model_x)

        # since b = [0,..,0] for mean, and x is mean -> b + db = db
        b = db

        # limit b to be 3 standard deviations from the mean (eq 15)
        for k in range(len(b)):
            if b[k] > 3 * math.sqrt(comp_variance[k][0]):
                print('b was bigger than allowable shape domain')
                print('b index: ', k)
                b[k] = 3 * math.sqrt(comp_variance[k][0])

            if b[k] < (-3) * math.sqrt(comp_variance[k][0]):
                print('b was smaller than allowable shape domain')
                print('b index: ', k)
                b[k] = (-3) * math.sqrt(comp_variance[k][0])

        # b coordinates in the model space
        approx_dx = np.dot(np.array(principal_axis).transpose(), db)


        # store the old landmark suggestion to test whether any change has happened
        image_x_old = image_x

        image_x = scale_and_rotate(model_x+approx_dx, rotation_matrix) + t_vector


        if sum(abs(image_x-image_x_old)) < 100:
            break

    return b, (model_x + approx_dx)
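`asm_model` is unpacked as (mean, var_matrix, principal_axis, comp_variance); a hedged sketch of building such a tuple, with shapes inferred from how the fields are used (all values are dummies):

# Hedged model-tuple sketch for image_search; shapes are assumptions.
import numpy as np

n_landmarks, n_modes = 30, 5
mean = np.zeros(2 * n_landmarks)               # stacked landmark coordinates
var_matrix = np.eye(2 * n_landmarks)           # consumed by aligner.solve_x
principal_axis = np.random.rand(n_modes, 2 * n_landmarks)  # PCA modes (rows)
comp_variance = np.ones((n_modes, 1))          # per-mode variance
asm_model = (mean, var_matrix, principal_axis, comp_variance)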
Example #13
def get_gaussian_gradient_magnitude(image, sigma=cfg.sigma):
    image_gradient = gaussian_gradient_magnitude(image, sigma)
    return image_gradient
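A one-line usage sketch (assumes the project's cfg module that supplies the default sigma is importable):

# Hedged usage sketch with an explicit sigma override.
import numpy as np

image = np.random.rand(64, 64)
grad = get_gaussian_gradient_magnitude(image, sigma=2.0)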
Example #14
def computeStructureFeatures(Xdata, isTraining, path):
    path = path + '/structFeatures'
    print("\nComputing: All Structure Features")
    print("is Training? %r" %isTraining)
    start = time.time()
    # Separate color channel
    RChannel = Xdata[:,:,0]
    GChannel = Xdata[:,:,1]
    BChannel = Xdata[:,:,2]
    # Calculate Laplacian of Gaussian (sigma = 1.6)
    print("Computing: Laplacian of Gaussian")
    gaussianLF_R = gaussian_laplace(RChannel, 1.6)
    gaussianLF_G = gaussian_laplace(GChannel, 1.6)
    gaussianLF_B = gaussian_laplace(BChannel, 1.6)
    gaussianLF = np.dstack((gaussianLF_R, gaussianLF_G, gaussianLF_B))
    gaussianLF = gaussianLF.reshape(
        (gaussianLF.shape[0] * gaussianLF.shape[1], gaussianLF.shape[2]))
    end = time.time()
    if (isTraining):
        print("Computed: gaussianLapFeatures_Tr in %f seconds" %(end-start))
        
#        with open('gaussianLapFeatures_Tr.pkl','wb') as outfile:
#            pickle.dump(gaussianLF, outfile, pickle.HIGHEST_PROTOCOL)
#        np.savetxt("/Users/wuwenjun/GitHub/CSE-577-Melanoma-Nuclei-Segmentation/gaussianLapFeatures_Tr.csv", 
#                   gaussianLF, delimiter=",")
    else:
        print("Computed: gaussianLapFeatures_Ts in %f seconds" %(end-start))
#        with open('gaussianLapFeatures_Ts.pkl','wb') as outfile:
#            pickle.dump(gaussianLF, outfile, pickle.HIGHEST_PROTOCOL)
#        np.savetxt("/Users/wuwenjun/GitHub/CSE-577-Melanoma-Nuclei-Segmentation/gaussianLapFeatures_Ts.csv", 
#                   gaussianLF, delimiter=",")
        
    # Calculate eigenvalues of structure tensor  (sigma =1.6, 3.5)
    print("Computing: eigenvalues of structure tensor")
    start = time.time()
    
    # Note: the tuple-returning structure_tensor / positional *_eigvals calls
    # below follow the older scikit-image API; newer releases use element lists.
    Axx_R1, Axy_R1, Ayy_R1 = structure_tensor(RChannel, sigma = 1.6)
    larger_R1, smaller_R1 = structure_tensor_eigvals(Axx_R1, Axy_R1, Ayy_R1)
    Axx_R2, Axy_R2, Ayy_R2 = structure_tensor(RChannel, sigma = 3.5)
    larger_R2, smaller_R2 = structure_tensor_eigvals(Axx_R2, Axy_R2, Ayy_R2)
    Axx_G1, Axy_G1, Ayy_G1 = structure_tensor(GChannel, sigma = 1.6)
    larger_G1, smaller_G1 = structure_tensor_eigvals(Axx_G1, Axy_G1, Ayy_G1)
    Axx_G2, Axy_G2, Ayy_G2 = structure_tensor(GChannel, sigma = 3.5)
    larger_G2, smaller_G2 = structure_tensor_eigvals(Axx_G2, Axy_G2, Ayy_G2)
    Axx_B1, Axy_B1, Ayy_B1 = structure_tensor(BChannel, sigma = 1.6)
    larger_B1, smaller_B1 = structure_tensor_eigvals(Axx_B1, Axy_B1, Ayy_B1)
    Axx_B2, Axy_B2, Ayy_B2 = structure_tensor(BChannel, sigma = 3.5)
    larger_B2, smaller_B2 = structure_tensor_eigvals(Axx_B2, Axy_B2, Ayy_B2)
    eigenST = np.dstack((larger_R1, smaller_R1, larger_R2, smaller_R2, 
                              larger_G1, smaller_G1, larger_G2, smaller_G2, 
                              larger_B1, smaller_B1, larger_B2, smaller_B2))
    eigenST = eigenST.reshape(
        (eigenST.shape[0] * eigenST.shape[1], eigenST.shape[2]))
    end = time.time()
    if (isTraining):
        print("Computed: eigenStructFeatures_Tr in %f seconds" %(end-start))
        
#        with open('eigenStructFeatures_Tr.pkl','wb') as outfile:
#            pickle.dump(eigenST, outfile, pickle.HIGHEST_PROTOCOL)
#        np.savetxt("/Users/wuwenjun/GitHub/CSE-577-Melanoma-Nuclei-Segmentation/eigenStructFeatures_Tr.csv", 
#                   eigenST, delimiter=",")
    else:
        print ("Computed: eigenStructFeatures_Ts in %f seconds" %(end-start))        
#        with open('eigenStructFeatures_Ts.pkl','wb') as outfile:
#            pickle.dump(eigenST, outfile, pickle.HIGHEST_PROTOCOL)
#        np.savetxt("/Users/wuwenjun/GitHub/CSE-577-Melanoma-Nuclei-Segmentation/eigenStructFeatures_Ts.csv", 
#                   eigenST, delimiter=",")
        
    # Calculate eigenvalues of Hessian matrix
    print("Computing: eigenvalues of Hessian matrix")
    start = time.time()
    Hrr_R1, Hrc_R1, Hcc_R1 = hessian_matrix(RChannel, sigma = 1.6, order='rc')
    larger_R1, smaller_R1 = hessian_matrix_eigvals(Hrr_R1, Hrc_R1, Hcc_R1)
    Hrr_R2, Hrc_R2, Hcc_R2 = hessian_matrix(RChannel, sigma = 3.5, order='rc')
    larger_R2, smaller_R2 = hessian_matrix_eigvals(Hrr_R2, Hrc_R2, Hcc_R2)
    Hrr_G1, Hrc_G1, Hcc_G1 = hessian_matrix(GChannel, sigma = 1.6, order='rc')
    larger_G1, smaller_G1 = hessian_matrix_eigvals(Hrr_G1, Hrc_G1, Hcc_G1)
    Hrr_G2, Hrc_G2, Hcc_G2 = hessian_matrix(GChannel, sigma = 3.5, order='rc')
    larger_G2, smaller_G2 = hessian_matrix_eigvals(Hrr_G2, Hrc_G2, Hcc_G2)
    Hrr_B1, Hrc_B1, Hcc_B1 = hessian_matrix(BChannel, sigma = 1.6, order='rc')
    larger_B1, smaller_B1 = hessian_matrix_eigvals(Hrr_B1, Hrc_B1, Hcc_B1)
    Hrr_B2, Hrc_B2, Hcc_B2 = hessian_matrix(BChannel, sigma = 3.5, order='rc')
    larger_B2, smaller_B2 = hessian_matrix_eigvals(Hrr_B2, Hrc_B2, Hcc_B2)
    eigenHess = np.dstack((larger_R1, smaller_R1, larger_R2, smaller_R2,
                                larger_G1, smaller_G1, larger_G2, smaller_G2,
                                larger_B1, smaller_B1, larger_B2, smaller_B2))
    eigenHess = eigenHess.reshape(
        (eigenHess.shape[0] * eigenHess.shape[1], eigenHess.shape[2]))
    end = time.time()
    if (isTraining):
        print("Computed: eigenHessFeatures_Tr in %f seconds" % (end-start))
#        with open('eigenHessFeatures_Tr.pkl','wb') as outfile:
#            pickle.dump(eigenHess, outfile, pickle.HIGHEST_PROTOCOL)
#        np.savetxt("/Users/wuwenjun/GitHub/CSE-577-Melanoma-Nuclei-Segmentation/eigenHessFeatures_Tr.csv", 
#                   eigenHess, delimiter=",")
    else:
        print("Computed: eigenHessFeatures_Ts in %f seconds" % (end-start))
#        with open('eigenHessFeatures_Ts.pkl','wb') as outfile:
#            pickle.dump(eigenHess, outfile, pickle.HIGHEST_PROTOCOL)
#        np.savetxt("/Users/wuwenjun/GitHub/CSE-577-Melanoma-Nuclei-Segmentation/eigenHessFeatures_Ts.csv", 
#                   eigenHess, delimiter=",")
    
    # Calculate Gaussian gradient magnitude (sigma = 1.6)
    print("Computing: Gaussian gradient magnitude")
    start = time.time()
    gaussian_grad_R = gaussian_gradient_magnitude(RChannel, sigma = 1.6)
    gaussian_grad_G = gaussian_gradient_magnitude(GChannel, sigma = 1.6)
    gaussian_grad_B = gaussian_gradient_magnitude(BChannel, sigma = 1.6)
    gaussian_grad = np.dstack((gaussian_grad_R, gaussian_grad_G, 
                                    gaussian_grad_B))
    gaussian_grad = gaussian_grad.reshape(
        (gaussian_grad.shape[0] * gaussian_grad.shape[1],
         gaussian_grad.shape[2]))
    end = time.time()
    if (isTraining):
        print("Computed: gaussianGradFeatures_Tr in %f seconds" % (end-start))
        
#        with open('gaussianGradFeatures_Tr.pkl','wb') as outfile:
#            pickle.dump(gaussian_grad, outfile, pickle.HIGHEST_PROTOCOL)
#        np.savetxt("/Users/wuwenjun/GitHub/CSE-577-Melanoma-Nuclei-Segmentation/gaussianGradFeatures_Tr.csv", 
#                   gaussian_grad, delimiter=",")
    else:
        print("Computed: gaussianGradFeatures_Ts in %f seconds" % (end-start))
        
#        with open('gaussianGradFeatures_Ts.pkl','wb') as outfile:
#            pickle.dump(gaussian_grad, outfile, pickle.HIGHEST_PROTOCOL)
#        np.savetxt("/Users/wuwenjun/GitHub/CSE-577-Melanoma-Nuclei-Segmentation/gaussianGradFeatures_Ts.csv", 
#                   gaussian_grad, delimiter=",")
    
    All = np.concatenate((gaussianLF, eigenST, eigenHess, gaussian_grad),axis = 1)
    if (isTraining):
        path = path + '_Tr'
        np.savez_compressed(path, data = All)
        print("\nFINISHED: Structure Features for Training data\n")

    else:   
        path = path + '_Ts'
        np.savez_compressed(path, data = All)
        print("\nFINISHED: Structure Features for Testing data\n")
Example #15
    def distort(imgae, config):
        """Add noise to an image.

        Adapted from the gqcnn source code; see the paper for the underlying
        method.
        """
        imgae_ = imgae.copy()
        # config = self._config
        im_height = imgae_.shape[0]
        im_width = imgae_.shape[1]
        im_center = np.array([float(im_height-1)/2, float(im_width-1)/2])
        # denoising and synthetic data generation
        if config['multiplicative_denoising']:
            gamma_shape = config['gamma_shape']
            gamma_scale = 1.0 / gamma_shape
            mult_samples = ss.gamma.rvs(gamma_shape, scale=gamma_scale)
            imgae_ = imgae_ * mult_samples

        # randomly dropout regions of the image for robustness
        if config['image_dropout']:
            if np.random.rand() < config['image_dropout_rate']:
                nonzero_px = np.where(imgae_ > 0)
                nonzero_px = np.c_[nonzero_px[0], nonzero_px[1]]
                num_nonzero = nonzero_px.shape[0]
                num_dropout_regions = ss.poisson.rvs(
                    config['dropout_poisson_mean'])

                # sample ellipses
                dropout_centers = np.random.choice(
                    num_nonzero, size=num_dropout_regions)
                x_radii = ss.gamma.rvs(
                    config['dropout_radius_shape'], scale=config['dropout_radius_scale'], size=num_dropout_regions)
                y_radii = ss.gamma.rvs(
                    config['dropout_radius_shape'], scale=config['dropout_radius_scale'], size=num_dropout_regions)

                # set interior pixels to zero
                for j in range(num_dropout_regions):
                    ind = dropout_centers[j]
                    dropout_center = nonzero_px[ind, :]
                    x_radius = x_radii[j]
                    y_radius = y_radii[j]
                    dropout_px_y, dropout_px_x = sd.ellipse(
                        dropout_center[0], dropout_center[1], y_radius, x_radius, shape=imgae_.shape)
                    imgae_[dropout_px_y, dropout_px_x] = 0.0

        # dropout a region around the areas of the image with high gradient
        if config['gradient_dropout']:
            if np.random.rand() < config['gradient_dropout_rate']:
                grad_mag = sf.gaussian_gradient_magnitude(
                    imgae_, sigma=config['gradient_dropout_sigma'])
                thresh = ss.gamma.rvs(
                    config['gradient_dropout_shape'],
                    scale=config['gradient_dropout_scale'], size=1)
                high_gradient_px = np.where(grad_mag > thresh)
                imgae_[high_gradient_px[0], high_gradient_px[1]] = 0.0

        # add correlated Gaussian noise
        if config['gaussian_process_denoising']:
            gp_rescale_factor = config['gaussian_process_scaling_factor']
            gp_sample_height = int(im_height / gp_rescale_factor)
            gp_sample_width = int(im_width / gp_rescale_factor)
            gp_num_pix = gp_sample_height * gp_sample_width
            if np.random.rand() < config['gaussian_process_rate']:
                gp_noise = ss.norm.rvs(scale=config['gaussian_process_sigma'], size=gp_num_pix).reshape(
                    gp_sample_height, gp_sample_width)
                # scipy.misc.imresize is deprecated:
                # gp_noise = sm.imresize(
                #     gp_noise, gp_rescale_factor, interp='bicubic', mode='F')
                # skimage.transform.resize could also replace it:
                # gp_noise = st.resize(gp_noise, (im_height, im_width))
                gp_noise = cv2.resize(
                    gp_noise, (im_width, im_height),  # cv2 dsize is (width, height)
                    interpolation=cv2.INTER_CUBIC)
                imgae_[imgae_ > 0] += gp_noise[imgae_ > 0]

        # run open and close morphological filters to denoise the image
        if config['morphological']:
            sample = np.random.rand()
            morph_filter_dim = ss.poisson.rvs(
                config['morph_poisson_mean'])
            if sample < config['morph_open_rate']:
                imgae_ = snm.grey_opening(
                    imgae_, size=morph_filter_dim)
            else:
                closed_imgae_ = snm.grey_closing(
                    imgae_, size=morph_filter_dim)

                # set new closed pixels to the minimum depth, mimicking the table
                new_nonzero_px = np.where(
                    (imgae_ == 0) & (closed_imgae_ > 0))
                closed_imgae_[new_nonzero_px[0], new_nonzero_px[1]] = np.min(
                    imgae_[imgae_ > 0])
                imgae_ = closed_imgae_.copy()

        # randomly dropout borders of the image for robustness
        if config['border_distortion']:
            grad_mag = sf.gaussian_gradient_magnitude(
                imgae_, sigma=config['border_grad_sigma'])
            high_gradient_px = np.where(
                grad_mag > config['border_grad_thresh'])
            high_gradient_px = np.c_[
                high_gradient_px[0], high_gradient_px[1]]
            num_nonzero = high_gradient_px.shape[0]
            num_dropout_regions = ss.poisson.rvs(
                config['border_poisson_mean'])

            # sample ellipses
            dropout_centers = np.random.choice(
                num_nonzero, size=num_dropout_regions)
            x_radii = ss.gamma.rvs(
                config['border_radius_shape'], scale=config['border_radius_scale'], size=num_dropout_regions)
            y_radii = ss.gamma.rvs(
                config['border_radius_shape'], scale=config['border_radius_scale'], size=num_dropout_regions)

            # set interior pixels to zero or one
            for j in range(num_dropout_regions):
                ind = dropout_centers[j]
                dropout_center = high_gradient_px[ind, :]
                x_radius = x_radii[j]
                y_radius = y_radii[j]
                dropout_px_y, dropout_px_x = sd.ellipse(
                    dropout_center[0], dropout_center[1], y_radius, x_radius, shape=imgae_.shape)
                if np.random.rand() < 0.5:
                    imgae_[dropout_px_y, dropout_px_x] = 0.0
                else:
                    imgae_[dropout_px_y, dropout_px_x] = imgae_[
                        dropout_center[0], dropout_center[1]]

        # randomly replace background pixels with constant depth
        if config['background_denoising']:
            if np.random.rand() < config['background_rate']:
                imgae_[imgae_ > 0] = config['background_min_depth'] + (
                    config['background_max_depth'] - config['background_min_depth']) * np.random.rand()

        # symmetrize images
        if config['symmetrize']:
            # rotate with 50% probability
            if np.random.rand() < 0.5:
                theta = 180.0
                rot_map = cv2.getRotationMatrix2D(
                    tuple(im_center), theta, 1)
                imgae_ = cv2.warpAffine(
                    imgae_, rot_map, (im_width, im_height),  # dsize is (width, height)
                    flags=cv2.INTER_NEAREST)
            # reflect left right with 50% probability
            if np.random.rand() < 0.5:
                imgae_ = np.fliplr(imgae_)
            # reflect up down with 50% probability
            if np.random.rand() < 0.5:
                imgae_ = np.flipud(imgae_)
        return imgae_
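distort expects a sizeable config dict; a minimal hedged sketch of one (keys from the snippet, values made up) that exercises only the correlated-Gaussian-noise branch, assuming the snippet's ss (scipy.stats) and cv2 aliases are in scope:

# Hedged config sketch: enable only the Gaussian-process noise branch.
import numpy as np

depth_image = np.random.rand(96, 96).astype(np.float32)
config = {
    'multiplicative_denoising': False,
    'image_dropout': False,
    'gradient_dropout': False,
    'gaussian_process_denoising': True,
    'gaussian_process_scaling_factor': 4.0,
    'gaussian_process_sigma': 0.005,
    'gaussian_process_rate': 1.0,
    'morphological': False,
    'border_distortion': False,
    'background_denoising': False,
    'symmetrize': False,
}
noisy = distort(depth_image, config)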
Example #16
def nuclei_active_region_segmentation(input_img,
                                      positions,
                                      omega_energies=dict(intensity=1.0,
                                                          gradient=1.5,
                                                          smoothness=10000.0),
                                      intensity_min=20000.,
                                      iterations=10,
                                      display=False):
    """
    3D extension of the multiple region extension of the binary level set implementation
    """

    segmentation_start_time = time()
    print("--> Active region segmentation (", len(positions), " regions )")

    from copy import deepcopy

    reference_img = deepcopy(input_img)

    size = np.array(reference_img.shape)
    voxelsize = np.array(reference_img.voxelsize)

    if display:
        from openalea.core.world import World
        world = World()

    gradient = None  # default so the gradient_img kwarg below is always defined
    if 'gradient' in omega_energies:
        from scipy.ndimage import gaussian_gradient_magnitude
        start_time = time()
        print("  --> Computing image gradient")
        gradient = gaussian_gradient_magnitude(
            np.array(reference_img, np.float64),
            sigma=0.5 / np.array(reference_img.voxelsize))
        gradient_img = SpatialImage(np.array(gradient, np.uint16),
                                    voxelsize=reference_img.voxelsize)
        end_time = time()
        print("  <-- Computing image gradient         [", end_time - start_time, " s]")

    start_time = time()
    print("  --> Creating seed image")
    print(list(positions.keys()))
    seed_img = seed_image_from_points(size,
                                      voxelsize,
                                      positions,
                                      point_radius=1.0)
    regions_img = np.copy(seed_img)
    end_time = time()
    print("  <-- Creating seed image       (",
          len(np.unique(regions_img)) - 1, " regions )  [",
          end_time - start_time, " s]")

    if display:
        world.add(seed_img,
                  'active_regions_seeds',
                  colormap='glasbey',
                  voxelsize=voxelsize,
                  alphamap='constant',
                  volume=False,
                  cut_planes=True)
        input()

    for iteration in range(iterations):
        start_time = time()
        print("  --> Active region energy gradient descent : iteration",
              iteration, " (", len(np.unique(regions_img)) - 1, " regions )")
        previous_regions_img = np.copy(regions_img)
        regions_img = active_regions_energy_gradient_descent(
            regions_img,
            reference_img,
            omega_energies=omega_energies,
            intensity_min=intensity_min,
            gradient_img=gradient)
        change = ((regions_img - previous_regions_img) != 0.).sum() / float(
            (regions_img > 1.).sum())
        end_time = time()
        print("  --> Active region energy gradient descent : iteration",
              iteration, "  (Evolution : ", int(100 * change), " %)  ",
              "[", end_time - start_time, " s]")

        if display:
            world.add(regions_img,
                      'active_regions',
                      colormap='invert_grey',
                      voxelsize=voxelsize,
                      intensity_range=(1, 2))

    segmented_img = SpatialImage(regions_img,
                                 voxelsize=reference_img.voxelsize)
    if display:
        world.add(segmented_img,
                  'active_regions',
                  colormap='glasbey',
                  voxelsize=voxelsize,
                  alphamap='constant')
        input()

    segmentation_end_time = time()
    print("<-- Active region segmentation (",
          len(np.unique(segmented_img)) - 1, " regions )    [",
          segmentation_end_time - segmentation_start_time, " s]")

    return segmented_img
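A call sketch; `SpatialImage`, `seed_image_from_points`, `active_regions_energy_gradient_descent`, and the input image all come from the openalea tissue-analysis stack, so every name and value below is an assumption:

# Hedged call sketch: positions maps nucleus labels to 3-D point coordinates.
positions = {2: (10.0, 12.0, 8.0), 3: (40.0, 35.0, 20.0)}
segmented = nuclei_active_region_segmentation(input_img, positions,
                                              iterations=5, display=False)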