Example #1
def anistropic_diffusion(a,
                         niter,
                         kappa,
                         gamma,
                         step,
                         option,
                         ploton,
                         name,
                         save_flag=False):
    b_time = time.time()

    #see http://pastebin.com/sBsPX4Y7 for details
    af = ft.anisodiff3(stack=a,
                       niter=niter,
                       kappa=kappa,
                       gamma=gamma,
                       step=step,
                       option=option,
                       ploton=ploton)

    end_time = time.time()
    print('Anisotropic Diffusion de-noise takes', end_time - b_time, 's')

    if save_flag:
        af_fimg = (af[:, :, int(af.shape[2] / 2)]).copy()
        # TODO: Change the image and tomogram saving path
        af_fimg_path = '/Users/apple/Desktop/Lab/Zach_Project/Denoising_Result/Anistropic_Diffusion/' + str(name) + \
                       '_AD_i=' + str(niter) + '_k=' + str(kappa) + '_g=' + str(gamma) + '.png'
        plt.imsave(af_fimg_path, af_fimg, cmap='gray')

        mrc_path = '/Users/apple/Desktop/Lab/Zach_Project/Denoising_Result/Anistropic_Diffusion/' + str(name) + \
                       '_AD_i=' + str(niter) + '_k=' + str(kappa) + '_g=' + str(gamma) + '.mrc'
        io_file.put_mrc_data(af, mrc_path)

        return af_fimg
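
A hypothetical usage sketch for the wrapper above; the parameter values are illustrative assumptions, not values from the original code. With save_flag=False the function only runs the filter (and returns None); with save_flag=True it writes to the hard-coded paths above.

# Hypothetical usage; parameter values are illustrative only.
import numpy as np

vol = np.random.rand(64, 64, 64).astype(np.float32)  # stand-in for a loaded tomogram
anistropic_diffusion(vol, niter=10, kappa=50, gamma=0.1, step=(1., 1., 1.),
                     option=1, ploton=False, name='demo', save_flag=False)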
Example #2
def bandpass_denoising(a, name, save_flag=False):
    b_time = time.time()
    grid = GV.grid_displacement_to_center(a.shape, GV.fft_mid_co(a.shape))
    rad = GV.grid_distance_to_center(grid)
    rad = np.round(rad).astype(int)  # np.int is deprecated/removed in recent NumPy

    # create a mask so that only the central frequency components are kept
    curve = np.zeros(rad.shape)
    # TODO: change the curve value as desired
    curve[int(rad.shape[0] / 8) * 3:int(rad.shape[0] / 8) * 5,
          int(rad.shape[1] / 8) * 3:int(rad.shape[1] / 8) * 5,
          int(rad.shape[2] / 8) * 3:int(rad.shape[2] / 8) * 5] = 1

    # perform an FFT, apply the mask in frequency space, then transform the filtered data back
    vf = ifftn(ifftshift((fftshift(fftn(a)) * curve)))
    vf = np.real(vf)

    end_time = time.time()
    print('Bandpass de-noise takes', end_time - b_time, 's')

    if save_flag:
        img = (vf[:, :, int(vf.shape[2] / 2)]).copy()
        # TODO: Change the image and tomogram saving path
        img_path = '/Users/apple/Desktop/Lab/Zach_Project/Denoising_Result/Bandpass/' + str(
            name) + '_BP.png'
        plt.imsave(img_path, img, cmap='gray')

        mrc_path = '/Users/apple/Desktop/Lab/Zach_Project/Denoising_Result/Bandpass/' + str(
            name) + '_BP.mrc'
        io_file.put_mrc_data(vf, mrc_path)

        return img
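
The same band-pass idea as a self-contained sketch using only numpy.fft (an illustration; the example above relies on the project's GV helpers instead). The mask keeps the central quarter of the frequency range along each axis, matching the curve block above.

# Self-contained band-pass sketch using numpy.fft only (illustrative).
import numpy as np
from numpy.fft import fftn, ifftn, fftshift, ifftshift

vol = np.random.rand(32, 32, 32)                       # stand-in for a tomogram
s = vol.shape
mask = np.zeros(s)
mask[s[0] * 3 // 8:s[0] * 5 // 8,
     s[1] * 3 // 8:s[1] * 5 // 8,
     s[2] * 3 // 8:s[2] * 5 // 8] = 1                  # keep only the central frequencies
filtered = np.real(ifftn(ifftshift(fftshift(fftn(vol)) * mask)))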
Example #3
def g_denoising(G_type, a, name, gaussian_sigma, save_flag=False):
    b_time = time.time()

    if G_type == 1:
        a = G.smooth(a, gaussian_sigma)
    elif G_type == 2:
        a = G.dog_smooth(a, gaussian_sigma)
    elif G_type == 3:
        a = G.dog_smooth__large_map(a, gaussian_sigma)

    end_time = time.time()
    print('Gaussian de-noise takes', end_time - b_time, 's', ' sigma=',
          gaussian_sigma)

    if save_flag:
        img = (a[:, :, int(a.shape[2] / 2)]).copy()
        # TODO: Change the image and tomogram saving path
        img_path = '/Users/apple/Desktop/Lab/Zach_Project/Denoising_Result/Gaussian/' + str(name) + '_G=' + \
                   str(gaussian_sigma) + '_type=' + str(G_type) + '.png'
        plt.imsave(img_path, img, cmap='gray')

        mrc_path = '/Users/apple/Desktop/Lab/Zach_Project/Denoising_Result/Gaussian/' + str(name) + '_G=' + \
                   str(gaussian_sigma) + '_type=' + str(G_type) + '.mrc'
        io_file.put_mrc_data(a, mrc_path)

        return img
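
A standalone sketch of the smoothing modes using scipy.ndimage as a stand-in for the project's G helpers (assumption: G.smooth is a plain Gaussian and G.dog_smooth a difference of Gaussians; the 1.1 sigma ratio is illustrative).

# Standalone smoothing sketch with scipy.ndimage (the sigma ratio is an assumption).
import numpy as np
from scipy.ndimage import gaussian_filter

vol = np.random.rand(64, 64, 64)
sigma = 2.0
smoothed = gaussian_filter(vol, sigma)                                  # ~ G_type == 1
dog = gaussian_filter(vol, sigma) - gaussian_filter(vol, sigma * 1.1)   # ~ G_type == 2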
Example #4
def particle_picking(a, saliency_map, ref_saliency_max, ref_saliency_min):
    '''
    a : the original tomogram
    saliency_map: the modified output volume from Step 5 of saliency_detection
    ref_saliency_max: the maximum saliency value
    ref_saliency_min: the minimum saliency value
    '''
    n = 0  # sub-tomogram counter

    dif = 8  # half of the cropping box size

    for i in range(saliency_map.shape[0]):
        for j in range(saliency_map.shape[1]):
            for k in range(saliency_map.shape[2]):
                # keep voxels whose saliency is at least 90% of the maximum saliency value
                if saliency_map[i][j][k] >= 0.9 * ref_saliency_max:
                    # skip if the cropping box would cross the tomogram border
                    # TODO: handle edge cases
                    if (i - dif < 0 or i + dif > saliency_map.shape[0]
                            or j - dif < 0 or j + dif > saliency_map.shape[1]
                            or k - dif < 0 or k + dif > saliency_map.shape[2]):
                        pass

                    else:

                        # frame the 3d subarray from the original tomogram
                        subtom = a[i - dif:i + dif, j - dif:j + dif,
                                   k - dif:k + dif]

                        print('x axis starting and end:', i - dif, "and",
                              i + dif)
                        print('y axis starting and end:', j - dif, "and",
                              j + dif)
                        print('z axis starting and end:', k - dif, "and",
                              k + dif)
                        print("the dimension of subtom is", subtom.shape[0],
                              "by", subtom.shape[1], "by", subtom.shape[2])

                        n += 1
                        namemrc = "Desktop/result/saliency_map_subtomograms" + str(
                            n) + ".mrc"
                        namepng = "Desktop/result/saliency_map_subtomograms" + str(
                            n) + ".png"
                        io_file.put_mrc_data(subtom,
                                             namemrc)  # save as the mrc file
                        img = (subtom[:, :, int(subtom.shape[2] / 2)]).copy()
                        plt.imsave(namepng, img, cmap='gray')

                        # update the saliency map: fill the cropped region with the
                        # minimum saliency value so it is not picked again
                        saliency_map[i - dif:i + dif, j - dif:j + dif,
                                     k - dif:k + dif] = ref_saliency_min
Example #5
def saliency_detection(a,
                       gaussian_sigma,
                       gabor_sigma,
                       gabor_lambda,
                       cluster_center_number,
                       save_flag=False):
    # Step 1
    # Data Pre-processing
    a = SN.gaussian_filter(input=a, sigma=gaussian_sigma)  # de-noise
    print('sigma=', gaussian_sigma)
    if save_flag:
        img = (a[:, :, int(a.shape[2] / 2)]).copy()
        plt.axis('off')
        plt.imshow(img, cmap='gray')
        plt.savefig('./original.png')  # save fig

    # Step 2
    # Supervoxel over-segmentation
    N = a.shape[0] * a.shape[1] * a.shape[2]
    n = cluster_center_number
    ck = []  # cluster center [x y z g]
    interval = int(math.pow(N / n, 1.0 / 3))
    x = int(interval / 2)
    y = int(interval / 2)
    z = int(interval / 2)
    print('interval=%d' % interval)

    while (x < a.shape[0]) and (y < a.shape[1]) and (
            z < a.shape[2]):  # Initialization
        ck.append([x, y, z, a[x][y][z]])
        if x + interval < a.shape[0]:
            x = x + interval
        elif y + interval < a.shape[1]:
            x = int(interval / 2)
            y = y + interval
        else:
            x = int(interval / 2)
            y = int(interval / 2)
            z = z + interval

    print('the number of cluster centers = %d' % len(ck))
    print(ck[:5])
    label = [[[0 for i in range(a.shape[2])] for i in range(a.shape[1])]
             for i in range(a.shape[0])]
    label = np.array(label)  # numba supports numpy array
    distance = [[[float('inf') for i in range(a.shape[2])]
                 for i in range(a.shape[1])] for i in range(a.shape[0])]
    distance = np.array(distance)
    # label = np.zeros((a.shape[0], a.shape[1], a.shape[2])) # numba will report error
    # distance = np.full((a.shape[0], a.shape[1], a.shape[2]), np.inf)
    start_time = time.time()
    print('Supervoxel over-segmentation begins')
    ck = np.array(ck)
    redundant_flag = np.array([False] * len(ck))

    for number in range(10):  # 10 iterations suffice for most images
        b_time = time.time()
        print('\n%d of 10 iterations' % number)
        distance, label, ck = fast_SLIC(distance, label, ck, a, interval)
        # merge cluster centers
        ck_dist_min = interval / 2  # merge two cluster centers if the distance between them is less than ck_dist_min
        for ck_i in range(len(ck)):
            if redundant_flag[ck_i]:
                continue
            d = cdist(ck[:, :3], np.reshape(ck[ck_i, :3], (1, -1))).flatten()
            ind = np.where(d < ck_dist_min)[0]
            if ind.size > 1:
                for ind_t in ind:
                    if ind_t == ck_i:
                        continue
                    redundant_flag[ind_t] = True
                    label[label == ind_t] = ck_i
                    ck[ind_t][3] = np.inf
        print('total number of removed cluster centers = ',
              sum(redundant_flag == True))
        e_time = time.time()
        print('\n', e_time - b_time, 's')
    end_time = time.time()
    print('Supervoxel over-segmentation done,', end_time - start_time, 's')

    # save labels for Feature extraction
    labels_remove_num = sum(redundant_flag == True)
    labels = {}

    for i in range(0, a.shape[0]):
        for j in range(0, a.shape[1]):
            for k in range(0, a.shape[2]):
                if label[i][j][k] in labels:
                    labels[label[i][j][k]].append([i, j, k])
                else:
                    labels[label[i][j][k]] = [[i, j, k]]
    assert labels_remove_num + len(labels) == len(ck)

    if save_flag:
        np.save('./labels', labels)
        img = (a[:, :, int(a.shape[2] / 2)]).copy()
        k = int(a.shape[2] / 2)
        draw_color = np.min(a)
        for i in range(1, img.shape[0] - 1):
            for j in range(1, img.shape[1] - 1):
                if label[i][j][k] != label[i - 1][j][k] or label[i][j][k] != label[i + 1][j][k] or label[i][j][k] != \
                        label[i][j - 1][k] or label[i][j][k] != label[i][j + 1][k]:
                    img[i][j] = draw_color
        plt.axis('off')
        plt.imshow(img, cmap='gray')
        plt.savefig('./SLIC.png')  # save fig

    # Step 3
    # Feature Extraction
    stime = time.time()
    Lambda = gabor_lambda
    filters = filter_bank_gb3d(sigma=gabor_sigma,
                               Lambda=Lambda,
                               psi=0,
                               gamma=1)
    # Note: for better performance, the paper uses two filters with different sigmas:
    # filters1 = filter_bank_gb3d(sigma=s1, Lambda=Lambda,psi=0,gamma=1)
    # filters2 = filter_bank_gb3d(sigma=s2, Lambda=Lambda,psi=0,gamma=1)
    # filters = filters1 + filters2
    filters_num = len(filters)
    feature_matrix = np.zeros(
        (len(filters) + 6,
         len(labels)))  # Gabor-filter-based features plus 6 density features
    print('%d Gabor based features' % filters_num)

    print('Feature extraction begins')
    # 3D Gabor filter based features
    for i in range(len(filters)):
        # convolution
        start_time = time.time()
        # b=SN.correlate(a,filters[i]) # too slow
        b = signal.correlate(a, filters[i], mode='same')
        end_time = time.time()
        print('feature %d done (%f s)' % (i, end_time - start_time))

        # show Gabor filter output
        if save_flag:
            img = (b[:, :, int(a.shape[2] / 2)]).copy()
            plt.axis('off')
            plt.imshow(img, cmap='gray')
            plt.savefig('./gabor_output(%d).png' % i)  # save fig

        # generate feature vector
        start_time = time.time()
        index_col = 0
        for key in labels:
            vox = labels[key]
            sum_vox = 0
            for j in range(len(vox)):
                sum_vox = sum_vox + b[vox[j][0], vox[j][1], vox[j][2]]
            # print('sum.type',type(sum)) <class 'numpy.float64'>
            sum_vox = sum_vox / len(vox)
            feature_matrix[i][index_col] = sum_vox
            index_col += 1
        # print(feature_matrix[i, 0:30])
        end_time = time.time()
        print('feature vector %d done (%f s)' % (i, end_time - start_time))
    print('3D Gabor filter based features done')

    # density features
    min_val = np.min(a)
    max_val = np.max(a)
    width = (max_val - min_val) / 6
    index_col = 0
    for key in labels:
        vox = labels[key]
        for j in vox:
            bin_num = min(int((a[j[0]][j[1]][j[2]] - min_val) / width),
                          5)  # bin the density into one of 6 bins (clamp to the top bin)
            feature_matrix[filters_num + bin_num][index_col] += 1
        index_col += 1
    print('Density features done')

    if save_flag:
        np.save('./feature_matrix', feature_matrix)

    etime = time.time()
    print('Feature extraction done,', etime - stime, 's')

    # Step 4
    # RPCA
    start_time = time.time()
    print('RPCA begins')
    L, S = robust_pca(feature_matrix)
    end_time = time.time()
    print('RPCA done, ', end_time - start_time, 's')
    supervoxel_saliency = np.sum(S, axis=0) / S.shape[0]
    if save_flag:
        np.save('./supervoxel_saliency', supervoxel_saliency)

    # Step 5
    # Generate Saliency Map
    min_saliency = np.min(supervoxel_saliency)
    max_saliency = np.max(supervoxel_saliency)
    t = (min_saliency + max_saliency) / 2  # threshold
    print('min=', min_saliency, 'max=', max_saliency, 'threshold=', t)
    index_col = 0
    for key in labels:
        vox = labels[key]
        if supervoxel_saliency[index_col] < t:
            supervoxel_saliency[index_col] = min_saliency

        for j in vox:
            a[j[0]][j[1]][j[2]] = supervoxel_saliency[index_col]
        index_col += 1
        # print('sum.type',type(sum)) <class 'numpy.float64'>

    if save_flag:
        img = a[:, :, int(a.shape[2] / 2)].copy()
        plt.axis('off')
        plt.imshow(img, cmap='gray')
        plt.savefig('./saliency_map.png')
        io_file.put_mrc_data(a, './saliency_map.mrc')
        print('saliency map saved')

    return a
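
A hypothetical end-to-end call chaining the two functions above; the parameter values are illustrative assumptions. Note that saliency_detection overwrites its input in place, so the original volume is copied before the call, and particle_picking writes its MRC/PNG output to the hard-coded Desktop/result/ paths.

# Hypothetical end-to-end usage; parameter values are illustrative assumptions.
import numpy as np

vol = np.random.rand(64, 64, 64).astype(np.float32)   # stand-in for a loaded tomogram
saliency_map = saliency_detection(vol.copy(), gaussian_sigma=2.5, gabor_sigma=14.0,
                                  gabor_lambda=13.0, cluster_center_number=1000,
                                  save_flag=False)
particle_picking(vol, saliency_map,
                 ref_saliency_max=np.max(saliency_map),
                 ref_saliency_min=np.min(saliency_map))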