# Example #1
def creatTrainigSam_3D(dataPath, iRnum = 6,iSnum = 12, gRnum = 4,gSnum = 12):
    """ Creat training samples(3D) and compute features for each training sample

    Parameters
    ----------
    dataPath: str
        The directory where training image ROIs are stored.
    iRnum: integer
        The number of rings to be divided in computing intensity features
    iSnum: integer
        The number of sectors to be divided in computing intensity features
    gRnum: integer
        The number of rings to be divided in computing gradient features
    gSnum: integer
        The number of sectors to be divided in computing gradient features

    Returns
    -------
    list of TPatch.TLightPatch
        One light patch per slice of every file in dataPath; bagID numbers
        the source file and instanceID is a running counter over all slices.
    """

    file_list = os.listdir(dataPath)

    LightPatchList = []
    bagid = 0
    instanceid = 0
    for fil in file_list:
        im = ImageIO.imReader(dataPath, fil,'tif',3)

        # Down-sample once per image.  The original called this inside the
        # per-slice loop, redundantly recomputing the whole sampled stack
        # on every iteration.
        im.downSample(rate = 2)

        for i in range(im.size_2):

            # Calculating intensity features on the full-resolution slice
            patch = TPatch.TPatch()
            patch.initialize(im.data[i])
            int_feats = patch.getIntenfeats(iRnum,iSnum)

            # Calculating segment features on the same patch
            seg_feats = patch.getSegmentFeats()

            # Calculating gradient features on the equalized + smoothed
            # down-sampled slice
            eqimg = histEqualization.histEqualization(im.sampled_data[i], 16)
            smoothimg = filters.gaussian_filter(eqimg, sigma = 2, order=0, output=None, mode='reflect', cval=0.0, truncate=4.0)
            patch = TPatch.TPatch()
            patch.initialize(smoothimg)
            gr_feats = patch.getGradfeats(gRnum,gSnum)

            feats = np.hstack((int_feats, gr_feats, seg_feats))

            lightPatch = TPatch.TLightPatch()
            lightPatch.pdata = im.data[i]
            lightPatch.feats = feats
            # Integer division keeps the original Python 2 center semantics.
            lightPatch.patch_center = (im.data[i].shape[0]/2, im.data[i].shape[1]/2)
            lightPatch.bagID = bagid
            lightPatch.instanceID = instanceid
            LightPatchList.append(lightPatch)

            instanceid = instanceid + 1

        bagid = bagid + 1

    return LightPatchList
def test_func():
    """Run 3D mass detection on a tiff stack in parallel and pickle the result.

    Please specify the image tiff stack directory, file name and output
    path. Extracted masses list will be saved in a workspace file for further
    use. Also please specify how many cores will be allocated for the
    parallel process.
    """


    dataPath = '/home/yanbin/Tomosynthesis/data/tiffs_3d/5016/'
    outputPath = '/home/yanbin/Tomosynthesis/script_test/'
    fileName = '5016EMML08.tif'

    ## load image data
    im = ImageIO.imReader(dataPath,fileName, 'tif')

    ## allocate cpu resources
    pool = Pool(processes=6)
    params =[(i,im.data[i]) for i in range(im.size_2)]

    ## run in parallel; release the workers even on failure
    ## (the original never closed the pool)
    try:
        sliceList = pool.map(mass3d.parallelWrapper,params)
    finally:
        pool.close()
        pool.join()

    ## save the workspace; 'with' guarantees the file is closed even if
    ## pickling raises (the original leaked the handle on error)
    with open(outputPath + 'suspicious.pkl', 'wb') as output:
        pickle.dump(sliceList, output)
# Example #3
def test_mass():
    """Segment a mass in one 2D slice with a morphological geodesic
    active contour (GAC).

    The slice is histogram-equalized, denoised with four successive
    adaptive-Wiener passes, then segmented by evolving a circular level
    set under the edge-stopping function g(I).  Intermediate images and
    the final level set are written to outputPath.
    """

    dataPath = 'C:/Tomosynthesis/localtest/'
    fileName = '5131R-recon08_45-1.tif'
    outputPath = 'C:/Tomosynthesis/localtest/res/'

    im = ImageIO.imReader(dataPath,fileName, 'tif',2)

    eqimg = histEqualization.histEqualization(im.data[0], 16)

    # Four successive adaptive-Wiener denoising passes (the original
    # spelled these out as four copy-pasted statements).
    img = eqimg
    for _ in range(4):
        img = AT_denoising.DenoisingAW(img)
    tiffLib.imsave(outputPath + 'denoised.tif',img)

    # g(I): edge-stopping function for the geodesic active contour
    gI = morphsnakes.gborders(img, alpha=1, sigma=8)
    tiffLib.imsave(outputPath + 'gI.tif',np.float32(gI))

    # Morphological GAC, initialized with a circular level set centered
    # on the image (Python 2 integer division for the center).
    mgac = morphsnakes.MorphGAC(gI, smoothing=2, threshold=0.035, balloon=-1)
    mgac.levelset = circle_levelset(img.shape, (img.shape[0]/2, img.shape[1]/2), 140, scalerow=0.75)

    # Visual evolution.
    ppl.figure()
    ls = morphsnakes.evolve_visual(mgac, num_iters=110, background=img)
    tiffLib.imsave(outputPath + 'ls.tif',np.float32(ls))
def test_func():
    """You can conver a single smv file to 3D tiff by specifying
    data_path, output_path, fileName and set the SigleConvert flag to 1.

    You can conver a batch of smv files to 3D tiffs by specifying
    root data_path, output_path and set the BatchConvert flag to 1.

    """

    ## Please specify paths ##
    data_path = '/home/yanbin/Tomosynthesis/data/SAP_test_datasets/Screening_30_cases/6002/'
    output_path = '/home/yanbin/Tomosynthesis/script_test/'
    fileName = '6002L06.smv'

    ## Please specify Parameters ##
    BatchConvert = 0
    SigleConvert = 0
    dim = 3

    ## data_path check
    if not os.path.isdir(data_path):
	print "Data directory:\n"+ data_path +"\ndoes not exist"
	sys.exit()

    ## Format convert batch
    if BatchConvert == 1:
        print 'here'
        dir_list = os.listdir(data_path)
        print dir_list
        for dirc in dir_list:
            print dirc
            if os.path.isdir(data_path + dirc):
                # make directory for output files
                opath = output_path + dirc + '/'
                print opath
                if not os.path.isdir(opath):
                    os.makedirs(opath)
                    
                file_list = os.listdir(data_path + dirc)    
                for fil in file_list: 
                    im = ImageIO.imReader(data_path + dirc + '/', fil, 'smv')
                    ImageIO.imWriter(opath, fil.strip('smv') + 'tif',im,dim)

    ## Format convert single
    if SigleConvert == 1:
        im = ImageIO.imReader(data_path,fileName, 'smv')
        ImageIO.imWriter(output_path, fileName.strip('smv') + 'tif',im, dim)
# Example #5
def test_func():
    """Please specify the data directory and file name. This function runs
    the microcalcification detection for a 3D stack.
    """

    data_dir = 'C:/Tomosynthesis/localtest/'
    out_dir = 'C:/Tomosynthesis/test_script/'
    stack_name = '5092-1.tif'

    # Load the 3D image stack.
    stack = ImageIO.imReader(data_dir, stack_name, 'tif', 3)

    # Fan the per-slice detection out over a worker pool.
    worker_pool = Pool(processes=1)
    slice_args = [(k, stack.data[k]) for k in range(stack.size_2)]
    per_slice = worker_pool.map(mc.parallelWrapper, slice_args)

    # Link the 2D detections across slices, build 3D candidates,
    # then constrain them to the final 3D MC list.
    ids_3d = mc.MC_connect_3d(per_slice)
    candidates_3d = mc.MCs_constuct_3d(per_slice, ids_3d)
    mc_list_3d = mc.MCs_constrain(candidates_3d)

    for candidate in mc_list_3d:
        print(candidate.center, candidate.intensity, candidate.volume)
# Example #6
import ImageIO
import TImage
import tiffLib

from scipy import ndimage as nd
import scipy
from skimage.filter import gabor_kernel
import numpy as np
import time
from numpy.fft import fft, ifft, fft2, ifft2, fftshift

# Benchmark script: spatial-domain vs. FFT-based Gabor filtering of one slice.
dataPath = 'C:/Tomosynthesis/localtest/'
fileName = 'test-crop.tif'
outputPath = 'C:/Tomosynthesis/localtest/res/'
# Read a single 2D tiff slice.
im = ImageIO.imReader(dataPath,fileName, 'tif',2)


# Real part of a Gabor kernel (frequency 0.0185, orientation 0,
# sigma_x = 20, sigma_y = 20/0.9).
kernel = np.real(gabor_kernel(0.0185, 0, 20, 20/float(0.9)))
print kernel.shape

# Time the direct (spatial-domain) convolution.
start = time.clock()
temp_response = nd.convolve(im.data[0], kernel, mode='nearest')
elapsed = (time.clock() - start)
print elapsed

# Time the FFT route: edge-pad the image by the kernel size, multiply the
# real-FFT spectra, then crop back to the original extent (the half-kernel
# offsets undo the padding; integer division per Python 2).
start = time.clock()
data = np.lib.pad(im.data[0], ((0,kernel.shape[0]),(0,kernel.shape[1])),'edge')
temp_response_2 = np.fft.irfft2(np.fft.rfft2(data) * np.fft.rfft2(kernel,data.shape))
temp_response_2 = temp_response_2[kernel.shape[0]/2:data.shape[0] - kernel.shape[0]/2,kernel.shape[1]/2:data.shape[1] - kernel.shape[1]/2]
elapsed = (time.clock() - start)
print elapsed
# Example #7
def creatTrainigSam(dataPath, opt = 'all', iRnum = 6,iSnum = 12, gRnum = 4,gSnum = 12):
    """ Creat training samples(2D) and compute features for each training sample

    Parameters
    ----------
    dataPath: str
        The directory where training image ROIs are stored.
    opt: str
        Which features to compute: 'Int', 'Grad', 'seg', or 'all'.
    iRnum: integer
        The number of rings to be divided in computing intensity features
    iSnum: integer
        The number of sectors to be divided in computing intensity features
    gRnum: integer
        The number of rings to be divided in computing gradient features
    gSnum: integer
        The number of sectors to be divided in computing gradient features
    """

    file_list = os.listdir(dataPath)
    n_samples = len(file_list)
    int_feats = np.zeros((n_samples, 4), np.double)
    gr_feats = np.zeros((n_samples, 4), np.double)
    seg_feats = np.zeros((n_samples, 7), np.double)
    all_feats = np.zeros((n_samples, 15), np.double)

    LightPatchList = []
    for idx, fil in enumerate(file_list):
        im = ImageIO.imReader(dataPath, fil, 'tif', 2)

        # Intensity features on the raw patch
        if opt in ('Int', 'all'):
            patch = TPatch.TPatch()
            patch.initialize(im.data[0])
            int_feats[idx, :] = patch.getIntenfeats(iRnum, iSnum)

        # Gradient features on the down-sampled, equalized, smoothed patch
        if opt in ('Grad', 'all'):
            im.downSample(rate = 2)
            eqimg = histEqualization.histEqualization(im.sampled_data[0], 16)
            smoothimg = filters.gaussian_filter(eqimg, sigma = 2, order=0, output=None, mode='reflect', cval=0.0, truncate=4.0)
            patch = TPatch.TPatch()
            patch.initialize(smoothimg)
            gr_feats[idx, :] = patch.getGradfeats(gRnum, gSnum)

        # Segmentation features on the raw patch
        if opt in ('seg', 'all'):
            patch = TPatch.TPatch()
            patch.initialize(im.data[0])
            seg_feats[idx, :] = patch.getSegmentFeats()

        # For 'all', bundle everything into a light patch as well
        if opt == 'all':
            all_feats[idx, :] = np.hstack((int_feats[idx, :], gr_feats[idx, :], seg_feats[idx, :]))

            lightPatch = TPatch.TLightPatch()
            lightPatch.pdata = im.data[0]
            lightPatch.feats = all_feats[idx, :]
            lightPatch.patch_center = (im.data[0].shape[0]/2, im.data[0].shape[1]/2)
            LightPatchList.append(lightPatch)

    if opt == 'all':
        return LightPatchList
    if opt == 'Int':
        return int_feats
    if opt == 'Grad':
        return gr_feats
    if opt == 'seg':
        return seg_feats
# Example #8
def graphrepresentation(myarray, im, path='/Users/Shared/TomosynthesisData/processed/5039/'):
    '''Build a complete weighted graph over the slices of a tiff stack and
    extract its longest path.

    Each slice index becomes a node; every (i, j) pair gets an edge whose
    weight sums the mutual information of two texture descriptors
    (wavelet-LBP histograms and RBST short-run features).

    Parameters
    ----------
    myarray: ndarray
        Slice stack; myarray.shape[0] is the number of slices/nodes.
    im:
        Kept for backward compatibility with the original signature;
        not used by the current implementation.
    path: str
        Directory holding the per-slice files '5039<i>.tif' (previously
        hard-coded inside the loop; the old value is now the default).

    Returns
    -------
    (weightlist, listpath): edge weights along the longest path, and the
    node sequence of that path.
    '''
    G = nx.Graph()
    n_slices = myarray.shape[0]

    # BUG FIX: the original iterated range(0, a-1), silently dropping the
    # last slice from the node set.
    for i in range(n_slices):
        G.add_node(i, point=myarray[i])

    base = '5039'
    for i in range(n_slices):
        im1 = ImageIO.imReader(path, base + str(i) + '.tif', 'tif', 2)
        for j in range(n_slices):
            # NOTE(review): j == i produces a self-loop, exactly as the
            # original did -- confirm whether self-edges are intended.
            im2 = ImageIO.imReader(path, base + str(j) + '.tif', 'tif', 2)

            # Mutual information of the wavelet-LBP histograms.
            feature1 = waveletcLBP.concatinationhist(im1.data[0])
            feature2 = waveletcLBP.concatinationhist(im2.data[0])
            total_weight = weightinginfo(feature1, feature2, im1, im2)

            # Mutual information of the RBST short-run features.  The
            # original computed this block twice and discarded the first
            # result; one computation is kept.  ('sum' also no longer
            # shadows the builtin.)
            feature1 = RBST.shortrunfacility(im1.data[0], im1.data[0])
            feature2 = RBST.shortrunfacility(im2.data[0], im2.data[0])
            total_weight += weightinginfo(feature1, feature2, im1, im2)

            # BUG FIX: add_weighted_edges_from expects (u, v, w) tuples;
            # the original passed the flat list [i, j, sum].  Setting the
            # weight attribute directly is equivalent and clearer.
            G.add_edge(i, j, weight=total_weight)

    listpath = longest_path(G)
    # Collect the stored edge weights along the longest path.
    # BUG FIXES: the original called listpath(i) instead of subscripting,
    # used the nonexistent G.adjacency_ite(a, b), and built the result
    # with 'weightlist = list.append(...)'.
    weightlist = []
    for k in range(len(listpath) - 1):
        u = listpath[k]
        v = listpath[k + 1]
        weightlist.append(G[u][v]['weight'])
    return weightlist, listpath
        
            
             
        
        
    
    
     
    
    
    
    
# Example #9
    warped_im1,d1, d2 = registration.registration(im_r.data[0], im_l.data[0], 15,'c')

    tiffLib.imsave(outputPath + 'warped_im1.tif',np.float32(warped_im1))
    tiffLib.imsave(outputPath + 'd1.tif',np.float32(d1))
    tiffLib.imsave(outputPath + 'd2.tif',np.float32(d2))
    '''

    ############################# comparision test #####################################
    
    dataPath = 'C:/Tomosynthesis/localtest/reg/'
    outputPath = 'C:/Tomosynthesis/localtest/reg/'

    fileName_r = 'op.tif'
    fileName_l = '6044_l.tif'

    im_r = ImageIO.imReader(dataPath,fileName_r, 'tif',2)
    im_l = ImageIO.imReader(dataPath,fileName_l, 'tif',2)

    params = []
    params.append(('1d', 'cv_comp', cv.CV_COMP_CORREL))
    params.append(('1d', 'scipy_comp', 'Euclidean'))
    params.append(('1d', 'scipy_comp', 'Manhattan'))
    params.append(('1d', 'kl_div', 'None'))

    params.append(('2d', cv.CV_TM_SQDIFF_NORMED, 'None'))
    params.append(('2d', cv.CV_TM_CCORR_NORMED, 'None'))
    params.append(('2d', cv.CV_TM_CCOEFF_NORMED, 'None'))

    params.append(('decomp', 'eigen', 'None'))
    params.append(('decomp', 'NMF', 'None'))
# Example #10
        data_projected = Dimreduction.dim_Reduction(data, label, opt, n_components=2, visualize = True)
        classifier = classification.classifier(data_projected,label)
        classifier.train(opt ='SVM')
        classifier.classify()
    '''

    ############################# Mass 3D extraction ########################################
      
    dataPath = '/home/yanbin/Tomosynthesis/data/tiffs_3d/5016/'
    paraPath = '/home/yanbin/localtest/'
    outputPath = '/home/yanbin/Tomosynthesis/results/5016/'
    im_name = '5016EMML08.tif'
    

    # loading 
    im = ImageIO.imReader(dataPath,im_name, 'tif',3)
    print (im.size_0,im.size_1,im.size_2)
    
    control_name = 'feats_control_1.txt'
    cancer_name = 'feats_cancer.txt'
    control = np.loadtxt(paraPath + control_name)
    cancer = np.loadtxt(paraPath + cancer_name)

    # training
    data = np.vstack((control,cancer))
    label = np.zeros((control.shape[0] + cancer.shape[0],),np.int)
    label[0:control.shape[0]] = 1
    
    data_projected = Dimreduction.dim_Reduction(data, label, opt='randtree', n_components=5, visualize = False)
    classifier = classification.classifier(data_projected,label)
    classifier.train(opt ='SVM')
# Example #11
def _iter_case_files(data_path, output_path):
    """Yield (input_dir, output_dir, file_name) for every file in every case
    sub-directory of data_path, creating the matching output directory on
    the fly.  Factored out of the three near-identical directory walks that
    main() originally copy-pasted.
    """
    dir_list = os.listdir(data_path)
    print(dir_list)
    for dirc in dir_list:
        print(dirc)
        if os.path.isdir(data_path + dirc):
            # make directory for output files
            opath = output_path + dirc + '/'
            print(opath)
            if not os.path.isdir(opath):
                os.makedirs(opath)
            for fil in os.listdir(data_path + dirc):
                yield data_path + dirc + '/', opath, fil


def main():
    """Batch preprocessing driver: smv->tiff format conversion,
    adaptive-Wiener denoising, and histogram-equalization contrast
    enhancement, each enabled by a run flag.
    """

    ## Please specify paths ##
    data_path = '/home/yanbin/Tomosynthesis/data/SAP_test_datasets/Screening_30_cases/'
    output_path = '/home/yanbin/Tomosynthesis/data/tiffs_3d/'
    exe_path = '/home/yanbin/Tomosynthesis/code/'

    ## Please specify Run Flags ##
    FormatConvert = 1
    AWDenoising = 0
    ContrastEnhancement = 0

    ## Please specify parameters ##
    dim = 3               # For format convert: save as 2d slices / 3d stack
    opt = 'asymptotic'    # For AWdenoising inverse transform options
    block_m = 5
    block_n = 5           # For AWdenoising Wiener filter window size block_m = block_n

    ###################### Availability Check #######################

    # data_path check (guard clause: exit before doing any work)
    if not os.path.isdir(data_path):
        print("Data directory:\n"+ data_path +"\ndoes not exist")
        sys.exit()

    # exe_path check
    if not os.path.isdir(exe_path):
        print("Executable directory:\n"+ exe_path +"\ndoes not exist")
        sys.exit()

    ###################### Format Convert #######################

    if FormatConvert == 1:
        for ipath, opath, fil in _iter_case_files(data_path, output_path):
            im = ImageIO.imReader(ipath, fil, 'smv')
            # BUG FIX: the original used fil.strip('smv'), which strips any
            # leading/trailing 's'/'m'/'v' characters from the whole name
            # (e.g. 'sample.smv' -> 'ample.tif'), not just the extension.
            ImageIO.imWriter(opath, os.path.splitext(fil)[0] + '.tif', im, dim)

    ############################# Denoising ##########################
    if AWDenoising == 1:
        for ipath, opath, fil in _iter_case_files(data_path, output_path):
            im = ImageIO.imReader(ipath, fil, 'tif', 2)
            # Pass the parameters declared above (the original declared
            # opt/block_m/block_n but re-hard-coded the same values here).
            denoised = AT_denoising.DenoisingAW(im.data[0], opt=opt, block_m=block_m, block_n=block_n)
            tiffLib.imsave(opath + 'denoised_' + fil, denoised)

    ###################### Contrast enhancement #######################
    if ContrastEnhancement == 1:
        for ipath, opath, fil in _iter_case_files(data_path, output_path):
            im = ImageIO.imReader(ipath, fil, 'tif', 2)
            enhanced = histEqualization.histEqualization(im.data[0], 16)
            tiffLib.imsave(opath + 'enhanced_' + fil, enhanced)