Example #1
def getROC(input_file):
    global nbP
    global nbN
    y_class = []
    y_score = []
    files = basicOperations.getFiles(input_file, "txt")
    for inputFile in files:
        f = open(inputFile, 'r')
        lines = f.readlines()
        for line in lines:
            words = line.split()
            # last column: class label (0/1); second-to-last column: score
            y_class.append(int(words[-1]))
            y_score.append(float(words[-2]))
            if words[-1] == "0":
                nbN += 1
            elif words[-1] == "1":
                nbP += 1
        f.close()
    return metrics.roc_curve(y_class, y_score)
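
## Usage sketch (not part of the original example): plot the ROC curve that
## getROC() returns. It assumes sklearn.metrics is imported as `metrics`, as the
## function above requires, and "/path/to/results" stands for a hypothetical
## folder of txt files whose last column is the class label (0/1) and whose
## second-to-last column is the score.
import matplotlib.pyplot as plt
nbP, nbN = 0, 0                       # getROC() updates these globals
fpr, tpr, thresholds = getROC("/path/to/results")
plt.plot(fpr, tpr, label="AUC = %.3f" % metrics.auc(fpr, tpr))
plt.xlabel("False positive rate")
plt.ylabel("True positive rate")
plt.legend(loc="lower right")
plt.show()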

idx = []
for i in range(len(feature_select)):
    if feature_select[i]:
        idx.append(i)

##########################################################################
##########################################################################
## 7/10 new
## I. Load all datasets

num_candi = []

if 1:
    files = basicOperations.getFiles(path,'txt')

    learn_set = []
    learn_class = []
    learn_orig = []

    j=0
    for maList in files:
        print "Load:",j,maList
        N_error = 0
    
        f = open(maList,'r')
        lines = f.readlines()
        rows = len(lines)
        L = 0
        if rows>0:
Example #3
def loadAllDataset(pickleFile, train_folders, N_featPF):
   
    pklFiles = basicOperations.getFiles(input_path, "cpkl")

    # pattern1 = ".+(?P<slide>slide[0-9]).+cpkl"
    pattern1 = ".+pickles/(?P<slide>[A-Za-z0-9]+)/.+cpkl"
    toto1 = re.compile(pattern1)
    # pattern2 = ".+/(?P<filename>[A-Za-z0-9]+_[A-Za-z0-9]+_[0-9]+).+cpkl"
    pattern2 = ".+/(?P<filename>[A-Za-z0-9]+_[A-Za-z0-9]+).+cpkl"
    toto2 = re.compile(pattern2)
    
    feat = {}
    count = 1
    Nobjects = [0,0,0] # number of mitos, notmitos and others
    for folder in train_folders:
        mito = []
        notmito = []
        notannot = []
        others = []
        for pkl in pklFiles:
            m = re.search(folder, pkl)
            if m is None:
                continue
            pkltemp = open(pkl, 'rb')
            feats = cPickle.load(pkltemp)
            for i in range(len(feats)):
                # the last three columns hold the object's center and its label,
                # so only the feature part feats[i][:-3] is kept
                if feats[i][-1] == 1:
                    mito.append(feats[i][:-3])
                elif feats[i][-1] == 2:
                    notmito.append(feats[i][:-3])
                elif feats[i][-1] == 3:
                    notannot.append(feats[i][:-3])
            # centers.append(feats[i][-3:-1])
            # slides.append(slide)
            pkltemp.close()
        # idx_shuffle = shuffleSeq(len(others))
        # for i in range(N_featPF):
        #     notannot.append(others[idx_shuffle[i]])
        # for x in mito:
        #     mitos.append(x)
        # for x in notmito:
        #     notmitos.append(x)


        ######################################
        ### check whether there are invalid (nan or inf) elements
        mito = np.array(mito)
        notmito = np.array(notmito)
        notannot = np.array(notannot)
        toto = [mito, notmito, notannot]
        for i in range(3):
            if np.isnan(toto[i]).any():
                nanElem = np.isnan(toto[i])
                nanElemRow = np.unique(np.where(nanElem)[0])
                infElemRow = np.unique(np.where(toto[i] >= np.finfo(np.float32).max)[0])
                nanElemRow = np.unique(np.hstack((nanElemRow, infElemRow)))
                arBool = np.ones(len(toto[i]))
                for x in nanElemRow:
                    arBool[x] = 0
                arBool = arBool.astype(np.bool)
                toto[i] = toto[i][arBool]
                ## centers = centers[arBool]
                ## slides = slides[arBool]
                ## filenames = filenames[arBool]
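                ## A more compact alternative (not in the original) would drop
                ## every row containing a non-finite value in one step:
                ##   toto[i] = toto[i][np.all(np.isfinite(toto[i]), axis=1)]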
        
        mito = toto[0]
        notmito = toto[1]
        notannot = toto[2]

        for i in range(3):
            for j in range(len(toto[i])):
                Nobjects[i] += 1
                feat[count] = {}
                feat[count]['folder'] = folder
                feat[count]['features'] = toto[i][j]
                if i==0:
                    feat[count]['annotation'] = 'mito'
                elif i==1:
                    feat[count]['annotation'] = 'notmito'
                else:
                    feat[count]['annotation'] = 'others'
                count += 1


    # f = open(pickleFile,'r')
    # feat = pickle.load(f)
    # f.close()

    ## get feature_name ##
    # featureName = feat[feat.keys()[0]]['features'].keys()
    print Nobjects   # [number of mitos, number of notmitos, number of others]
    featureName = 0  # placeholder; feature names are not extracted in this excerpt
    return feat, featureName
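
## Usage sketch (not part of the original example). `pickleFile` and `N_featPF`
## are unused in this excerpt, so placeholder values are passed; `input_path`
## must point at the directory of *.cpkl feature files read above, and the two
## folder names are taken from the training list used elsewhere in this project.
feat, featureName = loadAllDataset(None, ["A03", "A04"], 0)
counts = {}
for key in feat:
    counts[feat[key]['annotation']] = counts.get(feat[key]['annotation'], 0) + 1
print counts   # objects per annotation class: 'mito', 'notmito', 'others'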
Example #4
if __name__ == "__main__":
    ######
    # parameter
    n_job = 4
    
    input_path = "/home/seawave/work/database/train_40"
    csv_path = "/home/seawave/work/database/mitos_atypia"
    output_path = "/home/seawave/work/output/test3"
    
    
    train_folders = ["A03","A04","A05","A07","A10","A11","A12","A14","A15",\
            "A17","A18", "H03","H04","H05","H07","H10","H11","H12","H14","H15",\
            "H17","H18"]
    # train_folders = ["A03","A04","A05"]
   
    images = basicOperations.getFiles(input_path, "tiff")

    if 0: ### check for images that have not been analysed yet
        images_not_analysed = []
        pattern1 = ".+train_40/(?P<slide>[A-Za-z0-9]+)/(?P<filename>[A-Za-z0-9_]+).tiff"
        toto = re.compile(pattern1)
        for image in images:
            tata = toto.match(image)
            filename = tata.groupdict()['filename']
            imout = filename+".png"
            outfile = os.path.join(output_path, imout)
            if not os.path.isfile(outfile):
                images_not_analysed.append(image)

        if len(images_not_analysed) == 0:
            print "finished"
Example #5
## directories
path_result = "/Volumes/Xiwei_MacExt/output/test2"
path_imin = "/Volumes/Xiwei_MacExt/cecog_data/train_40"
path_csv = "/Volumes/Xiwei_MacExt/databases/mitos_atypia"
path_out_mitosis  = "/Volumes/Xiwei_MacExt/output/test2_crops"
# path_out_not_mitosis  = "/Volumes/Xiwei_MacExt/output/test1_crop"
# path_result = "/home/seawave/work/output/test2_div2"
# path_imin = "/home/seawave/work/database/train_40"
# path_csv = "/home/seawave/work/database/mitos_atypia"
# path_out_mitosis  = "/home/seawave/work/output/test2_crops"



## select mitosis count GTs
GT_list_all = basicOperations.getFiles(path_csv,'csv')
orig_image_all = basicOperations.getFiles(path_imin,'tiff')
results = basicOperations.getFiles(path_result,'png')

mitosis_set = []
not_mitosis_set = []
L1 = len(path_csv)
for filetemp1 in GT_list_all:
    filetemp2 = filetemp1[L1+1:]
    m = re.search("mitosis", filetemp2)
    if m!=None:
        m2 = re.search("not", filetemp2)
        if m2!=None:
            not_mitosis_set.append(filetemp1)
        else:
            mitosis_set.append(filetemp1)
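
## Illustration (not part of the original example) of how the two searches above
## split hypothetical GT file names found under path_csv:
##   ".../A03/mitosis/A03_00Aa_mitosis.csv"      -> mitosis_set
##   ".../A03/mitosis/A03_00Aa_not_mitosis.csv"  -> not_mitosis_set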
Example #6
  return tf.Variable(initial, name=name)
  
#Convolution and Pooling
def conv2d(x, W):
  return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')

def max_pool_2x2(x):
  return tf.nn.max_pool(x, ksize=[1, 2, 2, 1],
                        strides=[1, 2, 2, 1], padding='SAME')
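
## A minimal sketch (not part of the original example) of how conv2d and
## max_pool_2x2 are typically chained on [batch, size, size, 1] image tensors.
## `weight_variable` and `bias_variable` are hypothetical helpers in the style
## of the truncated definition at the top of this example, returning tf.Variable
## tensors of the requested shape.
def first_conv_layer(x_flat, size):
  x_image = tf.reshape(x_flat, [-1, size, size, 1])
  W_conv1 = weight_variable([5, 5, 1, 32])    # 5x5 kernels, 32 feature maps
  b_conv1 = bias_variable([32])
  h_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1)
  return max_pool_2x2(h_conv1)                # halves the spatial resolution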
                        



if __name__ == "__main__":

    txt_files = basicOperations.getFiles(input_path, "txt")
    ASF_files = basicOperations.getFiles(input_path, "png", "ASF")
    MACandi_files = basicOperations.getFiles(input_path, "png", "MaCandi")

    for testIdx in range(148):
        size_pool = size
        #### Get training data
        array_trainning_set, array_trainning_class, test_image_set = getTrainingData()

        ###################################################################
        #### SETUP CNN
        # tf.set_random_seed(1234) 

        sess = tf.InteractiveSession()
        x = tf.placeholder(tf.float32, [None, size*size])
        y_ = tf.placeholder(tf.float32, [None, 2])
Example #7
import basicOperations


## A window centered at GT coordinate
winSize = 231
winSizeR = winSize/2        # 115 with Python 2 integer division
winSizeR2 = winSize/2+1     # 116
cut_orig = True
cut_result = True

## directories

path_result = "/home/seawave/work/output/test1b"


results = basicOperations.getFiles(path_result,'txt')


mitosis = []
for result in results:

    textTemp = open(result,'r')
    lines = textTemp.readlines()

    if len(lines)==0:
        continue

    i = 0
    for line in lines:
        tempfeat = []
        words = line.split(' ')