def run(f):
    patient_id = os.path.basename(f)[:-len("_seg.nii.gz")]

    # if patient_id != "1585":
    #     return
    
    print "PATIENT_ID",patient_id
    
    f_img = img_folder + "/" + patient_id + ".nii"
    if not os.path.exists(f_img):
        f_img += ".gz"
        
    seg = irtk.imread( f, dtype='float32', force_neurological=True )
    img = irtk.imread( f_img, dtype='float32', force_neurological=True )
    
    img = irtk.Image(nd.median_filter(img.view(np.ndarray),(3,5,5)),img.get_header())

    ga = all_ga[patient_id]

    scale = get_CRL(ga)/get_CRL(30.0)

    # if all_iugr[patient_id][0] == 1:
    #     scale = (get_weight(ga,0.02) / get_weight(30,0.5)) ** (1.0/3.0)
    # else:
    #     scale = (get_weight(ga,0.5) / get_weight(30,0.5)) ** (1.0/3.0)
    
    seg = seg.resample( 1.0*scale, interpolation='nearest')
    img = img.resample( 1.0*scale, interpolation='bspline' )
    
    irtk.imwrite(output_folder + "/data_resampled_weight/"+patient_id+"_img.nii.gz",img)
    irtk.imwrite(output_folder + "/data_resampled_weight/"+patient_id+"_seg.nii.gz",seg)

    return
Example #2
def get_training_data(file_img, file_mask, r):
    # create mask
    input_mask = irtk.imread(file_mask)
    x_min, y_min, z_min, x_max, y_max, z_max = (input_mask == 0).bbox()

    background = irtk.zeros(input_mask.get_header(), dtype='uint8')
    background[z_min:z_max + 1, y_min:y_max + 1, x_min:x_max + 1] = 1
    background = nd.morphological_gradient(background, size=7)
    n = background[z_min + 1:z_max, y_min + 1:y_max, x_min + 1:x_max].sum()
    z = np.random.randint(low=0, high=input_mask.shape[0], size=int(1.25 * n))
    y = np.random.randint(low=0, high=input_mask.shape[1], size=int(1.25 * n))
    x = np.random.randint(low=0, high=input_mask.shape[2], size=int(1.25 * n))
    background[z, y, x] = 1
    background[z_min + 1:z_max, y_min + 1:y_max, x_min + 1:x_max] = 0

    foreground = (input_mask == 1).astype('uint8')

    new_mask = irtk.zeros(input_mask.get_header(), dtype='uint8')
    new_mask[foreground == 1] = 1
    new_mask[background != 0] = 2

    img = irtk.imread(file_img, dtype='float32')

    X = []
    Y = []

    for z in xrange(img.shape[0]):
        YX = np.transpose(np.nonzero(foreground[z]))
        if DEBUG:
            YX = YX[::10]
        else:
            YX = YX[::2]
        if YX.shape[0] == 0:
            continue
        patches = extract_patches2D(img[z], r, YX)
        patches = np.reshape(
            patches, (patches.shape[0], patches.shape[1] * patches.shape[2]))
        print patches.shape, YX.shape
        X.extend(patches)
        Y.extend([1] * len(YX))

    for z in xrange(img.shape[0]):
        YX = np.transpose(np.nonzero(background[z]))
        if DEBUG:
            YX = YX[::10]
        else:
            YX = YX[::2]
        if YX.shape[0] == 0:
            continue
        patches = extract_patches2D(img[z], r, YX)
        patches = np.reshape(
            patches, (patches.shape[0], patches.shape[1] * patches.shape[2]))
        print patches.shape, YX.shape
        X.extend(patches)
        Y.extend([0] * len(YX))

    return X, Y
Example #3
    def show_offline_preprocessing( self, folder ):
        if not os.path.exists(folder):
            os.makedirs(folder)
        all_files = glob( "offline_preprocessing/*_img.nii.gz" )
        for f in all_files:
            print f
            name = os.path.basename(f)[:-len("_img.nii.gz")]
            img = irtk.imread(f,dtype='float32')
            seg = irtk.imread("offline_preprocessing/"+name+"_seg.nii.gz",dtype='uint8')
            irtk.imshow(img,seg,filename=folder+"/"+name+".png",opacity=0.4)
def create_mask_from_all_masks(f_lists,transformations,ga,resolution=0.75):
    points = []
    for f, t in zip(f_lists,transformations):
        m = irtk.imread(f,force_neurological=True)
        points.extend( t.apply(get_corners(m)) )

    points = np.array(points,dtype='float64')
    
    x_min, y_min, z_min = points.min(axis=0)
    x_max, y_max, z_max = points.max(axis=0)

    pixelSize = [resolution, resolution, resolution, 1]
    orientation = np.eye( 3, dtype='float64' )
    origin = [ x_min + (x_max - x_min)/2,
               y_min + (y_max - y_min)/2,
               z_min + (z_max - z_min)/2,
               0 ]
    dim = [ (x_max - x_min)/resolution,
            (y_max - y_min)/resolution,
            (z_max - z_min)/resolution,
            1 ]

    header = irtk.new_header( pixelSize=pixelSize,
                orientation=orientation,
                origin=origin,
                dim=dim )

    mask = irtk.zeros( header, dtype='float32' )

    for f, t in zip( f_lists, transformations ):
        m = irtk.imread(f,force_neurological=True).transform(t, target=mask,interpolation="linear")
        mask += m

    irtk.imwrite( "debug_mask1.nii.gz", mask)
    
    mask = irtk.Image( nd.gaussian_filter( mask, 0.5 ),
                       mask.get_header() )

    irtk.imwrite( "debug_mask2.nii.gz", mask)

    mask = (mask > 0).bbox(crop=True).astype('uint8')

    scale = get_CRL(ga)/get_CRL(30.0)
    template = irtk.imread(f_template,force_neurological=True)
    template.header['pixelSize'][:3] /= scale
    
    template = template.transform(target=mask,interpolation='nearest')
    mask[template==0] = 0

    irtk.imwrite( "debug_template.nii.gz", template)
    irtk.imwrite( "debug_mask3.nii.gz", mask)

    return mask
def crop_data(f,mask,t):
    file_id = os.path.basename(f).split('.')[0]
    
    img = irtk.imread( f, dtype='float32',force_neurological=True )
    seg = mask.transform(t.invert(), target=img,interpolation='nearest')
    x_min,y_min,z_min,x_max,y_max,z_max = seg.bbox()
    seg = seg[z_min:z_max+1,
              y_min:y_max+1,
              x_min:x_max+1]
    img = img[z_min:z_max+1,
              y_min:y_max+1,
              x_min:x_max+1].rescale(0,1000) + 1.0 # +1 to avoid zeros in the heart
    img_file = output_dir + '/img_' + file_id + ".nii.gz"
    irtk.imwrite( img_file, img )
    for z in range(img.shape[0]):
        scale = img[z].max()
        img[z] = restoration.nl_means_denoising(img[z].rescale(0.0,1.0).view(np.ndarray),
                                                fast_mode=False,
                                                patch_size=5,
                                                patch_distance=7,
                                                h=0.05,
                                                multichannel=False)
        img[z] *= scale
    img[seg==0] = 0
    masked_file = output_dir + '/masked_' + file_id + ".nii.gz"
    irtk.imwrite( masked_file, img )
Example #6
def predict( self,
             filename,
             nb_autocontext=None,
             mask=None,
             debug=False,
             return_all=False ):
    """
    The prediction function must be defined outside of the main class in order to be used in joblib's Parallel.
    """
    nb_labels = len(self.params['labels'])+1
    
    if nb_autocontext is None:
        nb_autocontext = len(glob(self.params['name'] + "_*"))

    if self.params['predict_preprocessing_function'] is None:
        img = irtk.imread( filename, dtype="float32" )
        img = img.resample( pixelSize=self.params['resample'], interpolation='linear' ).rescale(0,1000)
    else:
        img = self.params['predict_preprocessing_function'](self, filename).copy()
    
    if mask is None:
        mask = irtk.ones( img.get_header(), dtype="uint8" ).as3D()
        mask[img==0] = 0
    else:
        mask = mask.resample( pixelSize=self.params['resample'], interpolation='nearest' ).astype('uint8')
    
    probas = predict_autocontext( self,
                                  img,
                                  mask,
                                  nb_labels,
                                  nb_autocontext,
                                  debug=debug,
                                  return_all=return_all )
    
    return probas
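A minimal sketch of the dispatch pattern the docstring refers to, mirroring the commented-out Parallel call in the score() method further below; predict_all, detector and filenames are illustrative names, not part of the original code:

from joblib import Parallel, delayed

def predict_all(detector, filenames, n_jobs=4):
    # each worker pickles the detector and calls the module-level predict()
    return Parallel(n_jobs=n_jobs)(
        delayed(predict)(detector, f) for f in filenames
    )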
Example #7
def show(all_files,prefix="",saturate=False):
    for f in all_files:
        name = os.path.basename(f)[:-len('.nii.gz')]
        img = irtk.imread(f,dtype="float32")
        if saturate:
            img = img.saturate().rescale()
        png_name = "img/"+prefix+name+".png"
        print png_name
        irtk.imshow(img,filename=png_name)
def align_to_template(f,f_template,output_folder,ga):
    file_id = f.split('/')[-3]
    landmarks = irtk.imread(f,force_neurological=True)
    scale = get_CRL(ga)/get_CRL(30.0)
    template = irtk.imread(f_template,force_neurological=True)
    template.header['pixelSize'][:3] /= scale
    points = []
    points_template = []
    for i,j in zip( [2,8,3,4,5],
                    [5,4,1,2,3] ):
       points_template.append( template.ImageToWorld( nd.center_of_mass(template.view(np.ndarray)==i)[::-1] ) )
       points.append( landmarks.ImageToWorld( nd.center_of_mass(landmarks.view(np.ndarray)==j)[::-1] ) )

    t,rms = irtk.registration_rigid_points( np.array(points),
                                            np.array(points_template),
                                            rms=True )
    print "RMS: ", rms
    t.invert().write( output_folder + '/' + file_id + '.dof' )
    landmarks = landmarks.transform(t,target=template)
    irtk.imwrite( output_folder + '/landmarks_' + file_id + '.nii.gz',landmarks )
    return t
def run(f):
    patient_id = os.path.basename(f)[:-len("_seg.nii.gz")]

    print "PATIENT_ID",patient_id

    f_img = data_folder + "/" + patient_id + ".nii"
    if not os.path.exists(f_img):
        f_img += ".gz"
    seg = irtk.imread( f, dtype='float32', force_neurological=True )
    img = irtk.imread( f_img, dtype='float32', force_neurological=True )

    img = irtk.Image(nd.median_filter(img.view(np.ndarray),(3,5,5)),img.get_header())
    
    ga = all_ga[patient_id]

    scale = get_CRL(ga)/get_CRL(30.0)

    OFD = get_OFD(30.0)
    BPD = get_BPD(30.0)
    CRL = get_CRL(30.0)

    brain_center = seg.ImageToWorld( np.array(nd.center_of_mass( (seg == 2).view(np.ndarray) ),
                                                 dtype='float32')[::-1] )

    header = img.get_header()
    header['origin'][:3] = brain_center
    header['pixelSize'][:3] = 1.0*scale
    header['dim'][0] = CRL
    header['dim'][1] = CRL
    header['dim'][2] = CRL
    
    img = img.transform( target=header, interpolation="bspline" )
    seg = seg.transform( target=header, interpolation="nearest" )

    img[img<1.0] = 0
    
    irtk.imwrite(output_folder + "brain_center/"+patient_id+"_img.nii.gz",img)
    irtk.imwrite(output_folder + "brain_center/"+patient_id+"_seg.nii.gz",seg)
    
    return
def get_center_brain_detection(f,world=True):
    input_mask = irtk.imread( f, force_neurological=False )
    mask = irtk.ones( input_mask.get_header(), dtype='uint8' )
    mask[input_mask == 2] = 0
    x_min, y_min, z_min, x_max, y_max, z_max = map( float, mask.bbox() )
    center = [ x_min + (x_max-x_min)/2,
               y_min + (y_max-y_min)/2,
               z_min + (z_max-z_min)/2 ]
    if not world:
        center = np.array(center,dtype='float64')[::-1]
        return center
    else:
        center = input_mask.ImageToWorld(center)
        return center
Example #11
    def score( self,
               validation_patients,
               nb_autocontext=None ):
        gc.collect()
        filenames = []
        for patient_id in validation_patients:
            img_filename = self.params['img_folder'] + "/" + patient_id + self.params['file_extension']
            filenames.append(img_filename)
            
        # probas = Parallel(n_jobs=self.params['n_jobs'])(delayed(predict_level)( self,
        #                                                                         img_filename,
        #                                                                         all_ga[patient_id],
        #                                                                         level=level,
        #                                                                         nb_autocontext=nb_autocontext )
        #                                                 for patient_id,img_filename in zip(validation_patients,filenames) )

        probas = []
        for patient_id,img_filename in zip(validation_patients,filenames):
            print img_filename
            probas.append( predict( self,
                                    img_filename,
                                    nb_autocontext=nb_autocontext ) )

        print "will compute Dice scores"
        score = 0.0
        n = 0
        for patient_id,proba in zip(validation_patients,probas):
            header = proba.get_header()
            header['dim'][3] = 1
            if self.params['offline_preprocessing']:
                seg_filename =  "offline_preprocessing/"+patient_id+"_seg"+ self.params['file_extension']
            else:
                seg_filename =  self.params['seg_folder'] + "/" +patient_id+"_seg"+ self.params['file_extension']
            seg = irtk.imread( seg_filename, dtype="uint8" )
            #seg = seg.resample( self.params['resample'], interpolation="nearest").astype('uint8')

            # irtk.imwrite( "debug/"+patient_id+"_proba"+str(nb_autocontext)+".nii.gz",
            #                   proba ) 
                            
            # we skip mother/background as it depends on the mask
            for i in [1]:#xrange(1,proba.shape[0]):
                dice,overlap = (seg==i).dice( self.hard_thresholding( proba[i] ),
                                              verbose=False)
                score += dice
                n += 1
        
        return score/n
def process_file( f, ga, step=1, DEBUG=False ):
    print f
    img = irtk.imread( f, dtype='float32', force_neurological=False )
    
    ## Resample
    img = resampleOFD( img, ga )

    ## Contrast-stretch with saturation
    img = img.saturate(1,99).rescale().astype('uint8')

    detector = cv2.SIFT( nfeatures=0,
                         nOctaveLayers=3,
                         contrastThreshold=0.04,
                         edgeThreshold=10,
                         sigma=0.8)
    descriptorExtractor = cv2.DescriptorExtractor_create("SIFT")

    points = []
    for z in range(0,img.shape[0],step):
        keypoints = detector.detect(img[z])
        if keypoints is None or len(keypoints) == 0:
            continue
        (keypoints, descriptors) = descriptorExtractor.compute(img[z],keypoints)       
        unique_index= np.unique( descriptors.dot(np.random.rand(128)),
                                 return_index=True)[1]
        points.extend(descriptors[unique_index])

        ## For debugging purpose:
        if DEBUG:
            img_color = cv2.cvtColor( img[z].astype('uint8'), cv2.cv.CV_GRAY2RGB )
            # draw the detected SIFT keypoints for this slice
            for k in keypoints:
                cv2.circle( img_color,
                            (int(k.pt[0]),int(k.pt[1])),
                            2,
                            (0,0,255),
                            -1)
            cv2.imwrite( "/tmp/"
                         + os.path.basename(f.rstrip('.nii'))
                         + "_" + str(z)
                         +".png", img_color )

    points = np.array(points)
    unique_index= np.unique( points.dot(np.random.rand(128)),
                                 return_index=True)[1]
    return points[unique_index]
def mask_data(f):
    file_id = f.split('/')[-3]
    seg = irtk.imread(f,force_neurological=True) > 0

    r = 10
    x_min,y_min,z_min,x_max,y_max,z_max = seg.bbox()
    seg = seg[max(0,z_min-3*r):min(z_max+3*r+1,seg.shape[0]),
              max(0,y_min-3*r):min(y_max+3*r+1,seg.shape[1]),
              max(0,x_min-3*r):min(x_max+3*r+1,seg.shape[2])]
    ball = morphology.ball( 5 )
    seg = irtk.Image( nd.binary_dilation(seg,ball), seg.get_header() )
    ball = morphology.ball( r )
    seg = irtk.Image( nd.binary_closing(seg,ball), seg.get_header() )
    
    seg = seg.bbox(crop=True)
        
    seg_file = output_dir + '/seg_' + file_id + ".nii.gz"
    irtk.imwrite( seg_file, seg )
Example #14
def predict(self,
            filename,
            ga,
            nb_autocontext=None,
            mask=None,
            debug=False,
            return_all=False):
    nb_labels = len(self.labels) + 1

    if nb_autocontext is None:
        nb_autocontext = len(glob(self.params['name'] + "_*"))

    img = irtk.imread(filename, dtype="float32")
    img = img.resample(pixelSize=self.params['resample'],
                       interpolation='linear').rescale(0, 1000)

    extra_layers = []

    if mask is None:
        mask = irtk.ones(img.get_header(), dtype="uint8")
        mask[img == 0] = 0
    else:
        mask = mask.resample(pixelSize=self.params['resample'],
                             interpolation='nearest').astype('uint8')

    metadata = None

    probas = predict_autocontext(self,
                                 img,
                                 mask,
                                 np.array(extra_layers, dtype="float32"),
                                 metadata,
                                 nb_labels,
                                 ga,
                                 nb_autocontext,
                                 debug=debug,
                                 return_all=return_all)

    return probas
def predict( self,
             filename,
             ga,
             nb_autocontext=None,
             mask=None,
             debug=False,
             return_all=False ):
    nb_labels = len(self.labels)+1
    
    if nb_autocontext is None:
        nb_autocontext = len(glob(self.params['name'] + "_*"))

    img = irtk.imread( filename, dtype="float32" )
    img = img.resample( pixelSize=self.params['resample'], interpolation='linear' ).rescale(0,1000)

    extra_layers = []

    if mask is None:
        mask = irtk.ones( img.get_header(), dtype="uint8" )
        mask[img==0] = 0
    else:
        mask = mask.resample( pixelSize=self.params['resample'], interpolation='nearest' ).astype('uint8')

    metadata = None
    
    probas = predict_autocontext( self,
                                  img,
                                  mask,
                                  np.array( extra_layers, dtype="float32" ),
                                  metadata,
                                  nb_labels,
                                  ga,
                                  nb_autocontext,
                                  debug=debug,
                                  return_all=return_all )
    
    return probas
def get_coordinates( f ):
    seg = irtk.imread( f )
    u,v,w = get_orientation_training(seg)

    M = np.array( [u,v,w], dtype='float32' ) # Change of basis matrix
    
    heart =  np.array(nd.center_of_mass( (seg == 5).view(np.ndarray) ),
                      dtype='float32')
    brain =  np.array(nd.center_of_mass( (seg == 2).view(np.ndarray) ),
                      dtype='float32')
    left_lung =  np.array(nd.center_of_mass( (seg == 3).view(np.ndarray) ),
                          dtype='float32')
    right_lung =  np.array(nd.center_of_mass( (seg == 4).view(np.ndarray) ),
                           dtype='float32')
    liver =  np.array(nd.center_of_mass( (seg == 8).view(np.ndarray) ),
                      dtype='float32')

    # center on the heart and orient using the change-of-basis matrix M
    brain = np.dot( M, brain - heart)
    left_lung = np.dot( M, left_lung - heart)
    right_lung = np.dot( M, right_lung - heart)
    liver = np.dot( M, liver - heart)

    return np.array( [brain, left_lung, right_lung, liver], dtype='float32' ).flatten()
Example #17
def detect_mser(
    raw_file,
    ga,
    vocabulary,
    mser_detector,
    NEW_SAMPLING,
    output_folder="debug",
    DEBUG=False,
    return_image_regions=False,
):

    OFD = get_OFD(ga) / NEW_SAMPLING
    BPD = get_BPD(ga) / NEW_SAMPLING

    max_e = 0.64

    mser = cv2.MSER(
        _delta=5,
        _min_area=60,
        _max_area=14400,
        _max_variation=0.15,
        _min_diversity=0.1,
        _max_evolution=200,
        _area_threshold=1.01,
        _min_margin=0.003,
        _edge_blur_size=5,
    )

    sift = cv2.SIFT(nfeatures=0, nOctaveLayers=3, contrastThreshold=0.04, edgeThreshold=10, sigma=0.8)
    siftExtractor = cv2.DescriptorExtractor_create("SIFT")

    voca = np.load(open(vocabulary, "rb"))
    classifier = neighbors.NearestNeighbors(1, algorithm="kd_tree")
    N = voca.shape[0]
    classifier.fit(voca)
    # flann = pyflann.FLANN()
    # flann.build_index( voca.astype('float32') )

    svc = joblib.load(mser_detector)

    img = irtk.imread(raw_file, dtype="float32")
    img = img.resample2D(NEW_SAMPLING).saturate().rescale().astype("uint8")

    detections = []
    image_regions = []
    for z in range(img.shape[0]):
        detections.append([])
        image_regions.append([])

        # Extract MSER
        # print "extracting mser"
        contours = mser.detect(img[z, :, :])
        # print "mser done"

        if DEBUG:
            img_color = cv2.cvtColor(img[z, :, :], cv2.cv.CV_GRAY2RGB)
            for c in contours:
                ellipse = cv2.fitEllipse(np.array(map(lambda x: [x], c), dtype="int32"))
                cv2.ellipse(img_color, (ellipse[0], (ellipse[1][0], ellipse[1][1]), ellipse[2]), (0, 0, 255))

            cv2.imwrite(output_folder + "/" + str(z) + "_all_mser_.png", img_color)

            img_color_mser = cv2.cvtColor(img[z, :, :], cv2.cv.CV_GRAY2RGB)

        # Filter MSER
        selected_mser = []
        mask = np.zeros((img.shape[1], img.shape[2]), dtype="uint8")
        # print "fitting ellipses"
        for c in contours:
            ellipse = cv2.fitEllipse(np.reshape(c, (c.shape[0], 1, 2)).astype("int32"))

            # filter by size
            if ellipse[1][0] > OFD or ellipse[1][1] > OFD or ellipse[1][0] < 0.5 * OFD or ellipse[1][1] < 0.5 * OFD:
                continue

            # filter by eccentricity
            if math.sqrt(1 - (np.min(ellipse[1]) / np.max(ellipse[1])) ** 2) > max_e:
                continue

            cv2.ellipse(mask, ellipse, 255, -1)
            selected_mser.append((c, ellipse))

        # print "ellipses done"
        if len(selected_mser) == 0:
            continue

        # Extract SIFT
        # print "extracting SIFT"
        keypoints = sift.detect(img[z, :, :], mask=mask)
        # print "SIFT done"
        if keypoints is None or len(keypoints) == 0:
            continue
        (keypoints, descriptors) = siftExtractor.compute(img[z, :, :], keypoints)

        # words = np.zeros(len(keypoints),dtype="int")
        # for i,d in enumerate(descriptors):
        #     words[i] = classifier.kneighbors(d, return_distance=False)
        words = classifier.kneighbors(descriptors, return_distance=False)
        # words, dist = flann.nn_index( descriptors.astype('float32') )

        for i, (c, ellipse) in enumerate(selected_mser):
            # Compute histogram
            hist = np.zeros(N, dtype="float")
            for ki, k in enumerate(keypoints):
                if is_in_ellipse(k.pt, ellipse):
                    hist[words[ki]] += 1

            # Normalize histogram
            norm = np.linalg.norm(hist)
            if norm > 0:
                hist /= norm

            cl = svc.predict(hist).flatten()

            if DEBUG:
                if cl == 1:
                    opacity = 0.4
                    img_color = cv2.cvtColor(img[z, :, :], cv2.cv.CV_GRAY2RGB)
                    for p in c:
                        img_color[p[1], p[0], :] = (1 - opacity) * img_color[p[1], p[0], :] + opacity * np.array(
                            [0, 255, 0]
                        )
                    cv2.imwrite(output_folder + "/" + str(z) + "_" + str(i) + "_region.png", img_color)

                img_color = cv2.cvtColor(img[z, :, :], cv2.cv.CV_GRAY2RGB)

                cv2.ellipse(img_color, (ellipse[0], (ellipse[1][0], ellipse[1][1]), ellipse[2]), (0, 0, 255))
                for k_id, k in enumerate(keypoints):
                    if is_in_ellipse(k.pt, ellipse):
                        if cl == 1:
                            cv2.circle(img_color, (int(k.pt[0]), int(k.pt[1])), 2, (0, 255, 0), -1)
                        else:
                            cv2.circle(img_color, (int(k.pt[0]), int(k.pt[1])), 2, (0, 0, 255), -1)
                cv2.imwrite(output_folder + "/" + str(z) + "_" + str(i) + ".png", img_color)

                cv2.ellipse(
                    img_color_mser,
                    (ellipse[0], (ellipse[1][0], ellipse[1][1]), ellipse[2]),
                    (0, 255, 0) if cl == 1 else (0, 0, 255),
                )

            if cl == 1:
                ellipse_center = [ellipse[0][0], ellipse[0][1], z]
                print c.shape
                if return_image_regions:
                    image_regions[-1].append((ellipse_center, c))
                else:
                    region = np.hstack((c, [[z]] * c.shape[0]))
                    detections[-1].append((img.ImageToWorld(ellipse_center), img.ImageToWorld(region)))

        if DEBUG:
            cv2.imwrite(output_folder + "/" + str(z) + "_color_mser.png", img_color_mser)

    if return_image_regions:
        return image_regions
    else:
        return detections
Example #18
#!/usr/bin/python

import irtk
import cv2

mask = irtk.imread("mask.nii", dtype='uint8')
irtk.imwrite("mask.png", mask)

img = irtk.Image(cv2.imread("lena.png", 0))

irtk.imshow(img,
            mask,
            filename="initialisation.png",
            colors={
                1: (255, 0, 0),
                2: (0, 255, 0)
            },
            opacity=1.0)

mask2 = irtk.imread("mask2.nii", dtype='uint8')
irtk.imwrite("mask2.png", mask2)
irtk.imshow(img,
            mask2,
            filename="initialisation2.png",
            colors={
                1: (255, 0, 0),
                2: (0, 255, 0)
            },
            opacity=1.0)
Example #19
    def getIRTKDtls(self, fileName):
        header, dtype = irtk._irtk.get_header(fileName)
        img = irtk.imread(fileName, dtype='float32')

        irtkDtls = []
        irtkDtls.append(
            ("Image Size",
             str(header['dim'][0]) + "\t" + str(header['dim'][1]) + "\t" +
             str(header['dim'][2]) + "\t" + str(header['dim'][3]) + "\t"))
        irtkDtls.append(
            ("Voxel Size (mm)", format(header['pixelSize'][0], '.3f') + "\t" +
             format(header['pixelSize'][1], '.3f') + "\t" +
             format(header['pixelSize'][2], '.3f') + "\t" +
             format(header['pixelSize'][3], '.3f') + "\t"))
        irtkDtls.append(("Origin", format(header['origin'][0], '.3f') + "\t" +
                         format(header['origin'][1], '.3f') + "\t" +
                         format(header['origin'][2], '.3f') + "\t" +
                         format(header['origin'][3], '.3f') + "\t"))
        irtkDtls.append(("X Axis", format(header['orientation'][0, 0], '.3f') +
                         "\t" + format(header['orientation'][0, 1], '.3f') +
                         "\t" + format(header['orientation'][0, 2], '.3f')))
        irtkDtls.append(("Y Axis", format(header['orientation'][1, 0], '.3f') +
                         "\t" + format(header['orientation'][1, 1], '.3f') +
                         "\t" + format(header['orientation'][1, 2], '.3f')))
        irtkDtls.append(("Z Axis", format(header['orientation'][2, 0], '.3f') +
                         "\t" + format(header['orientation'][2, 1], '.3f') +
                         "\t" + format(header['orientation'][2, 2], '.3f')))

        irtkDtls.append(("Ordering", img.order()))
        orientation = img.orientation()
        irtkDtls.append(
            ("Orientation",
             orientation[0] + "\t" + orientation[1] + "\t" + orientation[2]))
        irtkDtls.append(("Data Type", dtype))

        irtkDtls.append(("Min-Max", format(float(img.min()), '.3f') + "\t" +
                         format(float(img.max()), '.3f')))
        irtkDtls.append(("Image to World", format(img.I2W[0, 0], '.3f') +
                         "\t" + format(img.I2W[0, 1], '.3f') + "\t" +
                         format(img.I2W[0, 2], '.3f') + "\t" +
                         format(img.I2W[0, 3], '.3f') + "\n" +
                         format(img.I2W[1, 0], '.3f') + "\t" +
                         format(img.I2W[1, 1], '.3f') + "\t" +
                         format(img.I2W[1, 2], '.3f') + "\t" +
                         format(img.I2W[1, 3], '.3f') + "\n" +
                         format(img.I2W[2, 0], '.3f') + "\t" +
                         format(img.I2W[2, 1], '.3f') + "\t" +
                         format(img.I2W[2, 2], '.3f') + "\t" +
                         format(img.I2W[2, 3], '.3f') + "\n" +
                         format(img.I2W[3, 0], '.3f') + "\t" +
                         format(img.I2W[3, 1], '.3f') + "\t" +
                         format(img.I2W[3, 2], '.3f') + "\t" +
                         format(img.I2W[3, 3], '.3f')))
        irtkDtls.append(("World to Image", format(img.W2I[0, 0], '.3f') +
                         "\t" + format(img.W2I[0, 1], '.3f') + "\t" +
                         format(img.W2I[0, 2], '.3f') + "\t" +
                         format(img.W2I[0, 3], '.3f') + "\n" +
                         format(img.W2I[1, 0], '.3f') + "\t" +
                         format(img.W2I[1, 1], '.3f') + "\t" +
                         format(img.W2I[1, 2], '.3f') + "\t" +
                         format(img.W2I[1, 3], '.3f') + "\n" +
                         format(img.W2I[2, 0], '.3f') + "\t" +
                         format(img.W2I[2, 1], '.3f') + "\t" +
                         format(img.W2I[2, 2], '.3f') + "\t" +
                         format(img.W2I[2, 3], '.3f') + "\n" +
                         format(img.W2I[3, 0], '.3f') + "\t" +
                         format(img.W2I[3, 1], '.3f') + "\t" +
                         format(img.W2I[3, 2], '.3f') + "\t" +
                         format(img.W2I[3, 3], '.3f')))
        return irtkDtls
Example #20
    return noise.std()


def get_noiseZ(img):
    img = img.astype('float32')
    new_img = np.zeros(img.shape, dtype='float32')
    for x in xrange(img.shape[2]):
        new_img[:, :, x] = nd.gaussian_filter(img[:, :, x], 2, mode='reflect')
    noise = img - new_img
    #print "Noise Z:", noise.std(), img.std()
    return noise.std()


output_filename = sys.argv[3]

img = irtk.imread(sys.argv[1], dtype='float64').saturate()
mask = irtk.imread(sys.argv[2], dtype='int16')
mask = irtk.Image(mask, img.get_header())

# crop
x_min, y_min, z_min, x_max, y_max, z_max = mask.bbox()
mask = mask[z_min:z_max + 1, y_min:y_max + 1, x_min:x_max + 1]
tmp_img = img[z_min:z_max + 1, y_min:y_max + 1, x_min:x_max + 1]

downsampled_img = tmp_img.resample(2)
mask = mask.transform(target=downsampled_img, interpolation='nearest')

seg = irtk.graphcut(downsampled_img,
                    mask,
                    sigma=get_noiseXY(downsampled_img),
                    sigmaZ=get_noiseZ(downsampled_img))
def detect_mser( f,
                 ga,
                 vocabulary,
                 mser_detector,
                 output_folder="debug",
                 DEBUG=False ):

    max_e = 0.64
    OFD = get_OFD(30.0,centile=50) 
    BPD = get_BPD(30.0,centile=50)

    mser = cv2.MSER( _delta=5,
                     _min_area=60,
                     _max_area=14400,
                     _max_variation=0.15,
                     _min_diversity=.1,
                     _max_evolution=200,
                     _area_threshold=1.01,
                     _min_margin=0.003,
                     _edge_blur_size=5)

    sift = cv2.SIFT( nfeatures=0,
                     nOctaveLayers=3,
                     contrastThreshold=0.04,
                     edgeThreshold=10,
                     sigma=0.8)
    siftExtractor = cv2.DescriptorExtractor_create("SIFT")

    voca = np.load( open(vocabulary, 'rb') )
    nn_classifier = neighbors.NearestNeighbors(1,algorithm='kd_tree')
    N = voca.shape[0] 
    nn_classifier.fit(voca)

    classifier = joblib.load(mser_detector)
    classifier.classes_ = np.array([0,1],dtype='int')

    img = irtk.imread( f,
                       dtype='float32',
                       force_neurological=False ).saturate(1,99).rescale()

    img = resampleOFD( img, ga ).astype('uint8')

    detected_centers = []
    detected_regions = []
    for z in range(img.shape[0]):
        
        # Extract MSER
        #print "extracting mser"
        contours = mser.detect(img[z])
        #print "mser done"
        
        if DEBUG:
            img_color = cv2.cvtColor( img[z], cv2.cv.CV_GRAY2RGB )
            for c in contours:
                ellipse = cv2.fitEllipse(np.array(map(lambda x:[x],
                                                  c),dtype='int32'))
                cv2.ellipse( img_color, (ellipse[0],
                                         (ellipse[1][0],ellipse[1][1]),
                                         ellipse[2]) , (0,0,255))

            cv2.imwrite(output_folder + "/" +str(z) + "_all_mser_.png",img_color )

            img_color_mser = cv2.cvtColor( img[z], cv2.cv.CV_GRAY2RGB )

        # Filter MSER
        selected_mser = []
        mask = np.zeros( (img.shape[1],img.shape[2]), dtype='uint8' )
        #print "fitting ellipses"
        for c in contours:
            ellipse = cv2.fitEllipse(np.reshape(c, (c.shape[0],1,2) ).astype('int32'))

            # filter by size
            if ( ellipse[1][0] > OFD
                 or ellipse[1][1] > OFD
                 or ellipse[1][0] < 0.5*OFD
                 or ellipse[1][1] < 0.5*OFD ) :
                continue

            # filter by eccentricity
            if math.sqrt(1-(np.min(ellipse[1])/np.max(ellipse[1]))**2) > max_e:
                continue

            cv2.ellipse( mask, ellipse, 255, -1 )
            selected_mser.append((c,ellipse))

        #print "ellipses done"
        if len(selected_mser) == 0:
            continue

        # Extract SIFT
        #print "extracting SIFT"
        keypoints = sift.detect(img[z],mask=mask)
        #print "SIFT done"
        if keypoints is None or len(keypoints) == 0:
            continue
        (keypoints, descriptors) = siftExtractor.compute(img[z],keypoints)

        words = nn_classifier.kneighbors(descriptors, return_distance=False)
        
        for i,(c,ellipse) in enumerate(selected_mser):
            # Compute histogram
            hist = np.zeros(N, dtype='float')
            for ki,k in enumerate(keypoints):
                if is_in_ellipse(k.pt,ellipse):
                    hist[words[ki]] += 1

            # Normalize histogram
            norm = np.linalg.norm(hist)
            if norm > 0:
                hist /= norm

            cl = classifier.predict(hist)

            if DEBUG:
                if cl == 1:
                    opacity = 0.4
                    img_color = cv2.cvtColor( img[z], cv2.cv.CV_GRAY2RGB )
                    for p in c:
                        img_color[p[1],p[0],:] = (
                            (1-opacity)*img_color[p[1],p[0],:]
                            + opacity * np.array([0,255,0])
                            )
                    cv2.imwrite(output_folder + "/"+str(z) + '_' +str(i) +"_region.png",img_color)

                img_color = cv2.cvtColor( img[z], cv2.cv.CV_GRAY2RGB )
                
                cv2.ellipse( img_color, (ellipse[0],
                                         (ellipse[1][0],ellipse[1][1]),
                                         ellipse[2]) , (0,0,255))
                for k_id,k in enumerate(keypoints):
                    if is_in_ellipse(k.pt,ellipse):
                        if cl == 1:
                            cv2.circle( img_color,
                                        (int(k.pt[0]),int(k.pt[1])),
                                        2,
                                        (0,255,0),
                                        -1)
                        else:
                            cv2.circle( img_color,
                                        (int(k.pt[0]),int(k.pt[1])),
                                        2,
                                        (0,0,255),
                                        -1)
                cv2.imwrite(output_folder + "/"+str(z) + '_' +str(i) +".png",img_color)

                cv2.ellipse( img_color_mser, (ellipse[0],
                                         (ellipse[1][0],ellipse[1][1]),
                                         ellipse[2]),
                             (0,255,0) if cl == 1 else (0,0,255) )

            if cl == 1:
                ellipse_center = [z,ellipse[0][1],ellipse[0][0]]
                detected_centers.append( ellipse_center )
                detected_regions.append( c )
            
        if DEBUG:
            cv2.imwrite(output_folder + "/"+str(z) + "_color_mser.png",img_color_mser)


    return np.array(detected_centers, dtype='int32'), np.array(detected_regions)
Example #22
#!/usr/bin/python

import irtk
import sys
import scipy.ndimage as nd

input_file = sys.argv[1]
output_file = sys.argv[2]

img = irtk.imread(input_file, dtype='float32')

img = irtk.Image(nd.median_filter(img.get_data(), 5), img.get_header())
irtk.imwrite(output_file, img)
import irtk
from glob import glob
import os

def get_ED_ES(patient_id):
    f = "/vol/biomedic/users/kpk09/DATASETS/CETUS_data/Training/Images/"+patient_id+"/"+patient_id+"_ED_ES_time.txt"
    f = open(f,"rb")
    res = []
    for line in f:
        line = line.rstrip() # chomp
        res.append( int(line.split(' ')[-1]))
    return res

all_files = glob("nifti/*_seg.nii.gz")

m = 0.0
n = 0.0
for f in all_files:
    patient_id = os.path.basename(f).split("_")[0]
    frame_id = int(os.path.basename(f).split("_")[1][len("frame"):])
    ED_ES = get_ED_ES(patient_id)
    print patient_id, frame_id, ED_ES
    if frame_id == ED_ES[1]:
        mask = irtk.imread(f).resample(0.001, interpolation='nearest')
        m += mask.sum()
        n += 1

m /= n

print "mean heart volume:",m
import numpy as np
import irtk

def get_center_brain_detection(f,world=True):
    input_mask = irtk.imread( f, force_neurological=False )
    mask = irtk.ones( input_mask.get_header(), dtype='uint8' )
    mask[input_mask == 2] = 0
    x_min, y_min, z_min, x_max, y_max, z_max = map( float, mask.bbox() )
    center = [ x_min + (x_max-x_min)/2,
               y_min + (y_max-y_min)/2,
               z_min + (z_max-z_min)/2 ]
    if not world:
        center = np.array(center,dtype='float64')[::-1]
        return center
    else:
        center = input_mask.ImageToWorld(center)
        return center

def get_box_center((z,y,x),(d,h,w),f):
    img = irtk.imread( f, empty=True, force_neurological=False )
    center = np.array( [ x+w/2,
                         y+h/2,
                         z+d/2 ], dtype='float' )
    center = img.ImageToWorld(center)
    return center
Example #25
def mask_image(file_img, file_mask, ga, r, neigh, output_dir):
    img = irtk.imread(file_img, dtype='float32')

    input_mask = irtk.imread(file_mask)

    print "predicting..."
    res = irtk.zeros(img.get_header(), dtype='float32')
    res2 = irtk.zeros(img.get_header(), dtype='float32')
    res3 = irtk.zeros(img.get_header(), dtype='float32')
    res4 = irtk.zeros(img.get_header(), dtype='uint8')
    mask = irtk.ones(input_mask.get_header(), dtype='uint8')
    mask[input_mask == 2] = 0
    for z in xrange(img.shape[0]):
        print z
        YX = np.transpose(np.nonzero(mask[z]))
        if YX.shape[0] == 0:
            continue  # this slice does not intersect the box
        patches = extract_patches2D(img[z], r, YX)
        patches = np.reshape(
            patches, (patches.shape[0], patches.shape[1] * patches.shape[2]))

        predictions = neigh.predict_proba(patches)[:, 1]
        res[z, YX[:, 0], YX[:, 1]] = predictions

    x_min, y_min, z_min, x_max, y_max, z_max = mask.bbox()

    proba = res[z_min:z_max + 1, y_min:y_max + 1, x_min:x_max + 1]

    if args.mass:
        BV = get_BV(args.ga)
        box_volume = (z_max - z_min) * img.header['pixelSize'][2] * (
            y_max - y_min) * img.header['pixelSize'][1] * (
                x_max - x_min) * img.header['pixelSize'][0]
        ratio = float(BV) / float(box_volume)
        print "ratio", ratio
        q0, q1 = mquantiles(proba.flatten(),
                            prob=[0.5 * (1.0 - ratio), 1.0 - 0.5 * ratio])
        print "threshold", q0, q1
        #threshold = max(0.5,threshold)

        # labels = res[z_min:z_max+1,
        #              y_min:y_max+1,
        #              x_min:x_max+1] > threshold

        #res = 1 / (np.exp(-(res-threshold)/(res.max()-res.min())))

        res[res < q0] = q0
        res[res > q1] = q1
        res -= res.min()
        res /= res.max()

    labels = res[z_min:z_max + 1, y_min:y_max + 1, x_min:x_max + 1] > 0.5

    proba = res[z_min:z_max + 1, y_min:y_max + 1, x_min:x_max + 1]

    cropped_img = img[z_min:z_max + 1, y_min:y_max + 1, x_min:x_max + 1]

    if args.do_3D:
        labels = irtk.crf(cropped_img,
                          labels,
                          proba,
                          l=args.l,
                          sigma=get_noiseXY(cropped_img),
                          sigmaZ=get_noiseZ(cropped_img))
    # elif args.do_patchZ:
    #     labels = irtk.crf_patchZ( cropped_img,
    #                               labels,
    #                               proba,
    #                               l=10.0 )
    # else:
    #     for z in xrange(z_min,z_max+1):
    #         labels[z] = irtk.crf( cropped_img[z],
    #                               labels[z],
    #                               proba[z],
    #                               l=1.0 )

    print "MAX LABEL:", labels.max()
    irtk.imwrite(output_dir + "/bare_" + os.path.basename(file_img), labels)
    tmp = irtk.zeros(img.get_header(), dtype='uint8')
    tmp[z_min:z_max + 1, y_min:y_max + 1, x_min:x_max + 1] = labels
    (min_x_bare, min_y_bare, min_z_bare, max_x_bare, max_y_bare,
     max_z_bare) = tmp.bbox()

    if not args.no_cleaning:
        # clean by fitting ellipses enlarged by 10%
        for z in xrange(labels.shape[0]):
            edges = nd.morphological_gradient(labels[z] > 0, size=5)
            points = np.transpose(edges.nonzero())[:, ::-1]
            if len(points) == 0:
                continue
            points = np.array(map(lambda x: [x], points), dtype='int32')
            ellipse = cv2.fitEllipse(points)
            cv2.ellipse(
                labels[z],
                (ellipse[0],
                 (1.1 * ellipse[1][0], 1.1 * ellipse[1][1]), ellipse[2]), 1,
                -1)

    irtk.imwrite(output_dir + "/seg_" + os.path.basename(file_img), labels)
    irtk.imwrite(output_dir + "/res_" + os.path.basename(file_img), res)

    # re-read the image in case we processed it
    img = irtk.imread(file_img, dtype='float32')
    cropped_img = img[z_min:z_max + 1, y_min:y_max + 1, x_min:x_max + 1]
    cropped_img[labels == 0] = -1
    masked = cropped_img.bbox(crop=True)
    irtk.imwrite(output_dir + "/masked_" + os.path.basename(file_img), masked)

    # re-read the image in case we processed it
    img = irtk.imread(file_img, dtype='float32')
    x0 = min_x_bare + (max_x_bare - min_x_bare) / 2
    y0 = min_y_bare + (max_y_bare - min_y_bare) / 2
    ofd = get_OFD(ga) / img.header['pixelSize'][0]

    cropped_img = img[min_z_bare:max_z_bare + 1,
                      max(0, int(round(y0 - ofd / 2))
                          ):min(img.shape[1], int(round(y0 + ofd / 2 + 1))),
                      max(0, int(round(x0 - ofd / 2))
                          ):min(img.shape[2], int(round(x0 + ofd / 2 +
                                                        1)))].copy()

    irtk.imwrite(output_dir + "/very_large_" + os.path.basename(file_img),
                 cropped_img)

    cropped_proba = res[min_z_bare:max_z_bare + 1,
                        max(0, int(round(y0 - ofd / 2))
                            ):min(img.shape[1], int(round(y0 + ofd / 2 + 1))),
                        max(0, int(round(x0 - ofd / 2))
                            ):min(img.shape[2], int(round(x0 + ofd / 2 +
                                                          1)))].copy()

    irtk.imwrite(output_dir + "/proba_" + os.path.basename(file_img),
                 cropped_proba)
Example #26
def mask_image( file_img, file_mask, ga, r, neigh, output_dir ):
    img = irtk.imread( file_img, dtype='float32' )

    input_mask = irtk.imread( file_mask )
    
    print "predicting..."
    res = irtk.zeros( img.get_header(), dtype='float32' )
    res2 = irtk.zeros( img.get_header(), dtype='float32' )
    res3 = irtk.zeros( img.get_header(), dtype='float32' )
    res4 = irtk.zeros( img.get_header(), dtype='uint8' )
    mask = irtk.ones( input_mask.get_header(), dtype='uint8' )
    mask[input_mask == 2] = 0
    for z in xrange(img.shape[0]):
        print z
        YX = np.transpose( np.nonzero( mask[z] ) )
        if YX.shape[0] == 0:
            continue # this slice does not intersect the box
        patches = extract_patches2D( img[z], r, YX )
        patches = np.reshape( patches, (patches.shape[0],patches.shape[1]*patches.shape[2]) )

        predictions = neigh.predict_proba(patches)[:,1]
        res[z,YX[:,0],YX[:,1]] = predictions

    x_min, y_min, z_min, x_max, y_max, z_max = mask.bbox()

    proba = res[z_min:z_max+1,
                y_min:y_max+1,
                x_min:x_max+1]

    if args.mass:
        BV = get_BV( args.ga )
        box_volume = (z_max-z_min)*img.header['pixelSize'][2]*(y_max-y_min)*img.header['pixelSize'][1]*(x_max-x_min)*img.header['pixelSize'][0]
        ratio = float(BV) / float(box_volume)
        print "ratio", ratio
        q0,q1 = mquantiles( proba.flatten(), prob=[0.5*(1.0-ratio),
                                                   1.0-0.5*ratio] )
        print "threshold", q0,q1
        #threshold = max(0.5,threshold)
    
        # labels = res[z_min:z_max+1,
        #              y_min:y_max+1,
        #              x_min:x_max+1] > threshold
        
    #res = 1 / (np.exp(-(res-threshold)/(res.max()-res.min())))

        res[res<q0] = q0
        res[res>q1] = q1
        res -= res.min()
        res /= res.max()

    labels = res[z_min:z_max+1,
                 y_min:y_max+1,
                 x_min:x_max+1] > 0.5
   
    proba = res[z_min:z_max+1,
                y_min:y_max+1,
                x_min:x_max+1]
    
    cropped_img = img[z_min:z_max+1,
                      y_min:y_max+1,
                      x_min:x_max+1]

    if args.do_3D:
        labels = irtk.crf( cropped_img,
                           labels,
                           proba,
                           l=args.l,
                           sigma=get_noiseXY(cropped_img),
                           sigmaZ=get_noiseZ(cropped_img) )
    # elif args.do_patchZ:
    #     labels = irtk.crf_patchZ( cropped_img,
    #                               labels,
    #                               proba,
    #                               l=10.0 )   
    # else:
    #     for z in xrange(z_min,z_max+1):
    #         labels[z] = irtk.crf( cropped_img[z],
    #                               labels[z],
    #                               proba[z],
    #                               l=1.0 )

    print "MAX LABEL:", labels.max()
    irtk.imwrite(output_dir + "/bare_"+os.path.basename(file_img), labels )
    tmp = irtk.zeros( img.get_header(), dtype='uint8' )
    tmp[z_min:z_max+1,
        y_min:y_max+1,
        x_min:x_max+1] = labels
    ( min_x_bare, min_y_bare, min_z_bare,
      max_x_bare, max_y_bare, max_z_bare ) = tmp.bbox()
    
    if not args.no_cleaning:
        # clean by fitting ellipses enlarged by 10%
        for z in xrange(labels.shape[0]):
            edges = nd.morphological_gradient( labels[z] > 0,size=5 )
            points = np.transpose(edges.nonzero())[:,::-1]
            if len(points) == 0:
                continue
            points = np.array(map(lambda x:[x],points),dtype='int32')
            ellipse = cv2.fitEllipse(points)
            cv2.ellipse( labels[z], (ellipse[0],
                                     (1.1*ellipse[1][0],1.1*ellipse[1][1]),
                                     ellipse[2]) , 1, -1 )

    irtk.imwrite(output_dir + "/seg_"+os.path.basename(file_img), labels )
    irtk.imwrite(output_dir + "/res_"+os.path.basename(file_img), res )

    # re-read the image in case we processed it
    img = irtk.imread( file_img, dtype='float32' )
    cropped_img = img[z_min:z_max+1,
                      y_min:y_max+1,
                      x_min:x_max+1]
    cropped_img[labels==0] = -1
    masked = cropped_img.bbox(crop=True)
    irtk.imwrite(output_dir + "/masked_"+os.path.basename(file_img), masked )

    # re-read the image in case we processed it
    img = irtk.imread( file_img, dtype='float32' )    
    x0 = min_x_bare + (max_x_bare - min_x_bare) / 2
    y0 = min_y_bare + (max_y_bare - min_y_bare) / 2
    ofd = get_OFD(ga)/img.header['pixelSize'][0]

    cropped_img = img[min_z_bare:max_z_bare+1,
                      max(0,int(round(y0-ofd/2))):min(img.shape[1],int(round(y0+ofd/2+1))),
                      max(0,int(round(x0-ofd/2))):min(img.shape[2],int(round(x0+ofd/2+1)))].copy()

    irtk.imwrite(output_dir + "/very_large_"+os.path.basename(file_img),
                 cropped_img )
    
    cropped_proba = res[min_z_bare:max_z_bare+1,
                        max(0,int(round(y0-ofd/2))):min(img.shape[1],int(round(y0+ofd/2+1))),
                        max(0,int(round(x0-ofd/2))):min(img.shape[2],int(round(x0+ofd/2+1)))].copy()

    irtk.imwrite(output_dir + "/proba_"+os.path.basename(file_img),
                 cropped_proba )    
def run_detection( filename, ga, output_folder ):
    file_id = os.path.basename(filename).split('.')[0]
    if '_' in os.path.basename(filename):
        patient_id = file_id.split('_')[0]
    else:
        patient_id = file_id
    print patient_id 
    
    # brain detection
    vocabulary = "../brain-detector/trained_model/vocabulary_0.npy"
    mser_detector = "../brain-detector/trained_model/mser_detector_0_LinearSVC"
    mask_file = output_folder +"/" + file_id + "/brain_mask.nii.gz"
    cmd = [ "python",
            "../brain-detector/fetalMask_detection.py",
            filename,
            str(ga),
            mask_file,
            "--classifier", mser_detector,
            "--vocabulary", vocabulary
            ]
    print ' '.join(cmd)
    
    proc = subprocess.Popen(cmd,
                            stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE )
    (out, err) = proc.communicate()

    print out
    print err

    # heart and body detection

    ## preprocessing (resampling+denoising)
    img = irtk.imread( filename, dtype='float32', force_neurological=True )
    img = irtk.Image(nd.median_filter(img.view(np.ndarray),(3,5,5)),img.get_header())

    scale = get_CRL(ga)/get_CRL(30.0)

    img = img.resample( 1.0*scale, interpolation='bspline' )
    
    brain_center = img.WorldToImage( get_center_brain_detection(mask_file) )[::-1]
    
    new_filename = output_folder + "/" + file_id + "/" + os.path.basename(filename)
    
    irtk.imwrite(new_filename,img)
    
    n_jobs = 5

    output_folder1 = output_folder + "/" + file_id + "/prediction_1/"
    output_folder2 = output_folder + "/" + file_id + "/prediction_2"
    detector1 = "trained_model/stage1"
    detector2 = "trained_model/stage2"
    shape_model = "trained_model/stage1/shape_model.pk"

    cmd = [ "python", "predict.py",
            "--input", new_filename,
            "--output", output_folder1,
            "--output2", output_folder2,
            "--detector", detector1,
            "--detector2", detector2,
            "--padding", str(10),
            "--chunk_size", str(int(1e5)),
            "--n_jobs", str(n_jobs),
            "--brain_center"] + map(str,brain_center) + \
            ["--shape_model",shape_model,
             "-l", str(0.5),
             "--shape_optimisation",
             "--narrow_band",
             "--selective"]

    print ' '.join(cmd)

    proc = subprocess.Popen(cmd,
                            stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE)
    (out, err) = proc.communicate()

    print out
    print err
   
    return
    print args
    
start = time()
    
detector = heartdetector.HeartDetector( name=args.forest )
detector.load()

if not args.time:
    print detector

if not os.path.exists("predictions/"+args.patient_id):
    os.makedirs("predictions/"+args.patient_id)

all_frames = sorted(glob("denoised/"+args.patient_id+"_frame*.nii.gz"))

tmp = irtk.imread(all_frames[0])
mask = irtk.zeros(tmp.get_header(),dtype='float32')
for f in all_frames:
    if "_seg" in f:
        continue
    mask += irtk.imread(f)

mask = (mask > 0).astype('uint8')

# if args.frame is not None:
#     all_frames = [all_frames[args.frame]]
# elif not args.all:
#     ED,ES = get_ED_ES(args.patient_id)
#     all_frames = [all_frames[ED],all_frames[ES]]
    
for f in all_frames:
Example #29
def preprocess_training_data(patient_id,
                             img_folder,
                             seg_folder,
                             resample,
                             offline=False,
                             online=True):
    if offline or online:
        if (offline and os.path.exists("offline_preprocessing/" + patient_id +
                                       "_img.nii.gz")
                and os.path.exists("offline_preprocessing/" + patient_id +
                                   "_seg.nii.gz")):
            return
        img = irtk.imread(img_folder + "/" + patient_id + ".nii.gz",
                          dtype='float32')
        seg = irtk.imread(seg_folder + "/" + patient_id + "_seg.nii.gz",
                          dtype="uint8")

        wall = nd.binary_dilation(
            seg,
            morphology.ball(int(12.5 * 0.001 / seg.header['pixelSize'][0])))
        wall = wall.astype('int')
        points = np.transpose(np.nonzero(wall))[::4]
        center, S, V = fit_ellipsoidPCA(points)
        if V[0, 0] < 0:
            V *= -1

        points = np.transpose(np.nonzero(wall))
        projections = np.dot(points - center, V[0])

        # valves
        index = projections > (projections.max() -
                               40.0 * 0.001 / seg.header['pixelSize'][0])

        #print "VALVE size:",np.sum(index), projections.max(), 40.0*0.001/seg.header['pixelSize'][0]

        wall[points[index, 0], points[index, 1], points[index, 2]] = 2

        #print "VALVE1", wall.max()

        wall = irtk.Image(wall, seg.get_header())

        img = img.resample(pixelSize=resample,
                           interpolation='linear').rescale(0, 1000)
        seg = seg.transform(target=img,
                            interpolation="nearest").astype('uint8')
        wall = wall.transform(target=img,
                              interpolation="nearest").astype('uint8')

        wall[seg > 0] = 0
        seg[wall == 1] = 2
        seg[wall == 2] = 3

        #print "VALVE2", seg.max()

        #irtk.imwrite("debug/"+patient_id+"_border.nii.gz",seg)

        seg[img == 0] = 255

        if offline:
            irtk.imwrite("offline_preprocessing/" + patient_id + "_img.nii.gz",
                         img)
            irtk.imwrite("offline_preprocessing/" + patient_id + "_seg.nii.gz",
                         seg)
            return

    if not online:
        img = irtk.imread("offline_preprocessing/" + patient_id +
                          "_img.nii.gz")
        seg = irtk.imread("offline_preprocessing/" + patient_id +
                          "_seg.nii.gz")

    mask = irtk.ones(img.get_header(), dtype='uint8')
    mask[img == 0] = 0

    return {
        'patient_id': patient_id,
        'img': img,
        'seg': seg,
        'extra_layers': np.array([], dtype='float32'),
        'metadata': None,
        'mask': mask
    }
Example #30
    print args

start = time()

detector = heartdetector.HeartDetector(name=args.forest)
detector.load()

if not args.time:
    print detector

if not os.path.exists("predictions/" + args.patient_id):
    os.makedirs("predictions/" + args.patient_id)

all_frames = sorted(glob("denoised/" + args.patient_id + "_frame*.nii.gz"))

tmp = irtk.imread(all_frames[0])
mask = irtk.zeros(tmp.get_header(), dtype='float32')
for f in all_frames:
    if "_seg" in f:
        continue
    mask += irtk.imread(f)

mask = (mask > 0).astype('uint8')

# if args.frame is not None:
#     all_frames = [all_frames[args.frame]]
# elif not args.all:
#     ED,ES = get_ED_ES(args.patient_id)
#     all_frames = [all_frames[ED],all_frames[ES]]

for f in all_frames:
Example #31
sizes3 = np.load( args.detector + '/sizes3.npy' )
offsets4 = np.load( args.detector + '/offsets4.npy' )
sizes4 = np.load( args.detector + '/sizes4.npy' )
offsets5 = np.load( args.detector + '/offsets5.npy' )
offsets6 = np.load( args.detector + '/offsets6.npy' )

clf_heart = joblib.load( args.detector + '/clf_heart' )
reg_heart = joblib.load( args.detector + '/reg_heart' )

clf_heart.set_params(n_jobs=args.n_jobs)
reg_heart.set_params(n_jobs=args.n_jobs)

print "done loading detectors"
print "preprocessing..."

img = irtk.imread( args.input, dtype='float32', force_neurological=True )

grad = irtk.Image(nd.gaussian_gradient_magnitude( img, 0.5 ),
                  img.get_header())

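# summed-area tables (integral images) of the intensity and of the gradient
# magnitude, presumably used to evaluate box features quickly in the forests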
sat = integral_image(img)
sat_grad = integral_image(grad)

blurred_img = nd.gaussian_filter(img,0.5)
gradZ = nd.sobel( blurred_img, axis=0 ).astype('float32')
gradY = nd.sobel( blurred_img, axis=1 ).astype('float32')
gradX = nd.sobel( blurred_img, axis=2 ).astype('float32')

irtk.imwrite(args.output + "/img.nii.gz", img)
irtk.imwrite(args.output + "/grad.nii.gz", grad)
#!/usr/bin/python

import sys, os
import irtk
import numpy as np


def rand(scale):
    # uniform random value in [-scale, scale]
    return float(scale) * (np.random.random() - 0.5) * 2


img = irtk.imread(sys.argv[1])
seg = irtk.imread(sys.argv[2])
prefix = sys.argv[3]

tx, ty, tz = img.ImageToWorld([(img.shape[2] - 1) / 2, (img.shape[1] - 1) / 2,
                               (img.shape[0] - 1) / 2])
centering = irtk.RigidTransformation(tx=-tx, ty=-ty, tz=-tz)

t = irtk.RigidTransformation(tx=rand(img.header['pixelSize'][0] * 10),
                             ty=rand(img.header['pixelSize'][1] * 10),
                             tz=rand(img.header['pixelSize'][2] * 10),
                             rx=rand(30),
                             ry=rand(30),
                             rz=rand(30))
print t
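# conjugating by the centering transform makes the random rotation act about
# the image centre rather than the world origin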
t = centering.invert() * t * centering

new_img = img.transform(t, target=img.get_header(), interpolation='linear')
new_seg = seg.transform(t, target=img.get_header(), interpolation='nearest')
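# The snippet is truncated here; a minimal completion (an assumption, not part
# of the original source) would save the augmented pair using the prefix read
# from the command line:
irtk.imwrite(prefix + "_img.nii.gz", new_img)
irtk.imwrite(prefix + "_seg.nii.gz", new_seg)
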
def preprocess_training_data( patient_id,
                              img_folder,
                              seg_folder,
                              resample,
                              offline=False,
                              online=True):
    if offline or online:
        if ( offline
             and os.path.exists( "offline_preprocessing/"+patient_id+"_img.nii.gz" )
             and os.path.exists( "offline_preprocessing/"+patient_id+"_seg.nii.gz" ) ):
                 return
        img = irtk.imread( img_folder + "/" + patient_id + ".nii.gz",
                           dtype='float32' )
        seg = irtk.imread( seg_folder +"/"+patient_id+"_seg.nii.gz",
                           dtype="uint8" )

        wall = nd.binary_dilation( seg,
                                   morphology.ball(int(12.5*0.001/seg.header['pixelSize'][0])) )
        wall = wall.astype('int')
        points = np.transpose(np.nonzero(wall))[::4]
        center,S,V = fit_ellipsoidPCA( points )
        if V[0,0] < 0:
            V *= -1
        
        points = np.transpose(np.nonzero(wall))
        projections = np.dot(points-center,V[0])

        # valves
        index = projections > (projections.max() - 40.0*0.001/seg.header['pixelSize'][0])

        #print "VALVE size:",np.sum(index), projections.max(), 40.0*0.001/seg.header['pixelSize'][0]
    
        wall[points[index,0],
             points[index,1],
             points[index,2]] = 2

        #print "VALVE1", wall.max()

        wall = irtk.Image(wall,seg.get_header())
    
        img = img.resample( pixelSize=resample, interpolation='linear' ).rescale(0,1000)
        seg = seg.transform(target=img,interpolation="nearest").astype('uint8')
        wall = wall.transform(target=img,interpolation="nearest").astype('uint8')
 
        wall[seg>0] = 0
        seg[wall==1] = 2
        seg[wall==2] = 3

        #print "VALVE2", seg.max()
    
        #irtk.imwrite("debug/"+patient_id+"_border.nii.gz",seg)
    
        seg[img==0] = 255

        if offline:
            irtk.imwrite( "offline_preprocessing/"+patient_id+"_img.nii.gz", img )
            irtk.imwrite( "offline_preprocessing/"+patient_id+"_seg.nii.gz", seg )
            return

    if not online:
        img = irtk.imread( "offline_preprocessing/"+patient_id+"_img.nii.gz" )
        seg = irtk.imread( "offline_preprocessing/"+patient_id+"_seg.nii.gz" )
        
    mask = irtk.ones( img.get_header(), dtype='uint8' )
    mask[img==0] = 0

    return { 'patient_id': patient_id,
             'img' : img,
             'seg' : seg,
             'mask' : mask }
Example #34
0
import irtk
import scipy.ndimage as nd
from skimage import morphology

import argparse

parser = argparse.ArgumentParser(
    description='' )
parser.add_argument( '--seg', type=str, required=True )
parser.add_argument( '--img', type=str, required=True )
parser.add_argument( '--output', type=str, required=True )
parser.add_argument( '--narrow_band', type=int, default=5 )
parser.add_argument( '--debug', action="store_true", default=False )

args = parser.parse_args()

seg = irtk.imread( args.seg, dtype='int32', force_neurological=True )
img = irtk.imread( args.img, dtype='float32', force_neurological=True ).rescale(0,1000)

res = irtk.zeros( seg.get_header(), dtype='uint8' )

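# spherical structuring element of radius `narrow_band` voxels, intended for
# building a narrow band around each label (the per-label loop below is
# commented out / truncated in this snippet)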
ball = morphology.ball( args.narrow_band )

nb_labels = 5

# for i in range(1,5):
#     tmp_seg = (seg==i).astype('int32')
#     # crop
#     x_min,y_min,z_min,x_max,y_max,z_max = (tmp_seg).bbox()
#     mask = tmp_seg[max(0,z_min-2*args.narrow_band):min(seg.shape[0],z_max+2*args.narrow_band+1),
#                       max(0,y_min-2*args.narrow_band):min(seg.shape[1],y_max+2*args.narrow_band+1),
#                       max(0,x_min-2*args.narrow_band):min(seg.shape[2],x_max+2*args.narrow_band+1)]
Example #35
0
#!/usr/bin/python

import sys
import numpy as np
import irtk

full_file = sys.argv[1]
cropped_file = sys.argv[2]

full_img = irtk.imread(full_file, dtype='float32')
cropped_img = irtk.imread(cropped_file, dtype='float32')

(z, y, x), score = irtk.match_template(full_img, cropped_img, pad_input=False)

print score

print ' '.join(
    map(str,
        [full_img.shape[0], full_img.shape[1], full_img.shape[2], z, y, x]))
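# Usage (inferred from the code above):
#   python <this_script>.py full.nii.gz cropped.nii.gz
# prints the matching score, then the full image shape followed by the
# (z, y, x) offset of the best match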
Example #36
0
def get_training_data( file_img, file_mask, r ):
    # create mask
    input_mask = irtk.imread( file_mask )
    x_min, y_min, z_min, x_max, y_max, z_max = (input_mask == 0).bbox()

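    # negative samples come from a shell around the bounding box of the
    # unlabelled (mask == 0) region plus randomly scattered voxels outside it;
    # positive samples are the voxels labelled 1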
    background = irtk.zeros( input_mask.get_header(), dtype='uint8' )
    background[z_min:z_max+1,
               y_min:y_max+1,
               x_min:x_max+1] = 1
    background = nd.morphological_gradient( background, size=7)
    n = background[z_min+1:z_max,
                   y_min+1:y_max,
                   x_min+1:x_max].sum()
    z = np.random.randint(low=0, high=input_mask.shape[0], size=int(1.25*n))
    y = np.random.randint(low=0, high=input_mask.shape[1], size=int(1.25*n))
    x = np.random.randint(low=0, high=input_mask.shape[2], size=int(1.25*n))
    background[z,y,x] = 1
    background[z_min+1:z_max,
               y_min+1:y_max,
               x_min+1:x_max] = 0
    
    foreground = (input_mask == 1).astype('uint8')

    new_mask = irtk.zeros( input_mask.get_header(), dtype='uint8' )
    new_mask[foreground == 1] = 1
    new_mask[background != 0] = 2

    img = irtk.imread( file_img, dtype='float32' )
    
    X = []
    Y = []

    for z in xrange(img.shape[0]):
        YX = np.transpose( np.nonzero( foreground[z] ) )
        if DEBUG:
            YX = YX[::10]
        else:
            YX = YX[::2]
        if YX.shape[0] == 0:
            continue
        patches = extract_patches2D( img[z], r, YX )
        patches = np.reshape( patches, (patches.shape[0],patches.shape[1]*patches.shape[2]) )
        print patches.shape, YX.shape
        X.extend( patches )
        Y.extend( [1]*len(YX) )

    for z in xrange(img.shape[0]):
        YX = np.transpose( np.nonzero( background[z] ) )
        if DEBUG:
            YX = YX[::10]
        else:
            YX = YX[::2]
        if YX.shape[0] == 0:
            continue
        patches = extract_patches2D( img[z], r, YX )
        patches = np.reshape( patches, (patches.shape[0],patches.shape[1]*patches.shape[2]) )
        print patches.shape, YX.shape
        X.extend( patches )
        Y.extend( [0]*len(YX) )

    return X, Y
Example #37
0
#!/usr/bin/python

import sys
import numpy as np
import irtk

from pyhull.convex_hull import ConvexHull
from irtk.vtk2irtk import voxellise

seg = irtk.imread(sys.argv[1])
output = sys.argv[2]

ZYX = np.transpose(np.nonzero(seg))
pts = seg.ImageToWorld(ZYX[:, ::-1])
hull = ConvexHull(pts)

img = voxellise(hull.points, hull.vertices, header=seg.get_header())

irtk.imwrite(output, img)
Example #38
0
    
    return labels == best_label
    
def background_distance(img,metric='geodesic',includeEDT=True):
    background = get_background(img)

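    # edt / gdt are assumed to be the Euclidean and geodesic distance
    # transforms from the background mask, defined elsewhere in this module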
    if metric == "euclidean":
        distanceMap = edt( img, background )
    elif metric == "geodesic":
        distanceMap = gdt( img, background, includeEDT )
    else:
        raise ValueError("Unknown metric: "+ metric)
    
    return irtk.Image(distanceMap,img.get_header())


if __name__ == "__main__":

    img = irtk.imread( sys.argv[1], dtype="float64" )
    #filtered = nd.minimum_filter(img,5)
    filtered = nd.gaussian_gradient_magnitude(img,0.5)
    img = irtk.Image(filtered,img.get_header())
    irtk.imwrite("test2.nii.gz",img)
    exit(0)
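    # everything below exit(0) is unreachable, apparently left over from an
    # earlier version of the script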
    
    img = world_align(img,pixelSize=[2,2,2,1])

    irtk.imwrite("distanceEDT.nii.gz",background_distance(img,metric="euclidean"))

    irtk.imwrite( "distanceGDT.nii.gz", background_distance(img,metric="geodesic"))
Example #39
0
def detect_mser(raw_file,
                ga,
                vocabulary,
                mser_detector,
                NEW_SAMPLING,
                output_folder="debug",
                DEBUG=False,
                return_image_regions=False):

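    # OFD / BPD: occipitofrontal and biparietal diameters of the fetal head at
    # this gestational age, converted to pixels at the new sampling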
    OFD = get_OFD(ga) / NEW_SAMPLING
    BPD = get_BPD(ga) / NEW_SAMPLING

    max_e = 0.64

    mser = cv2.MSER(_delta=5,
                    _min_area=60,
                    _max_area=14400,
                    _max_variation=0.15,
                    _min_diversity=.1,
                    _max_evolution=200,
                    _area_threshold=1.01,
                    _min_margin=0.003,
                    _edge_blur_size=5)

    sift = cv2.SIFT(nfeatures=0,
                    nOctaveLayers=3,
                    contrastThreshold=0.04,
                    edgeThreshold=10,
                    sigma=0.8)
    siftExtractor = cv2.DescriptorExtractor_create("SIFT")

    voca = np.load(open(vocabulary, 'rb'))
    classifier = neighbors.NearestNeighbors(1, algorithm='kd_tree')
    N = voca.shape[0]
    classifier.fit(voca)
    #flann = pyflann.FLANN()
    #flann.build_index( voca.astype('float32') )

    svc = joblib.load(mser_detector)

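# OpenCV's MSER and SIFT operate on 8-bit 2D images, so resample in-plane,
# saturate the intensities and rescale to uint8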
    img = irtk.imread(raw_file, dtype='float32')
    img = img.resample2D(NEW_SAMPLING).saturate().rescale().astype('uint8')

    detections = []
    image_regions = []
    for z in range(img.shape[0]):
        detections.append([])
        image_regions.append([])

        # Extract MSER
        #print "extracting mser"
        contours = mser.detect(img[z, :, :])
        #print "mser done"

        if DEBUG:
            img_color = cv2.cvtColor(img[z, :, :], cv2.cv.CV_GRAY2RGB)
            for c in contours:
                ellipse = cv2.fitEllipse(
                    np.array(map(lambda x: [x], c), dtype='int32'))
                cv2.ellipse(img_color,
                            (ellipse[0],
                             (ellipse[1][0], ellipse[1][1]), ellipse[2]),
                            (0, 0, 255))

            cv2.imwrite(output_folder + "/" + str(z) + "_all_mser_.png",
                        img_color)

            img_color_mser = cv2.cvtColor(img[z, :, :], cv2.cv.CV_GRAY2RGB)

        # Filter MSER
        selected_mser = []
        mask = np.zeros((img.shape[1], img.shape[2]), dtype='uint8')
        #print "fitting ellipses"
        for c in contours:
            ellipse = cv2.fitEllipse(
                np.reshape(c, (c.shape[0], 1, 2)).astype('int32'))

            # filter by size
            if (ellipse[1][0] > OFD or ellipse[1][1] > OFD
                    or ellipse[1][0] < 0.5 * OFD or ellipse[1][1] < 0.5 * OFD):
                continue

            # filter by eccentricity
            if math.sqrt(1 -
                         (np.min(ellipse[1]) / np.max(ellipse[1]))**2) > max_e:
                continue

            cv2.ellipse(mask, ellipse, 255, -1)
            selected_mser.append((c, ellipse))

        #print "ellipses done"
        if len(selected_mser) == 0:
            continue

        # Extract SIFT
        #print "extracting SIFT"
        keypoints = sift.detect(img[z, :, :], mask=mask)
        #print "SIFT done"
        if keypoints is None or len(keypoints) == 0:
            continue
        (keypoints,
         descriptors) = siftExtractor.compute(img[z, :, :], keypoints)

        # words = np.zeros(len(keypoints),dtype="int")
        # for i,d in enumerate(descriptors):
        #     words[i] = classifier.kneighbors(d, return_distance=False)
        words = classifier.kneighbors(descriptors, return_distance=False)
        #words, dist = flann.nn_index( descriptors.astype('float32') )

        for i, (c, ellipse) in enumerate(selected_mser):
            # Compute histogram
            hist = np.zeros(N, dtype='float')
            for ki, k in enumerate(keypoints):
                if is_in_ellipse(k.pt, ellipse):
                    hist[words[ki]] += 1

            # Normalize histogram
            norm = np.linalg.norm(hist)
            if norm > 0:
                hist /= norm

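            # linear SVM trained on bag-of-visual-words histograms; cl == 1
            # means the region is accepted as a candidate detection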
            cl = svc.predict(hist).flatten()

            if DEBUG:
                if cl == 1:
                    opacity = 0.4
                    img_color = cv2.cvtColor(img[z, :, :], cv2.cv.CV_GRAY2RGB)
                    for p in c:
                        img_color[p[1], p[0], :] = (
                            (1 - opacity) * img_color[p[1], p[0], :] +
                            opacity * np.array([0, 255, 0]))
                    cv2.imwrite(
                        output_folder + "/" + str(z) + '_' + str(i) +
                        "_region.png", img_color)

                img_color = cv2.cvtColor(img[z, :, :], cv2.cv.CV_GRAY2RGB)

                cv2.ellipse(img_color,
                            (ellipse[0],
                             (ellipse[1][0], ellipse[1][1]), ellipse[2]),
                            (0, 0, 255))
                for k_id, k in enumerate(keypoints):
                    if is_in_ellipse(k.pt, ellipse):
                        if cl == 1:
                            cv2.circle(img_color, (int(k.pt[0]), int(k.pt[1])),
                                       2, (0, 255, 0), -1)
                        else:
                            cv2.circle(img_color, (int(k.pt[0]), int(k.pt[1])),
                                       2, (0, 0, 255), -1)
                cv2.imwrite(
                    output_folder + "/" + str(z) + '_' + str(i) + ".png",
                    img_color)

                cv2.ellipse(img_color_mser,
                            (ellipse[0],
                             (ellipse[1][0], ellipse[1][1]), ellipse[2]),
                            (0, 255, 0) if cl == 1 else (0, 0, 255))

            if cl == 1:
                ellipse_center = [ellipse[0][0], ellipse[0][1], z]
                print c.shape
                if return_image_regions:
                    image_regions[-1].append((ellipse_center, c))
                else:
                    region = np.hstack((c, [[z]] * c.shape[0]))
                    detections[-1].append((img.ImageToWorld(ellipse_center),
                                           img.ImageToWorld(region)))

        if DEBUG:
            cv2.imwrite(output_folder + "/" + str(z) + "_color_mser.png",
                        img_color_mser)

    if return_image_regions:
        return image_regions
    else:
        return detections
Example #40
0
import os
from glob import glob

import irtk

def get_ED_ES(patient_id):
    f = "/vol/biomedic/users/kpk09/DATASETS/CETUS_data/Training/Images/" + patient_id + "/" + patient_id + "_ED_ES_time.txt"
    f = open(f, "rb")
    res = []
    for line in f:
        line = line.rstrip()  # chomp
        res.append(int(line.split(' ')[-1]))
    return res


all_files = glob("nifti/*_seg.nii.gz")

m = 0.0
n = 0.0
for f in all_files:
    patient_id = os.path.basename(f).split("_")[0]
    frame_id = int(os.path.basename(f).split("_")[1][len("frame"):])
    ED_ES = get_ED_ES(patient_id)
    print patient_id, frame_id, ED_ES
    if frame_id == ED_ES[1]:
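        # end-systolic frame: resample to 1 mm isotropic (pixel sizes appear to
        # be in metres), so summing the mask approximates the volume in mm^3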
        mask = irtk.imread(f).resample(0.001, interpolation='nearest')
        m += mask.sum()
        n += 1

m /= n

print "mean heart volume:", m
parser.add_argument( '--brain_center', type=float, nargs=3 )
parser.add_argument( '--verbose', action="store_true", default=False )
parser.add_argument( '--debug', action="store_true", default=False )
parser.add_argument( '--n_jobs', type=int, default=20 )
parser.add_argument( '--chunk_size', type=int, default=int(3e6) )
parser.add_argument( '-l', type=float, default=0.5 )
parser.add_argument( '--theta', type=int, default=90 )
args = parser.parse_args()

print args

output_dir = os.path.dirname(args.output)
if not os.path.exists(output_dir):
    os.makedirs(output_dir)

template = irtk.imread(args.template,force_neurological=True)
img = irtk.imread(args.input,force_neurological=True)

if args.ga > 0:
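    # scale the image so that the subject matches the 30-week template, using
    # the crown-rump length (CRL) growth curve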
    scale = get_CRL(args.ga)/get_CRL(30.0)
    resized_img = img.resample( 1.0*scale, interpolation='bspline' ).rescale(0,1000)
    resized_input = output_dir + "/resized_" + os.path.basename(args.input)
    irtk.imwrite( resized_input, resized_img )

    heart_center = resized_img.WorldToImage([0,0,0])[::-1]  # np.array(resized_img.shape,dtype='float32')/2
    if not args.brain_center:
        brain_center = heart_center + np.array([0,0,100])
    else:
        brain_center = np.array(args.brain_center,dtype='float32')

else:
Example #42
0
#!/usr/bin/python

import irtk
from glob import glob
import os
import sys
import numpy as np

if not os.path.exists("png"):
    os.makedirs("png")

if len(sys.argv) == 1:
    all_files = glob("predictions/*/iter4_*_hard.nii.gz")
else:
    all_files = glob("predictions/" + sys.argv[1] + "/iter4_*_hard.nii.gz")

for f in all_files:
    print f
    name = os.path.basename(f)[len("iter4_"):-len("_hard.nii.gz")]
    img = irtk.imread("denoised/" + name + ".nii.gz", dtype='int32')
    mask = irtk.imread(f).transform(target=img.get_header(),
                                    interpolation="nearest")
    irtk.imshow(img, mask, filename="png/" + name + ".png", opacity=0.4)
def process_file( f,
                  ga,
                  coordinates,
                  size,
                  classifier,
                  N,
                  DEBUG=False ):
    X = []
    Y = []

    scan_id = os.path.basename(f).split('.')[0]

    max_e = 0.64
    OFD = get_OFD(30.0,centile=50)

    mser = cv2.MSER( _delta=5,
                     _min_area=60,
                     _max_area=14400,
                     _max_variation=0.15,
                     _min_diversity=.1,
                     _max_evolution=200,
                     _area_threshold=1.01,
                     _min_margin=0.003,
                     _edge_blur_size=5)

    sift = cv2.SIFT( nfeatures=0,
                     nOctaveLayers=3,
                     contrastThreshold=0.04,
                     edgeThreshold=10,
                     sigma=0.8)
    siftExtractor = cv2.DescriptorExtractor_create("SIFT")

    box = np.array(map(float,coordinates.split(',')),dtype='float')
    size = np.array(map(float,size.split(',')),dtype='float')

    img = irtk.imread( f, dtype='float32', force_neurological=False )
    old_img = img.copy()

    ## Resample
    img = resampleOFD( img, ga )
    
    # Adjust coordinates and size
    box = img.WorldToImage( old_img.ImageToWorld(box[::-1]) )[::-1]

    box_size = size * old_img.header['pixelSize'][:3][::-1]/img.header['pixelSize'][:3][::-1]

    z0,y0,x0 = box.astype('int')
    d0,h0,w0 = box_size.astype('int')

    brain_center = (x0 + w0/2, y0 + h0/2)

    ## Contrast-stretch with saturation
    img = img.saturate(1,99).rescale().astype('uint8')

    for z in range(img.shape[0]):
        contours = mser.detect(img[z])
        keypoints = sift.detect(img[z])
        if keypoints is None or len(keypoints) == 0:
            continue
        (keypoints, descriptors) = siftExtractor.compute(img[z],keypoints)     

        for i,c in enumerate(contours):
            hist = np.zeros(N, dtype='float')
            ellipse = cv2.fitEllipse(np.array(map(lambda x:[x],
                                                  c),dtype='int32'))
            
            # filter by size
            if ( ellipse[1][0] > OFD
                 or ellipse[1][1] > OFD
                 or ellipse[1][0] < 0.5*OFD
                 or ellipse[1][1] < 0.5*OFD ) :
                continue

            # filter by eccentricity
            if math.sqrt(1-(np.min(ellipse[1])/np.max(ellipse[1]))**2) > max_e:
                continue

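            # label candidate regions by their distance to the annotated brain
            # centre: far regions become negatives, regions centred on the brain
            # (within the central slices) become positives, and an intermediate
            # ring is skipped as ambiguous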
            distance = math.sqrt((ellipse[0][0]-brain_center[0])**2
                                 +(ellipse[0][1]-brain_center[1])**2)

            if max(w0,h0)/2 >= distance >= min(w0,h0)/8:
                continue            

            for k,d in zip(keypoints,descriptors):
                if is_in_ellipse(k.pt,ellipse):
                    # nearest visual word for this descriptor (renamed from `c`
                    # to avoid shadowing the MSER contour)
                    w = classifier.kneighbors(d, return_distance=False)
                    hist[w] += 1

            # Normalize histogram
            norm = np.linalg.norm(hist)
            if norm > 0:
                hist /= norm

            if distance > max(w0,h0)/4:
                if DEBUG: print 0
                X.append(hist)
                Y.append(0)
            else:
                if distance < min(w0,h0)/8 and z0 + d0/8 <= z <= z0+7*d0/8:
                    if DEBUG: print 1
                    X.append(hist)
                    Y.append(1)
                else:
                    continue
                   
            if DEBUG and Y[-1] == 1:
                img_color = cv2.cvtColor( img[z], cv2.cv.CV_GRAY2RGB )
                cv2.ellipse( img_color, (ellipse[0],
                                         (ellipse[1][0],ellipse[1][1]),
                                         ellipse[2]) , (0,0,255))
                for k_id,k in enumerate(keypoints):
                    if is_in_ellipse(k.pt,ellipse):
                        if Y[-1] == 1:
                            cv2.circle( img_color,
                                        (int(k.pt[0]),int(k.pt[1])),
                                        2,
                                        (0,255,0),
                                        -1)
                        else:
                            cv2.circle( img_color,
                                        (int(k.pt[0]),int(k.pt[1])),
                                        2,
                                        (0,0,255),
                                        -1)
                cv2.imwrite("debug/"+scan_id+'_'+str(z) + '_' +str(i) +'_'+str(k_id)+".png",img_color)

    return X,Y
Example #44
0
#!/usr/bin/python

import sys
import numpy as np
import irtk

full_file = sys.argv[1]
cropped_file = sys.argv[2]

full_img = irtk.imread(full_file, dtype="float32")
cropped_img = irtk.imread(cropped_file, dtype="float32")

(z, y, x), score = irtk.match_template(full_img, cropped_img, pad_input=False)

print score

print " ".join(map(str, [full_img.shape[0], full_img.shape[1], full_img.shape[2], z, y, x]))
Example #45
0
if os.environ['USER'] == "kevin":
    raw_folder = "/home/kevin/Imperial/PhD/DATASETS/Originals/"
    vocabulary = "/home/kevin/Imperial/PhD/MyPHD/Detection/BOW/pipeline2/LEARNING/vocabulary_" + args.fold + ".npy"
    mser_detector = "/home/kevin/Imperial/PhD/MyPHD/Detection/BOW/pipeline2/LEARNING/mser_detector_" + args.fold + "_linearSVM"
    ga_file = "/home/kevin/Imperial/PhD/MyPHD/Detection/BOW/pipeline2/LEARNING/metadata/ga.csv"
else:
    raw_folder = "/vol/biomedic/users/kpk09/DATASETS/Originals"
    vocabulary = "/vol/biomedic/users/kpk09/pipeline2/LEARNING/vocabulary_" + args.fold + ".npy"
    mser_detector = "/vol/biomedic/users/kpk09/pipeline2/LEARNING/mser_detector_" + args.fold + "_linearSVM"
    ga_file = "/vol/biomedic/users/kpk09/pipeline2/LEARNING/metadata/ga.csv"

    print "Detect MSER regions"
detections = []
NEW_SAMPLING = 0.8
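# in-plane resolution (presumably in mm) to which detect_mser resamples each
# slice before running MSER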

img = irtk.imread(filename, dtype="float32").saturate().rescale()

image_regions = detect_mser(filename,
                            ga,
                            vocabulary,
                            mser_detector,
                            NEW_SAMPLING,
                            DEBUG=args.debug,
                            output_folder=output_dir,
                            return_image_regions=True)

# flatten list
# http://stackoverflow.com/questions/406121/flattening-a-shallow-list-in-python
import itertools
chain = itertools.chain(*image_regions)
image_regions = list(chain)
Example #46
0
#!/usr/bin/python

import irtk
from glob import glob
import os
import sys
import numpy as np

if not os.path.exists("png"):
    os.makedirs("png")

if len(sys.argv) == 1:
    all_files = glob( "predictions/*/iter4_*_hard.nii.gz" )
else:
    all_files = glob( "predictions/"+sys.argv[1]+"/iter4_*_hard.nii.gz" )

for f in all_files:
    print f
    name = os.path.basename(f)[len("iter4_"):-len("_hard.nii.gz")]
    img = irtk.imread("denoised/"+name+".nii.gz",dtype='int32')
    mask = irtk.imread(f).transform(target=img.get_header(),interpolation="nearest")
    irtk.imshow(img,mask,filename="png/"+name+".png",opacity=0.4)
Example #47
0
File: decrop.py  Project: zhukequan/IRTK
#!/usr/bin/python

import sys

import numpy as np
import irtk

# img1 = irtk.imread("reconstruction/t2.nii", dtype='float32')
# img2 = irtk.imread("reconstruction/t2seg.nii", dtype='float32')
# irtk.imwrite( "reconstruction/segfixed.nii", img2.transform(target=img1) )

shape = map(int, sys.argv[1:4])
z = int(sys.argv[4])
y = int(sys.argv[5])
x = int(sys.argv[6])

target = irtk.imread(sys.argv[7])

img = irtk.imread(sys.argv[8], dtype='float32')

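# note: the float32 crop is pasted into an int32 canvas, so intensities are
# truncated to integers when writing the decropped volume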
new_data = np.zeros(shape, dtype='int32')
new_data[z:z + img.shape[0], y:y + img.shape[1], x:x + img.shape[2]] = img

new_img = irtk.Image(new_data, target.get_header())
irtk.imwrite(sys.argv[9], new_img)
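# Usage (inferred from the argument order above):
#   python decrop.py full_Z full_Y full_X offset_z offset_y offset_x \
#       target.nii.gz cropped.nii.gz output.nii.gz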