Example #1
    def run_svm_inference(self, test, w):
        logger.info('running inference on: {}'.format(test))

        outdir = self.dir_inf + test
        if not os.path.isdir(outdir):
            os.makedirs(outdir)

        ## segment test image with trained w
        def wwf(im, _w):
            ''' meta weight function'''
            data = 0
            for iwf, wf in enumerate(self.weight_functions.values()):
                ij, _data = wf(im)
                data += _w[iwf] * _data
            return ij, data

        ## load images and ground truth
        file_seg = self.dir_reg + test + 'seg.hdr'
        file_im = self.dir_reg + test + 'gray.hdr'
        im = io_analyze.load(file_im)
        seg = io_analyze.load(file_seg)
        seg.flat[~np.in1d(seg.ravel(), self.labelset)] = self.labelset[0]

        ## normalize image
        im = im / np.std(im)  # normalize image by its standard deviation

        ## prior
        anchor_api = BaseAnchorAPI(
            self.prior,
            anchor_weight=w[-1],
        )

        sol, y = rwsegment.segment(im,
                                   anchor_api,
                                   seeds=self.seeds,
                                   weight_function=lambda im: wwf(im, w),
                                   **self.rwparams_inf)

        np.save(outdir + 'y.test.npy', y)
        io_analyze.save(outdir + 'sol.test.hdr', sol.astype(np.int32))

        ## compute Dice coefficient
        dice = compute_dice_coef(sol, seg, labelset=self.labelset)
        np.savetxt(outdir + 'dice.test.txt',
                   np.c_[dice.keys(), dice.values()],
                   fmt='%d %.8f')

        ## inference compare with gold standard
        dice_gold = np.loadtxt(outdir + 'dice.gold.txt')
        y_gold = np.load(outdir + 'y.gold.npy')
        sol_gold = io_analyze.load(outdir + 'sol.gold.hdr')

        np.testing.assert_allclose(dice.values(),
                                   dict(dice_gold).values(),
                                   err_msg='FAIL: dice coef mismatch',
                                   atol=1e-8)
        np.testing.assert_allclose(y, y_gold, err_msg='FAIL: y mismatch')
        np.testing.assert_equal(sol, sol_gold, err_msg='FAIL: sol mismatch')

        print('PASS: inference tests')
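The `wwf` closure above is the recurring meta weight function pattern: every entry of `self.weight_functions` returns the same sparse edge index pairs `ij` plus a per-edge weight vector, and `wwf` combines the vectors linearly under `w`. A minimal self-contained sketch (the toy functions `wf_a`/`wf_b` and the data are hypothetical, not part of this codebase):

import numpy as np

# hypothetical stand-ins for self.weight_functions: each returns the same
# edge list 'ij' and a per-edge weight vector for a 3-voxel chain image
def wf_a(im):
    ij = np.array([[0, 1], [1, 2]])
    return ij, np.abs(np.diff(im))

def wf_b(im):
    ij = np.array([[0, 1], [1, 2]])
    return ij, np.ones(2)

def wwf(im, _w, weight_functions):
    '''linearly combine weight functions, as in the example above'''
    data = 0
    for iwf, wf in enumerate(weight_functions):
        ij, _data = wf(im)       # all functions share the same edge set
        data += _w[iwf] * _data  # weighted sum of per-edge weights
    return ij, data

im = np.array([1.0, 2.0, 4.0])
ij, data = wwf(im, [0.5, 2.0], [wf_a, wf_b])
print(data)  # 0.5*|diff| + 2.0*1 per edge -> [2.5 3.]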
Example #2
    def load_or_compute_classifier(self,train,test,mask=None):
        from rwsegment import boundary_utils
        reload(boundary_utils)

        #dir_prior = config.dir_prior_edges + test
        #if not os.path.isdir(dir_prior):
        #    os.makedirs(dir_prior)
 
        ## Train classifier
        logger.info('train classifier with train {} for test {}'.format(train, test))

        ##load image and seg
        im = io_analyze.load(
            config.dir_reg + test + train + 'reggray.hdr').astype(float)
        nim = im/np.std(im)
        seg = io_analyze.load(config.dir_reg + test + train + 'regseg.hdr')
        seg.flat[~np.in1d(seg, self.labelset)] = self.labelset[0]
        
        ## sample points
        points = boundary_utils.sample_points(im, self.step,  mask=mask)
        logger.debug('number of sampled points = {}'.format(len(points)))

        #impoints = np.zeros(im.shape,dtype=int)
        #impoints[tuple(points.T)] = np.arange(len(points)) + 1

        ## compute edges
        edges,edgev,labels = boundary_utils.get_edges(im, points,  mask=mask)
        logger.debug('number of edges = {}'.format(len(edges)))

        ## extract profiles
        profiles,emap,dists = boundary_utils.get_profiles(nim, points, edges, rad=0)
        logger.debug('extracted profiles')

        ## make features
        x = boundary_utils.make_features(profiles, size=self.sizex, additional=[dists,edgev,edgev/dists])
        logger.debug('features made, size = {}'.format(len(x[0])))

        ## make annotations
        z = boundary_utils.is_boundary(points, edges, seg)
        logger.debug('annotations made')
        

        ## learn profiles
        logger.debug('training classifier')
        classifier = boundary_utils.Classifier()
        classifier.train(x,z)
        
        ## test classification
        logger.debug('testing classifier')
        cl, scores = classifier.classify(x)

        logger.info('non boundary correct rate: {:.3}'.format( 
            np.sum((np.r_[cl]==0)&(np.r_[z]==0))/np.sum(np.r_[z]==0).astype(float)))
        logger.info('boundary correct rate: {:.3}'.format( 
            np.sum((np.r_[cl]==1)&(np.r_[z]==1))/np.sum(np.r_[z]==1).astype(float)))
        

        ## store classifier
        #np.savetxt(dir_prior + 'classifier.txt', classifier.w)
        return classifier    
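The two rates logged above are per-class recalls: the fraction of true non-boundary and boundary edges the classifier got right. A toy sketch of the same computation, with hypothetical labels:

import numpy as np

z = np.array([0, 0, 1, 1, 1])   # ground-truth boundary annotations
cl = np.array([0, 1, 1, 1, 0])  # classifier predictions

# per-class correct rates, as in the logger calls above
non_boundary_rate = np.sum((cl == 0) & (z == 0)) / float(np.sum(z == 0))
boundary_rate = np.sum((cl == 1) & (z == 1)) / float(np.sum(z == 1))
print('{:.3} {:.3}'.format(non_boundary_rate, boundary_rate))  # 0.5 0.667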
Example #3
    def process_sample(self, test, fold=None):

        ## get prior
        prior, mask = load_or_compute_prior_and_mask(
            test, force_recompute=self.force_recompute_prior, pca=True, fold=fold
        )
        seeds = (-1) * mask
        mask = mask.astype(bool)

        ## load image
        file_name = config.dir_reg + test + "gray.hdr"
        logger.info("segmenting data: {}".format(file_name))
        im = io_analyze.load(file_name)
        file_gt = config.dir_reg + test + "seg.hdr"
        seg = io_analyze.load(file_gt)
        seg.flat[~np.in1d(seg, self.labelset)] = self.labelset[0]

        ## normalize image
        nim = im / np.std(im)

        ## init anchor_api
        anchor_api = MetaAnchor(
            prior=prior, prior_models=self.prior_models, prior_weights=self.prior_weights, image=nim
        )

        ## start segmenting
        # import ipdb; ipdb.set_trace()
        sol, impca = rwsegment_pca.segment(nim, anchor_api, seeds=seeds, labelset=self.labelset, **self.params)

        ## compute Dice coefficient per label
        dice = compute_dice_coef(sol, seg, labelset=self.labelset)
        logger.info("Dice: {}".format(dice))

        dice_pca = compute_dice_coef(impca, seg, labelset=self.labelset)
        logger.info("Dice pca only: {}".format(dice_pca))

        if not config.debug:
            if fold is not None:
                test_name = "f{}_{}".format(fold[0][:2], test)
            else:
                test_name = test
            outdir = config.dir_seg + "/{}/{}".format(self.model_name, test_name)

            logger.info("saving data in: {}".format(outdir))
            if not os.path.isdir(outdir):
                os.makedirs(outdir)

            io_analyze.save(outdir + "sol.hdr", sol.astype(np.int32))
            io_analyze.save(outdir + "solpca.hdr", impca.astype(np.int32))

            np.savetxt(outdir + "dice.txt", np.c_[dice.keys(), dice.values()], fmt="%d %.8f")
            np.savetxt(outdir + "dice_pca.txt", np.c_[dice.keys(), dice_pca.values()], fmt="%d %.8f")
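`compute_dice_coef` itself is not shown in these examples, but its result is used as a dict mapping each label to a coefficient. A minimal per-label Dice sketch consistent with that usage (assuming integer label volumes; this is not the library code):

import numpy as np

def dice_per_label(sol, seg, labelset):
    '''Dice = 2|A n B| / (|A| + |B|) for each label'''
    dice = {}
    for label in labelset:
        a, b = (sol == label), (seg == label)
        denom = float(a.sum() + b.sum())
        dice[label] = 2.0 * np.logical_and(a, b).sum() / denom if denom else 1.0
    return dice

sol = np.array([[13, 13], [14, 0]])
seg = np.array([[13, 14], [14, 0]])
print(dice_per_label(sol, seg, [13, 14]))  # {13: 0.666..., 14: 0.666...}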
Example #4
def batch_compute_dice(folder_list, labelset=[13,14,15,16]):
    import config
    dir_gt = config.dir_reg
    lbset = np.asarray(labelset, dtype=int)
    for folder in folder_list:
        for vol in config.vols:
            if not os.path.isfile(folder + '/' + vol + '/sol.hdr'):
                continue
            print('computing dice for segmentation: {}'.format(folder + '/' + vol + '/sol.hdr'))
            sol = io_analyze.load(folder + '/' + vol + '/sol.hdr').astype(int)
            gt  = io_analyze.load(dir_gt + '/' + vol + '/seg.hdr').astype(int)
            d_slice  = compute_dice_per_slice(sol, gt, labelset=labelset)
            d_labels = compute_dice_coef(sol, gt, labelset=labelset)
            #print d_slice, d_labels
            np.savetxt(folder + '/' + vol + '/dice_labels.txt', np.c_[d_labels.keys(), d_labels.values()], fmt='%d %f')
            np.savetxt(folder + '/' + vol + '/dice_slices.txt', np.c_[d_slice.keys(), d_slice.values()], fmt='%d %f')
Example #5
 def compute_mask(self, test, train):
     # 'test' added as a parameter: the original referenced it but never defined it
     seg = io_analyze.load(config.dir_reg + test + train + 'regseg.hdr')
     seg.flat[~np.in1d(seg, self.labelset)] = self.labelset[0]
     from scipy import ndimage
     struct = np.ones((20, ) * seg.ndim)
     mask = ndimage.binary_dilation(
         seg > 0,
         structure=struct,
     )
     return mask
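A standalone sketch of the dilation idiom used in `compute_mask` (scipy's `ndimage.binary_dilation` with an all-ones structuring element; a 3x3 element stands in for the 20^ndim one above):

import numpy as np
from scipy import ndimage

seg = np.zeros((7, 7), dtype=int)
seg[3, 3] = 13  # one labeled voxel

struct = np.ones((3,) * seg.ndim)  # all-ones structuring element
mask = ndimage.binary_dilation(seg > 0, structure=struct)
print(mask.sum())  # 9: the single voxel grew into its 3x3 neighborhood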
Example #6
 def compute_mask(self, test, train):
     # 'test' added as a parameter: the original referenced it but never defined it
     seg = io_analyze.load(config.dir_reg + test + train + 'regseg.hdr')
     seg.flat[~np.in1d(seg, self.labelset)] = self.labelset[0]
     from scipy import ndimage
     struct  = np.ones((20,)*seg.ndim)
     mask    = ndimage.binary_dilation(
             seg>0,
             structure=struct,
             )
     return mask
Example #7
    def train_svm(self,test):
        outdir = test

        ## training images and segmentations
        self.training_set = []
        for train in self.training_vols:
            if test==train: continue
            logger.info('loading training data: {}'.format(train))
            file_seg = self.dir_reg + test + train + 'regseg.hdr'
            file_im  = self.dir_reg + test + train + 'reggray.hdr'
            
            im  = io_analyze.load(file_im)
            im = im/np.std(im) # normalize image by std
            
            seg = io_analyze.load(file_seg)
            seg.flat[~np.in1d(seg.ravel(),self.labelset)] = self.labelset[0]
            z = (seg.ravel()==np.c_[self.labelset])# make bin vector z
            
            self.training_set.append((im, z))

        ## instantiate functors
        self.svm_rwmean_api = SVMRWMeanAPI(
            self.prior, 
            self.weight_functions, 
            self.labelset, 
            self.rwparams_svm,
            seeds=self.seeds,
            )
        
        ## learn struct svm
        logger.debug('start learning')
        self.svm = struct_svm.StructSVM(
            self.training_set,
            self.svm_rwmean_api.compute_loss,
            self.svm_rwmean_api.compute_psi,
            self.svm_rwmean_api.compute_mvc,
            **self.svmparams
            )

        w,xi,info = self.svm.train()
        
        return w,xi,info
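The `z = (seg.ravel() == np.c_[self.labelset])` idiom above builds the binary indicator matrix used as the training target: one row per label, one column per voxel. A small sketch:

import numpy as np

labelset = np.array([0, 13, 14])
seg = np.array([13, 0, 14, 13])

# np.c_[labelset] is an (nlabel, 1) column; broadcasting it against the
# flat segmentation gives an (nlabel, nvoxel) boolean matrix z
z = (seg.ravel() == np.c_[labelset])
print(z.astype(int))
# [[0 1 0 0]
#  [1 0 0 1]
#  [0 0 1 0]]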
Example #8
    def train_svm(self, test):
        outdir = test

        ## training images and segmentations
        self.training_set = []
        for train in self.training_vols:
            if test == train: continue
            logger.info('loading training data: {}'.format(train))
            file_seg = self.dir_reg + test + train + 'regseg.hdr'
            file_im = self.dir_reg + test + train + 'reggray.hdr'

            im = io_analyze.load(file_im)
            im = im / np.std(im)  # normalize image by std

            seg = io_analyze.load(file_seg)
            seg.flat[~np.in1d(seg.ravel(), self.labelset)] = self.labelset[0]
            z = (seg.ravel() == np.c_[self.labelset])  # make bin vector z

            self.training_set.append((im, z))

        ## instantiate functors
        self.svm_rwmean_api = SVMRWMeanAPI(
            self.prior,
            self.weight_functions,
            self.labelset,
            self.rwparams_svm,
            seeds=self.seeds,
        )

        ## learn struct svm
        logger.debug('start learning')
        self.svm = struct_svm.StructSVM(self.training_set,
                                        self.svm_rwmean_api.compute_loss,
                                        self.svm_rwmean_api.compute_psi,
                                        self.svm_rwmean_api.compute_mvc,
                                        **self.svmparams)

        w, xi, info = self.svm.train()

        return w, xi, info
Example #9
    def load_or_compute_orientations(self, train, test, mask=None):
        from rwsegment import boundary_utils
        reload(boundary_utils)

        ## Train classifier
        logger.info('train orientations with train {} for test {}'.format(
            train, test))

        ##load image and seg
        seg = io_analyze.load(config.dir_reg + test + train +
                              'regseg.hdr').astype(int)
        seg.flat[~np.in1d(seg, self.labelset)] = self.labelset[0]

        ## sample points
        points = boundary_utils.sample_points(np.ones(seg.shape), 5, mask=mask)
        logger.debug('number of sampled points = {}'.format(len(points)))

        ## find edges between muscles
        points_label = seg[tuple(points.T)]
        nlabel = len(self.labelset)
        hist = {}
        orient_scores = np.zeros((self.orients.shape[0], self.nlabel**2))
        #orient_prior = np.ones((3,self.label_pairs.shape[0]))
        ipair = 0
        for l1 in range(nlabel):
            label1 = self.labelset[l1]
            inds1 = np.where(points_label == label1)[0]
            hist[label1] = {}
            for l2 in range(nlabel):
                label2 = self.labelset[l2]
                inds2 = np.where(points_label == label2)[0]
                edges = np.argwhere(
                    np.triu(np.ones((inds1.size, inds2.size)), k=1))
                edges = np.c_[inds1[edges[:, 0]], inds2[edges[:, 1]]]
                vecs = points[edges[:, 1]] - points[edges[:, 0]]
                vecs = vecs / np.c_[np.sqrt(np.sum(vecs**2, axis=1))]
                if l1 == 0 or l1 == l2:
                    avg = np.ones(len(self.orients)) / float(len(self.orients))
                else:
                    scores = self.get_orient_scores(vecs)
                    avg = np.mean(scores, axis=0)
                avgvecs = np.mean(vecs, axis=0)

                hist[label1][label2] = avg
                orient_scores[:, ipair] = avg
                #orient_prior[:,ipair] = avgvecs
                #print self.label_pairs[ipair], avg
                ipair += 1

        #orient_scores /= np.c_[np.sum(orient_scores,axis=1)]
        #import ipdb; ipdb.set_trace()
        return orient_scores, hist
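Two idioms recur in the loop above: enumerating index pairs with `np.triu` plus `np.argwhere`, and normalizing the pair vectors to unit length by dividing by an `np.c_` column of norms. A compact sketch with toy points:

import numpy as np

points = np.array([[0, 0, 0], [1, 0, 0], [0, 2, 0]], dtype=float)
inds1 = np.array([0, 1])
inds2 = np.array([1, 2])

# upper-triangle trick: all (i, j) pairs with i < j across the index sets
edges = np.argwhere(np.triu(np.ones((inds1.size, inds2.size)), k=1))
edges = np.c_[inds1[edges[:, 0]], inds2[edges[:, 1]]]

# unit direction vectors between the paired points
vecs = points[edges[:, 1]] - points[edges[:, 0]]
vecs = vecs / np.c_[np.sqrt(np.sum(vecs ** 2, axis=1))]
print(edges)  # [[0 2]]
print(vecs)   # [[0. 1. 0.]]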
Example #10
    def load_or_compute_orientations(self,train,test, mask=None):
        from rwsegment import boundary_utils
        reload(boundary_utils)

        ## Train classifier
        logger.info('train orientations with train {} for test {}'.format(train, test))

        ##load image and seg
        seg = io_analyze.load(config.dir_reg + test + train + 'regseg.hdr').astype(int)
        seg.flat[~np.in1d(seg, self.labelset)] = self.labelset[0]
        
        ## sample points
        points = boundary_utils.sample_points(np.ones(seg.shape), 5,  mask=mask)
        logger.debug('number of sampled points = {}'.format(len(points)))


        ## find edges between muscles
        points_label = seg[tuple(points.T)]
        nlabel = len(self.labelset)
        hist = {}
        orient_scores = np.zeros((self.orients.shape[0],self.nlabel**2))
        #orient_prior = np.ones((3,self.label_pairs.shape[0]))
        ipair = 0
        for l1 in range(nlabel):
            label1 = self.labelset[l1]
            inds1 = np.where(points_label==label1)[0]
            hist[label1] = {}
            for l2 in range(nlabel):
                label2  = self.labelset[l2]
                inds2   = np.where(points_label==label2)[0]
                edges   = np.argwhere(np.triu(np.ones((inds1.size, inds2.size)),k=1))
                edges   = np.c_[inds1[edges[:,0]], inds2[edges[:,1]]]
                vecs    = points[edges[:,1]] - points[edges[:,0]]
                vecs    = vecs / np.c_[np.sqrt(np.sum(vecs**2,axis=1))]
                if l1==0 or l1==l2:
                    avg = np.ones(len(self.orients))/float(len(self.orients))
                else:
                    scores  = self.get_orient_scores(vecs)
                    avg     = np.mean(scores,axis=0)
                avgvecs = np.mean(vecs,axis=0)
                
                hist[label1][label2] = avg
                orient_scores[:,ipair] = avg
                #orient_prior[:,ipair] = avgvecs
                #print self.label_pairs[ipair], avg
                ipair += 1
                

        #orient_scores /= np.c_[np.sum(orient_scores,axis=1)]
        #import ipdb; ipdb.set_trace()
        return orient_scores,hist
Example #11
def batch_compute_dice(folder_list, labelset=[13, 14, 15, 16]):
    import config
    dir_gt = config.dir_reg
    lbset = np.asarray(labelset, dtype=int)
    for folder in folder_list:
        for vol in config.vols:
            if not os.path.isfile(folder + '/' + vol + '/sol.hdr'):
                continue
            print('computing dice for segmentation: {}'.format(
                folder + '/' + vol + '/sol.hdr'))
            sol = io_analyze.load(folder + '/' + vol + '/sol.hdr').astype(int)
            gt = io_analyze.load(dir_gt + '/' + vol + '/seg.hdr').astype(int)
            d_slice = compute_dice_per_slice(sol, gt, labelset=labelset)
            d_labels = compute_dice_coef(sol, gt, labelset=labelset)
            #print d_slice, d_labels
            np.savetxt(folder + '/' + vol + '/dice_labels.txt',
                       np.c_[d_labels.keys(),
                             d_labels.values()],
                       fmt='%d %f')
            np.savetxt(folder + '/' + vol + '/dice_slices.txt',
                       np.c_[d_slice.keys(), d_slice.values()],
                       fmt='%d %f')
Example #12
def load_or_compute_prior_and_mask(test, force_recompute=False, pca=False, fold=None):
    if fold is not None:
        test_name = 'f{}_{}/'.format(fold[0][:2], test)
    else:
        test_name = test
        fold = [test]
     
    #import ipdb; ipdb.set_trace()
    labelset = np.asarray(config.labelset)
    if pca:
        outdir = config.dir_pca_prior + test_name
    else:
        outdir = config.dir_prior + test_name
    if not os.path.isdir(outdir):
        os.makedirs(outdir)
    
    ## load mask and prior
    prior = None
    file_mask  = outdir + 'mask.hdr'
    file_prior = outdir + 'prior.npz'
    file_U = outdir + 'eigenvectors.npy'
    file_segprior = outdir + 'segprior.hdr'
    file_entropymap = outdir + 'entropymap.hdr'
   
    if force_recompute or not (os.path.exists(file_prior) and os.path.exists(file_mask)):
        if pca:
            _prior, mask = load_or_compute_prior_and_mask(test, fold=fold)
            generator = rwsegment_pca_prior.PriorGenerator(labelset, mask=mask)
        else:
            generator = rwsegment_prior.PriorGenerator(labelset)
 
        ntrain = 0
        for train in config.vols:
            if train in fold: continue
            logger.debug('load training img: {}'.format(train))
            
            ## segmentation
            file_seg = config.dir_reg + test + train + 'regseg.hdr'
            seg = io_analyze.load(file_seg)
            
            ## image (for intensity prior)
            file_im = config.dir_reg + test + train + 'reggray.hdr'
            im = io_analyze.load(file_im)
            
            generator.add_training_data(seg,image=im, nrandom=5)
            ntrain += 1

        if not pca:
            from scipy import ndimage
            mask    = generator.get_mask()
            struct  = np.ones((7,)*mask.ndim)
            mask    = ndimage.binary_dilation(
                    mask.astype(bool),
                    structure=struct,
                    ).astype(bool)
         
                 
        prior = generator.get_prior(mask)
        #import ipdb; ipdb.set_trace()

        nlabel = len(labelset)
        segprior = np.zeros(mask.shape)
        segprior.flat[prior['imask']] = labelset[np.argmax(prior['data'],axis=0)]
            
        entropymap = np.zeros(mask.shape)
        entropymap.flat[prior['imask']] = np.sum(
            np.log(prior['data'] + 1e-10)*prior['data'],
            axis=0)
        entropymap = entropymap / np.log(nlabel) * 2**15
        
        if 'eigenvectors' in prior:
            U = prior.pop('eigenvectors')
            print('size of U {}, dtype={}'.format(U.size, U.dtype))
            np.save(file_U, U)
            prior['eigenvectors'] = U
        np.savez(file_prior,**prior)
        
        io_analyze.save(file_mask, mask.astype(np.int32))
        io_analyze.save(file_segprior, segprior.astype(np.int32))
        #io_analyze.save(file_entropymap, entropymap.astype(np.int32))
        
    mask  = io_analyze.load(file_mask).astype(bool)
    prior = dict(np.load(file_prior))
    if pca:
        U = np.load(file_U)
        prior['eigenvectors'] = U
    return prior, mask
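The entropy map above stores, per voxel, sum_l p_l * log(p_l) normalized by log(nlabel) and scaled by 2**15, so the most uncertain voxels approach -2**15. A standalone sketch on a toy prior:

import numpy as np

# toy label-probability prior: (nlabel, nvoxel), columns sum to 1
data = np.array([[0.9, 1.0 / 3],
                 [0.05, 1.0 / 3],
                 [0.05, 1.0 / 3]])
nlabel = data.shape[0]

# per-voxel negative entropy, normalized and scaled as in the code above
entropymap = np.sum(np.log(data + 1e-10) * data, axis=0) / np.log(nlabel) * 2 ** 15
print(entropymap)  # about -11764 for the peaked voxel, -32768 for the uniform one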
Example #13
    def process_sample(self, test, fold=None):

        ## get prior
        prior, mask = load_or_compute_prior_and_mask(
            test,
            fold=fold,
            force_recompute=self.force_recompute_prior)
        seeds   = (-1)*mask
        
        ## load image
        file_name = config.dir_reg + test + 'gray.hdr'        
        logger.info('segmenting data: {}'.format(file_name))
        im      = io_analyze.load(file_name)
        file_gt = config.dir_reg + test + 'seg.hdr'
        seg     = io_analyze.load(file_gt)
        seg.flat[~np.in1d(seg, self.labelset)] = self.labelset[0]
        
           
        ## normalize image
        nim = im/np.std(im)
            
        ## init anchor_api
        anchor_api = MetaAnchor(
            prior=prior,
            prior_models=self.prior_models,
            prior_weights=self.prior_weights,
            image=nim,
            )
           
        ## start segmenting
        #import ipdb; ipdb.set_trace()
        sol,y = rwsegment.segment(
            nim, 
            anchor_api,
            seeds=seeds, 
            labelset=self.labelset, 
            weight_function=self.weight_function,
            **self.params
            )

        ## compute losses
        z = seg.ravel()==np.c_[self.labelset]
        flatmask = mask.ravel()*np.ones((len(self.labelset),1))
        
        ## loss 0 : 1 - Dice(y,z)
        loss0 = loss_functions.ideal_loss(z,y,mask=flatmask)
        logger.info('Tloss = {}'.format(loss0))
        
        ## loss1: squared difference with ztilde
        loss1 = loss_functions.anchor_loss(z,y,mask=flatmask)
        logger.info('SDloss = {}'.format(loss1))
        
        ## loss2: laplacian loss
        loss2 = loss_functions.laplacian_loss(z,y,mask=flatmask)
        logger.info('LAPloss = {}'.format(loss2))
 
        ## loss3: linear loss
        loss3 = loss_functions.linear_loss(z,y,mask=flatmask)
        logger.info('LINloss = {}'.format(loss3))
        
        ## compute Dice coefficient per label
        dice    = compute_dice_coef(sol, seg,labelset=self.labelset)
        logger.info('Dice: {}'.format(dice))
        
        if not config.debug:
            if fold is not None:
                test_name = 'f{}_{}'.format(fold[0][:2], test)
            else:
                test_name = test
            outdir = config.dir_seg + \
                '/{}/{}'.format(self.model_name,test_name)
            logger.info('saving data in: {}'.format(outdir))
            if not os.path.isdir(outdir):
                os.makedirs(outdir)
        
            f = open(outdir + 'losses.txt', 'w')
            f.write('ideal_loss\t{}\n'.format(loss0))
            f.write('anchor_loss\t{}\n'.format(loss1))
            f.write('laplacian_loss\t{}\n'.format(loss2))
            f.close()
            
            io_analyze.save(outdir + 'sol.hdr', sol.astype(np.int32)) 
            np.savetxt(
                outdir + 'dice.txt', np.c_[dice.keys(),dice.values()],fmt='%d %.8f')
Example #14
    def compute_mean_segmentation(self, tests):
        for test in tests:
            file_gt = config.dir_reg + test + 'seg.hdr'
            seg     = io_analyze.load(file_gt)
            seg.flat[~np.in1d(seg, self.labelset)] = self.labelset[0]
           

            ## get prior
            prior, mask = load_or_compute_prior_and_mask(
                test,force_recompute=self.force_recompute_prior)
            mask = mask.astype(bool)            
           

            y = np.zeros((len(self.labelset),seg.size))
            y[:,0] = 1
            y.flat[prior['imask']] = prior['data']
 
            sol = np.zeros(seg.shape,dtype=np.int32)
            sol[mask] = self.labelset[np.argmax(prior['data'],axis=0)]

            ## compute losses
            z = seg.ravel()==np.c_[self.labelset]
            flatmask = mask.ravel()*np.ones((len(self.labelset),1))
 
            ## loss 0 : 1 - Dice(y,z)
            loss0 = loss_functions.ideal_loss(z,y,mask=flatmask)
            logger.info('Tloss = {}'.format(loss0))
            
            ## loss1: squared difference with ztilde
            #loss1 = loss_functions.anchor_loss(z,y,mask=flatmask)
            #logger.info('SDloss = {}'.format(loss1))
            
            ## loss2: laplacian loss
            #loss2 = loss_functions.laplacian_loss(z,y,mask=flatmask)
            #logger.info('LAPloss = {}'.format(loss2))
 
            ## loss3: linear loss
            #loss3 = loss_functions.linear_loss(z,y,mask=flatmask)
            #logger.info('LINloss = {}'.format(loss3))
            
            ## compute Dice coefficient per label
            dice    = compute_dice_coef(sol, seg,labelset=self.labelset)
            logger.info('Dice: {}'.format(dice))
            
            if not config.debug:
                outdir = config.dir_seg + \
                    '/{}/{}'.format('mean',test)
                logger.info('saving data in: {}'.format(outdir))
                if not os.path.isdir(outdir):
                    os.makedirs(outdir)
            
                #f = open(outdir + 'losses.txt', 'w')
                #f.write('ideal_loss\t{}\n'.format(loss0))
                #f.write('anchor_loss\t{}\n'.format(loss1))
                #f.write('laplacian_loss\t{}\n'.format(loss2))
                #f.close()
                
                io_analyze.save(outdir + 'sol.hdr', sol.astype(np.int32)) 

                np.savetxt(
                    outdir + 'dice.txt', np.c_[dice.keys(),dice.values()],fmt='%d %.8f')
Example #15
    def make_training_set(self, test, fold=None):
        if fold is None:
            fold = [test]

        ## training images and segmentations
        if self.isroot:
            slice_border = 20  # do not consider top and bottom slices
            images = []
            segmentations = []
            metadata = []

            for train in self.training_vols:
                if train in fold: continue
                logger.info('loading training data: {}'.format(train))

                ## file names
                file_seg = self.dir_reg + test + train + 'regseg.hdr'
                file_im = self.dir_reg + test + train + 'reggray.hdr'

                ## load image
                im = io_analyze.load(file_im)
                im = im / np.std(im)  # normalize image by std

                ## load segmentation
                seg = io_analyze.load(file_seg).astype(int)
                seg.flat[~np.in1d(seg.ravel(), self.labelset)] = self.labelset[0]

                if self.crop:
                    ## if split training images into smaller sets
                    pmask = -1 * np.ones(seg.shape, dtype=int)
                    pmask.flat[self.prior['imask']] = np.arange(
                        len(self.prior['imask']))
                    nslice = im.shape[0]
                    for i in range(nslice // self.slice_step):
                        istart = i * self.slice_step
                        iend = np.minimum(
                            nslice, i * self.slice_step + self.slice_size)
                        if istart < slice_border or istart > (im.shape[0] -
                                                              slice_border):
                            continue
                        islices = np.arange(istart, iend)
                        if np.all(seg[islices]==self.labelset[0]) or \
                           np.all(self.seeds[islices]>=0):
                            continue
                        logger.debug(
                            'ivol {}, slices: start end: {} {}'.format(
                                len(images), istart, iend))
                        zbin = (seg[islices].ravel() == np.c_[self.labelset])  # make binary label matrix z
                        pmaski = pmask[islices]
                        imask = np.where(pmaski.ravel() > 0)[0]
                        iimask = pmaski.flat[imask]
                        #iimask = pmask[islices]
                        #iimask = iimask[iimask>=0]

                        ## append to training set
                        images.append(im[islices])
                        segmentations.append(zbin)
                        metadata.append({
                            'islices': islices,
                            'imask': imask,
                            'iimask': iimask
                        })

                        ## break loop
                        if len(images) == self.select_vol.stop:
                            break

                else:
                    zbin = (seg.ravel() == np.c_[self.labelset])  # make binary label matrix z
                    ## append to training set
                    images.append(im)
                    segmentations.append(zbin)
                    metadata.append({})

                ## break loop
                if len(images) == self.select_vol.stop:
                    break

            nmaxvol = 100
            if len(images) > nmaxvol:
                iselect = np.arange(len(images))
                iselect = iselect[np.random.randint(
                    0,
                    len(iselect),
                    np.minimum(nmaxvol, len(iselect)),
                )]
                iselect = np.sort(iselect)
                logger.info('selected training: {}'.format(iselect))
                images = [images[i] for i in iselect]
                segmentations = [segmentations[i] for i in iselect]
                metadata = [metadata[i] for i in iselect]

            ntrain = len(images)
            logger.info('Learning with {} training examples'\
                .format(ntrain))
            self.training_set = (images, segmentations, metadata)
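The cropping branch above walks each volume in overlapping slice windows: stride `slice_step`, extent `slice_size`, skipping windows that start within `slice_border` of the top or bottom. A minimal sketch of the window generation alone, with toy sizes:

nslice, slice_step, slice_size, slice_border = 40, 10, 20, 5

for i in range(nslice // slice_step):
    istart = i * slice_step
    iend = min(nslice, i * slice_step + slice_size)
    if istart < slice_border or istart > (nslice - slice_border):
        continue  # skip windows starting inside the border slices
    print(istart, iend)  # prints: 10 30 / 20 40 / 30 40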
Example #16
 def run_svm_inference(self,test,w):
     logger.info('running inference on: {}'.format(test))
     
     outdir = self.dir_inf + test
     if not os.path.isdir(outdir):
         os.makedirs(outdir)
 
     ## segment test image with trained w
     def wwf(im,_w):    
         ''' meta weight function'''
         data = 0
         for iwf,wf in enumerate(self.weight_functions.values()):
             ij,_data = wf(im)
             data += _w[iwf]*_data
         return ij, data
     
     ## load images and ground truth
     file_seg = self.dir_reg + test + 'seg.hdr'
     file_im  = self.dir_reg + test + 'gray.hdr'
     im  = io_analyze.load(file_im)
     seg = io_analyze.load(file_seg)
     seg.flat[~np.in1d(seg.ravel(),self.labelset)] = self.labelset[0]
     
     ## normalize image
     im = im/np.std(im) # normalize image by its standard deviation
 
     ## prior
     anchor_api = BaseAnchorAPI(
         self.prior, 
         anchor_weight=w[-1],
         )
 
     sol,y = rwsegment.segment(
         im, 
         anchor_api, 
         seeds=self.seeds,
         weight_function=lambda im: wwf(im, w),
         **self.rwparams_inf
         )
     
     np.save(outdir + 'y.test.npy',y)        
     io_analyze.save(outdir + 'sol.test.hdr',sol.astype(np.int32))
     
     ## compute Dice coefficient
     dice = compute_dice_coef(sol, seg,labelset=self.labelset)
     np.savetxt(
         outdir + 'dice.test.txt', np.c_[dice.keys(),dice.values()],fmt='%d %.8f')
         
     ## inference compare with gold standard
     dice_gold = np.loadtxt(outdir + 'dice.gold.txt')
     y_gold    = np.load(outdir + 'y.gold.npy')        
     sol_gold  = io_analyze.load(outdir + 'sol.gold.hdr')
     
     np.testing.assert_allclose(
         dice.values(), 
         dict(dice_gold).values(), 
         err_msg='FAIL: dice coef mismatch',
         atol=1e-8)
     np.testing.assert_allclose(y, y_gold,  err_msg='FAIL: y mismatch')
     np.testing.assert_equal(sol, sol_gold, err_msg='FAIL: sol mismatch')
     
     print('PASS: inference tests')
Example #17
    def process_sample(self, train, test):
        outdir = config.dir_work + 'autoseeds/' + config.basis + '/' + train + '/' + test
        logger.info('saving data in: {}'.format(outdir))
        if not os.path.isdir(outdir):
            os.makedirs(outdir)

        ## get prior
        from scipy import ndimage
        segtrain = io_analyze.load(config.dir_reg + test + train +
                                   '/regseg.hdr')
        segtrain.flat[~np.in1d(segtrain, self.labelset)] = self.labelset[0]
        struct = np.ones((10, ) * segtrain.ndim)
        mask = ndimage.binary_dilation(
            segtrain > 0,
            structure=struct,
        ).astype(bool)

        #prior, mask = load_or_compute_prior_and_mask(
        #    test,force_recompute=self.force_recompute_prior)
        #mask = mask.astype(bool)

        ## load image
        file_name = config.dir_reg + test + 'gray.hdr'
        logger.info('segmenting data: {}'.format(file_name))
        im = io_analyze.load(file_name).astype(float)
        file_gt = config.dir_reg + test + 'seg.hdr'
        seg = io_analyze.load(file_gt)
        seg.flat[~np.in1d(seg, self.labelset)] = self.labelset[0]

        ## normalize image
        nim = im / np.std(im)

        #orient_scores = self.load_or_compute_orientations(train,test, mask=mask)

        if 1:  #not os.path.isfile(outdir + 'points.npy'):

            from rwsegment import boundary_utils
            reload(boundary_utils)
            ## sample points
            points = boundary_utils.sample_points(im, self.step, mask=mask)
            points = points[mask[tuple(points.T)]]
            impoints = np.zeros(im.shape, dtype=int)
            impoints[tuple(points.T)] = np.arange(len(points)) + 1
            ipoints = np.where(impoints.ravel())[0]
            points = np.argwhere(impoints)
            np.save(outdir + 'points.npy', points)
            impoints[tuple(points.T)] = np.arange(len(points)) + 1

            ## set unary potentials from prior: array of unary costs
            nlabel = len(self.labelset)
            dist = self.distance_to_train(segtrain, points)
            T = 10.0
            prob_pts = np.exp(-(dist / T)**2) / np.c_[np.sum(
                np.exp(-(dist / T)**2), axis=1)]
            #prob = np.c_[np.ones(im.size), np.zeros((im.size, nlabel-1))]
            #prob[mask.ravel(),:] = prior['data'].T
            #prob_pts = prob[ipoints,:]
            np.save(outdir + 'prob_points.npy', prob_pts)

            ## binary potentials
            ## compute edges
            edges, edgev, labels = boundary_utils.get_edges(im,
                                                            points,
                                                            mask=mask)
            edges = np.sort(edges, axis=1)
            np.save(outdir + 'edges.npy', edges)

            ## get orientation hist
            orient_scores, hist = self.load_or_compute_orientations(train,
                                                                    test,
                                                                    mask=mask)

            ##classify edges
            vecs = points[edges[:, 1]] - points[edges[:, 0]]
            vecs = vecs / np.c_[np.sqrt(np.sum(vecs**2, axis=1))]
            scores = self.get_orient_scores(vecs)
            prob_orient = np.dot(scores, orient_scores)
            #prob_orient = prob_orient/np.c_[np.sum(prob_orient, axis=1)]
            np.save(outdir + 'prob_orient.npy', prob_orient)
            ''' 
            ## load classifier
            classifier = self.load_or_compute_classifier(train,test, mask=mask)
 
            ## extract profiles
            profiles,emap,dists = boundary_utils.get_profiles(nim, points, edges, rad=0)
   
            ## make features  
            x = boundary_utils.make_features(
                profiles, 
                size=self.sizex, 
                additional=[dists,edgev,edgev/dists],
                )
            
            ## classify
            cl, scores = classifier.classify(x)

            ## ground truth
            z = boundary_utils.is_boundary(points, edges, seg)

            logger.info('non boundary classification: {}%'\
                .format(np.sum((np.r_[z]==0)*(np.r_[cl]==0))/float(np.sum(np.r_[z]==0))*100))
            logger.info('boundary classification: {}%'\
                .format(np.sum((np.r_[z]==1)*(np.r_[cl]==1))/float(np.sum(np.r_[z]==1))*100))
            np.save(outdir + 'classified.npy', cl) 

            ## probabilities
            prob_edges = 1.  - scores/np.c_[np.sum(scores, axis=1)]
      
            ##save probs
            np.save(outdir + 'prob_edges.npy',prob_edges)
            '''
        else:
            points = np.load(outdir + 'points.npy')
            edges = np.load(outdir + 'edges.npy')
            cl = np.load(outdir + 'classified.npy')
            prob_pts = np.load(outdir + 'prob_points.npy')
            #prob_edges = np.load(outdir + 'prob_edges.npy')
            prob_orient = np.load(outdir + 'prob_orient.npy')

        ## make potentials
        unary = -np.log(prob_pts + 1e-10)
        #binary = - np.log(prob_edges + 1e-10)
        #thresh = (prob_orient.shape[1] - 1.0)/prob_orient.shape[1]
        thresh = (len(self.orients) - 1.0) / len(self.orients)
        orient_cost = -np.log(np.clip(prob_orient + thresh, 0, 1) +
                              1e-10) * 100
        orient_cost = np.clip(orient_cost, 0, 1e10)
        #import ipdb; ipdb.set_trace()

        ## solve MRF:
        import ipdb
        ipdb.set_trace()
        '''
        from rwsegment.mrf import fastPD
        class CostFunction(object):
            def __init__(self,**kwargs):
                self.binary = kwargs.pop('binary',0)
                self.orient_indices = kwargs.pop('orient_indices')
                self.orient_cost = kwargs.pop('orient_cost')

            def __call__(self,e,l1,l2):
                idpair = self.orient_indices[l1,l2]
                pair_cost = self.orient_cost[e,idpair]
                cost = (l1!=l2)*pair_cost
                #return (l1!=l2)*(1-cl[e])*0.1
                #return (l1!=l2)*self.binary[e,1]*0.1
                #y = l1!=l2
                #return self.binary[e, y]*pair_cost
                print e, l1, l2, cost
                return cost
 
        #sol, en = fastPD.fastPD_callback(unary, edges, cost_function(binary), debug=True)  
        cost_function = CostFunction(
            #binary=binary,
            orient_indices=self.orient_indices,
            orient_cost=orient_cost,
            )
        sol, en = fastPD.fastPD_callback(unary, edges, cost_function, debug=True)  
        '''
        wpairs = orient_cost
        from rwsegment.mrf import trw
        sol, en = trw.TRW_general(unary,
                                  edges,
                                  wpairs,
                                  niters=1000,
                                  verbose=True)

        labels = self.labelset[sol]
        imsol = np.ones(im.shape, dtype=np.int32) * 20
        imsol[tuple(points.T)] = labels
        io_analyze.save(outdir + 'imseeds.hdr', imsol)

        ## classify sol
        gtlabels = seg[tuple(points.T)]
        priorlabels = self.labelset[np.argmin(unary, axis=1)]

        err_prior = 1 - np.sum(gtlabels == priorlabels) / float(len(points))
        err = 1 - np.sum(gtlabels == labels) / float(len(points))

        logger.info('error in prior sol: {}%'.format(err_prior * 100))
        logger.info('error in sol: {}%'.format(err * 100))

        import ipdb
        ipdb.set_trace()

        ## start segmenting
        # assumption: seed the random walker with the MRF labels at the sampled
        # points (-1 = free voxel); 'seeds' was undefined in the original
        seeds = -np.ones(im.shape, dtype=int)
        seeds[tuple(points.T)] = labels
        sol, y = rwsegment.segment(nim,
                                   seeds=seeds,
                                   labelset=self.labelset,
                                   weight_function=self.weight_function,
                                   **self.params)

        ## compute Dice coefficient per label
        dice = compute_dice_coef(sol, seg, labelset=self.labelset)
        logger.info('Dice: {}'.format(dice))

        if not config.debug:
            io_analyze.save(outdir + 'sol.hdr', sol.astype(np.int32))
            np.savetxt(outdir + 'dice.txt',
                       np.c_[dice.keys(), dice.values()],
                       fmt='%d %.8f')
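The unary potentials above come from a softmax over negative squared scaled distances, followed by a negative log. A toy sketch of `prob_pts` and `unary` (rows are points, columns labels; the distances are hypothetical):

import numpy as np

dist = np.array([[0.0, 20.0],
                 [10.0, 10.0]])  # point-to-label distances
T = 10.0

# per-point softmax of exp(-(d/T)^2), as above
e = np.exp(-(dist / T) ** 2)
prob_pts = e / np.c_[np.sum(e, axis=1)]

# unary potentials are negative log-probabilities
unary = -np.log(prob_pts + 1e-10)
print(prob_pts)              # [[0.982 0.018] [0.5 0.5]]
print(unary.argmin(axis=1))  # most likely label per point: [0 0]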
Example #18
    def process_sample(self,train, test):
        outdir = config.dir_work + 'autoseeds/' + config.basis + '/' + train + '/' + test
        logger.info('saving data in: {}'.format(outdir))
        if not os.path.isdir(outdir):
            os.makedirs(outdir)
        
        ## get prior
        from scipy import ndimage
        segtrain = io_analyze.load(config.dir_reg + test + train + '/regseg.hdr')
        segtrain.flat[~np.in1d(segtrain, self.labelset)] = self.labelset[0]
        struct  = np.ones((10,)*segtrain.ndim)
        mask    = ndimage.binary_dilation(
                segtrain>0,
                structure=struct,
                ).astype(bool)
 
        #prior, mask = load_or_compute_prior_and_mask(
        #    test,force_recompute=self.force_recompute_prior)
        #mask = mask.astype(bool)

        ## load image
        file_name = config.dir_reg + test + 'gray.hdr'        
        logger.info('segmenting data: {}'.format(file_name))
        im      = io_analyze.load(file_name).astype(float)
        file_gt = config.dir_reg + test + 'seg.hdr'
        seg     = io_analyze.load(file_gt)
        seg.flat[~np.in1d(seg, self.labelset)] = self.labelset[0]
        
           
        ## normalize image
        nim = im/np.std(im)
     

        #orient_scores = self.load_or_compute_orientations(train,test, mask=mask)
 
        if 1:#not os.path.isfile(outdir + 'points.npy'):
  
            from rwsegment import boundary_utils
            reload(boundary_utils)
            ## sample points
            points = boundary_utils.sample_points(im, self.step,  mask=mask)
            points = points[mask[tuple(points.T)]]
            impoints = np.zeros(im.shape,dtype=int)
            impoints[tuple(points.T)] = np.arange(len(points)) + 1
            ipoints = np.where(impoints.ravel())[0]
            points = np.argwhere(impoints) 
            np.save(outdir + 'points.npy', points)
            impoints[tuple(points.T)] = np.arange(len(points)) + 1

            ## set unary potentials from prior: array of unary costs
            nlabel = len(self.labelset)
            dist = self.distance_to_train(segtrain, points)
            T = 10.0
            prob_pts = np.exp(-(dist/T)**2) / np.c_[np.sum(np.exp(-(dist/T)**2),axis=1)]
            #prob = np.c_[np.ones(im.size), np.zeros((im.size, nlabel-1))]
            #prob[mask.ravel(),:] = prior['data'].T
            #prob_pts = prob[ipoints,:]
            np.save(outdir + 'prob_points.npy', prob_pts) 
    
            ## binary potentials
            ## compute edges
            edges,edgev,labels = boundary_utils.get_edges(im, points,  mask=mask)
            edges = np.sort(edges,axis=1)
            np.save(outdir + 'edges.npy', edges)

            ## get orientation hist
            orient_scores,hist = self.load_or_compute_orientations(train,test, mask=mask)

            ##classify edges
            vecs = points[edges[:,1]] - points[edges[:,0]]
            vecs = vecs / np.c_[np.sqrt(np.sum(vecs**2,axis=1))] 
            scores = self.get_orient_scores(vecs)
            prob_orient = np.dot(scores, orient_scores)
            #prob_orient = prob_orient/np.c_[np.sum(prob_orient, axis=1)]
            np.save(outdir + 'prob_orient.npy', prob_orient) 

            ''' 
            ## load classifier
            classifier = self.load_or_compute_classifier(train,test, mask=mask)
 
            ## extract profiles
            profiles,emap,dists = boundary_utils.get_profiles(nim, points, edges, rad=0)
   
            ## make features  
            x = boundary_utils.make_features(
                profiles, 
                size=self.sizex, 
                additional=[dists,edgev,edgev/dists],
                )
            
            ## classify
            cl, scores = classifier.classify(x)

            ## ground truth
            z = boundary_utils.is_boundary(points, edges, seg)

            logger.info('non boundary classification: {}%'\
                .format(np.sum((np.r_[z]==0)*(np.r_[cl]==0))/float(np.sum(np.r_[z]==0))*100))
            logger.info('boundary classification: {}%'\
                .format(np.sum((np.r_[z]==1)*(np.r_[cl]==1))/float(np.sum(np.r_[z]==1))*100))
            np.save(outdir + 'classified.npy', cl) 

            ## probabilities
            prob_edges = 1.  - scores/np.c_[np.sum(scores, axis=1)]
      
            ##save probs
            np.save(outdir + 'prob_edges.npy',prob_edges)
            '''
        else:
            points     = np.load(outdir + 'points.npy')
            edges      = np.load(outdir + 'edges.npy')
            cl         = np.load(outdir + 'classified.npy') 
            prob_pts   = np.load(outdir + 'prob_points.npy')
            #prob_edges = np.load(outdir + 'prob_edges.npy')
            prob_orient = np.load(outdir + 'prob_orient.npy') 

        ## make potentials
        unary  = - np.log(prob_pts + 1e-10)
        #binary = - np.log(prob_edges + 1e-10)
        #thresh = (prob_orient.shape[1] - 1.0)/prob_orient.shape[1]
        thresh = (len(self.orients) - 1.0) / len(self.orients)
        orient_cost = - np.log(np.clip(prob_orient + thresh,0,1) + 1e-10)*100
        orient_cost = np.clip(orient_cost, 0, 1e10)
        #import ipdb; ipdb.set_trace()

        ## solve MRF:
        import ipdb; ipdb.set_trace()
        '''
        from rwsegment.mrf import fastPD
        class CostFunction(object):
            def __init__(self,**kwargs):
                self.binary = kwargs.pop('binary',0)
                self.orient_indices = kwargs.pop('orient_indices')
                self.orient_cost = kwargs.pop('orient_cost')

            def __call__(self,e,l1,l2):
                idpair = self.orient_indices[l1,l2]
                pair_cost = self.orient_cost[e,idpair]
                cost = (l1!=l2)*pair_cost
                #return (l1!=l2)*(1-cl[e])*0.1
                #return (l1!=l2)*self.binary[e,1]*0.1
                #y = l1!=l2
                #return self.binary[e, y]*pair_cost
                print e, l1, l2, cost
                return cost
 
        #sol, en = fastPD.fastPD_callback(unary, edges, cost_function(binary), debug=True)  
        cost_function = CostFunction(
            #binary=binary,
            orient_indices=self.orient_indices,
            orient_cost=orient_cost,
            )
        sol, en = fastPD.fastPD_callback(unary, edges, cost_function, debug=True)  
        '''
        wpairs = orient_cost
        from rwsegment.mrf import trw
        sol, en = trw.TRW_general(
            unary, edges, wpairs, niters=1000, verbose=True)

        labels = self.labelset[sol]
        imsol = np.ones(im.shape, dtype=np.int32)*20
        imsol[tuple(points.T)] = labels
        io_analyze.save(outdir + 'imseeds.hdr', imsol)

        ## classify sol
        gtlabels    = seg[tuple(points.T)]
        priorlabels = self.labelset[np.argmin(unary,axis=1)]

        err_prior = 1 - np.sum(gtlabels==priorlabels)/float(len(points))
        err       = 1 - np.sum(gtlabels==labels)/float(len(points))

        logger.info('error in prior sol: {}%'.format(err_prior*100))
        logger.info('error in sol: {}%'.format(err*100))

        import ipdb; ipdb.set_trace()

        ## start segmenting
        # assumption: seed the random walker with the MRF labels at the sampled
        # points (-1 = free voxel); 'seeds' was undefined in the original
        seeds = -np.ones(im.shape, dtype=int)
        seeds[tuple(points.T)] = labels
        sol,y = rwsegment.segment(
            nim, 
            seeds=seeds, 
            labelset=self.labelset, 
            weight_function=self.weight_function,
            **self.params
            )

       
        ## compute Dice coefficient per label
        dice    = compute_dice_coef(sol, seg,labelset=self.labelset)
        logger.info('Dice: {}'.format(dice))
        
        if not config.debug:
            io_analyze.save(outdir + 'sol.hdr', sol.astype(np.int32))
            np.savetxt(
                outdir + 'dice.txt', np.c_[dice.keys(),dice.values()],fmt='%d %.8f')
Example #19
    def run_svm_inference(self, test, w, test_dir):
        logger.info('running inference on: {}'.format(test))

        ## normalize w
        # w = w / np.sqrt(np.dot(w,w))
        strw = ' '.join('{:.3}'.format(val)
                        for val in np.asarray(w) * self.psi_scale)
        logger.debug('scaled w=[{}]'.format(strw))

        weights_laplacians = np.asarray(w)[self.indices_laplacians]
        weights_laplacians_h = np.asarray(
            self.hand_tuned_w)[self.indices_laplacians]
        weights_priors = np.asarray(w)[self.indices_priors]
        weights_priors_h = np.asarray(self.hand_tuned_w)[self.indices_priors]

        ## segment test image with trained w
        '''
        def meta_weight_functions(im,i,j,_w):    
            data = 0
            for iwf,wf in enumerate(self.laplacian_functions):
                _data = wf(im,i,j)
                data += _w[iwf]*_data
            return data
        weight_function = lambda im: meta_weight_functions(im,i,j,weights_laplacians)
        weight_function_h = lambda im: meta_weight_functions(im,i,j,weights_laplacians_h)
        '''
        weight_function = MetaLaplacianFunction(weights_laplacians,
                                                self.laplacian_functions)

        weight_function_h = MetaLaplacianFunction(weights_laplacians_h,
                                                  self.laplacian_functions)

        ## load images and ground truth
        file_seg = self.dir_reg + test + 'seg.hdr'
        file_im = self.dir_reg + test + 'gray.hdr'
        im = io_analyze.load(file_im)
        seg = io_analyze.load(file_seg)
        seg.flat[~np.in1d(seg.ravel(), self.labelset)] = self.labelset[0]

        nim = im / np.std(im)  # normalize image by std

        ## test training data ?
        inference_train = True
        if inference_train:
            train_ims, train_segs, train_metas = self.training_set
            for tim, tz, tmeta in zip(train_ims, train_segs, train_metas):
                ## retrieve metadata
                islices = tmeta.pop('islices', None)
                imask = tmeta.pop('imask', None)
                iimask = tmeta.pop('iimask', None)
                if islices is not None:
                    tseeds = self.seeds[islices]
                    tprior = {
                        'data': np.asarray(self.prior['data'])[:, iimask],
                        'imask': imask,
                        'variance': np.asarray(self.prior['variance'])[:,
                                                                       iimask],
                        'labelset': self.labelset,
                    }
                    if 'intensity' in self.prior:
                        tprior['intensity'] = self.prior['intensity']
                else:
                    tseeds = self.seeds
                    tprior = self.prior

                ## prior
                tseg = self.labelset[np.argmax(tz, axis=0)].reshape(tim.shape)
                tanchor_api = MetaAnchor(
                    tprior,
                    self.prior_functions,
                    weights_priors,
                    image=tim,
                )
                tsol, ty = rwsegment.segment(tim,
                                             tanchor_api,
                                             seeds=tseeds,
                                             weight_function=weight_function,
                                             **self.rwparams_inf)
                ## compute Dice coefficient
                tdice = compute_dice_coef(tsol, tseg, labelset=self.labelset)
                logger.info('Dice coefficients for train: \n{}'.format(tdice))
                nlabel = len(self.labelset)
                tflatmask = np.zeros(ty.shape, dtype=bool)
                tflatmask[:, imask] = True
                loss0 = loss_functions.ideal_loss(tz, ty, mask=tflatmask)
                logger.info('Tloss = {}'.format(loss0))
                ## loss1: squared difference with ztilde
                loss1 = loss_functions.anchor_loss(tz, ty, mask=tflatmask)
                logger.info('SDloss = {}'.format(loss1))
                ## loss2: laplacian loss
                loss2 = loss_functions.laplacian_loss(tz, ty, mask=tflatmask)
                logger.info('LAPloss = {}'.format(loss2))

                tanchor_api_h = MetaAnchor(
                    tprior,
                    self.prior_functions,
                    weights_priors_h,
                    image=tim,
                )

                tsol, ty = rwsegment.segment(tim,
                                             tanchor_api_h,
                                             seeds=tseeds,
                                             weight_function=weight_function_h,
                                             **self.rwparams_inf)
                ## compute Dice coefficient
                tdice = compute_dice_coef(tsol, tseg, labelset=self.labelset)
                logger.info(
                    'Dice coefficients for train (hand-tuned): \n{}'.format(
                        tdice))
                loss0 = loss_functions.ideal_loss(tz, ty, mask=tflatmask)
                logger.info('Tloss (hand-tuned) = {}'.format(loss0))
                ## loss1: squared difference with ztilde
                loss1 = loss_functions.anchor_loss(tz, ty, mask=tflatmask)
                logger.info('SDloss (hand-tuned) = {}'.format(loss1))
                ## loss2: laplacian loss
                loss2 = loss_functions.laplacian_loss(tz, ty, mask=tflatmask)
                logger.info('LAPloss (hand-tuned) = {}'.format(loss2))
                break

        ## prior
        anchor_api = MetaAnchor(
            self.prior,
            self.prior_functions,
            weights_priors,
            image=nim,
        )

        sol, y = rwsegment.segment(nim,
                                   anchor_api,
                                   seeds=self.seeds,
                                   weight_function=weight_function,
                                   **self.rwparams_inf)

        ## compute Dice coefficient
        dice = compute_dice_coef(sol, seg, labelset=self.labelset)
        logger.info('Dice coefficients: \n{}'.format(dice))

        ## objective
        en_rw = rwsegment.energy_rw(nim,
                                    y,
                                    seeds=self.seeds,
                                    weight_function=weight_function,
                                    **self.rwparams_inf)
        en_anchor = rwsegment.energy_anchor(nim,
                                            y,
                                            anchor_api,
                                            seeds=self.seeds,
                                            **self.rwparams_inf)
        obj = en_rw + en_anchor
        logger.info('Objective = {:.3}'.format(obj))

        ## compute losses
        z = seg.ravel() == np.c_[self.labelset]
        mask = self.seeds < 0
        flatmask = mask.ravel() * np.ones((len(self.labelset), 1))

        ## loss 0 : 1 - Dice(y,z)
        loss0 = loss_functions.ideal_loss(z, y, mask=flatmask)
        logger.info('Tloss = {}'.format(loss0))

        ## loss1: squared difference with ztilde
        loss1 = loss_functions.anchor_loss(z, y, mask=flatmask)
        logger.info('SDloss = {}'.format(loss1))

        ## loss2: laplacian loss
        loss2 = loss_functions.laplacian_loss(z, y, mask=flatmask)
        logger.info('LAPloss = {}'.format(loss2))

        ## loss3: linear loss
        loss3 = loss_functions.linear_loss(z, y, mask=flatmask)
        logger.info('LINloss = {}'.format(loss3))

        ## saving
        if self.debug:
            pass
        elif self.isroot:
            outdir = self.dir_inf + test_dir
            logger.info('saving data in: {}'.format(outdir))
            if not os.path.isdir(outdir):
                os.makedirs(outdir)

            #io_analyze.save(outdir + 'im.hdr',im.astype(np.int32))
            #np.save(outdir + 'y.npy',y)
            #io_analyze.save(outdir + 'sol.hdr',sol.astype(np.int32))
            np.savetxt(outdir + 'objective.txt', [obj])
            np.savetxt(outdir + 'dice.txt',
                       np.c_[dice.keys(), dice.values()],
                       fmt='%d %f')

            f = open(outdir + 'losses.txt', 'w')
            f.write('ideal_loss\t{}\n'.format(loss0))
            f.write('anchor_loss\t{}\n'.format(loss1))
            f.write('laplacian_loss\t{}\n'.format(loss2))
            f.close()
Example #20
def compute_objective(test, y, w):
    im = io_analyze.load(config.dir_reg + test + 'gray.hdr')
    nim = im / np.std(im)

    prior, mask = load_or_compute_prior_and_mask(test, force_recompute=False)
    seeds = (-1) * mask.astype(int)

    from rwsegment import rwsegment_prior_models as models
    from rwsegment import weight_functions as wflib
    rwparams = {
        'labelset': np.asarray(config.labelset),

        # optimization
        'rtol': 1e-6,
        'maxiter': 1e3,
        'per_label': True,
        'optim_solver': 'unconstrained',
    }

    weight_functions = {
        'std_b10': lambda im: wflib.weight_std(im, beta=10),
        'std_b50': lambda im: wflib.weight_std(im, beta=50),
        'std_b100': lambda im: wflib.weight_std(im, beta=100),
        'inv_b100o1': lambda im: wflib.weight_inv(im, beta=100, offset=1),
        # 'pdiff_r1b10': lambda im: wflib.weight_patch_diff(im, r0=1, beta=10),
        # 'pdiff_r2b10': lambda im: wflib.weight_patch_diff(im, r0=2, beta=10),
        # 'pdiff_r1b50' : lambda im: wflib.weight_patch_diff(im, r0=1, beta=50),
    }

    prior_models = {
        'constant': models.Constant,
        'entropy': models.Entropy_no_D,
        'intensity': models.Intensity,
    }

    ## indices of w
    nlaplacian = len(weight_functions)
    nprior = len(prior_models)
    indices_laplacians = np.arange(nlaplacian)
    indices_priors = np.arange(nlaplacian, nlaplacian + nprior)

    laplacian_functions = weight_functions.values()
    laplacian_names = weight_functions.keys()
    prior_functions = prior_models.values()
    prior_names = prior_models.keys()

    weights_laplacians = np.asarray(w)[indices_laplacians]
    weights_priors = np.asarray(w)[indices_priors]

    def meta_weight_functions(im, _w):
        ''' meta weight function'''
        data = 0
        for iwf, wf in enumerate(laplacian_functions):
            ij, _data = wf(im)
            data += _w[iwf] * _data
        return ij, data

    weight_function = lambda im: meta_weight_functions(im, weights_laplacians)

    from svm_rw_api import MetaAnchor
    anchor_api = MetaAnchor(
        prior,
        prior_functions,
        weights_priors,
        image=nim,
    )

    from rwsegment import rwsegment
    en_rw = rwsegment.energy_rw(nim,
                                y,
                                seeds=seeds,
                                weight_function=weight_function,
                                **rwparams)

    en_anchor = rwsegment.energy_anchor(nim,
                                        y,
                                        anchor_api,
                                        seeds=seeds,
                                        **rwparams)
    obj = en_rw + en_anchor
    return obj
Example #21
def compute_features(test, train, y):
    im = io_analyze.load(config.dir_reg + test + train + 'reggray.hdr')
    nim = im / np.std(im)

    prior, mask = load_or_compute_prior_and_mask(test, force_recompute=False)
    seeds = (-1) * mask.astype(int)

    from rwsegment import rwsegment_prior_models as models
    from rwsegment import weight_functions as wflib
    rwparams = {
        'labelset': np.asarray(config.labelset),
    }

    weight_functions = {
        'std_b10': lambda im: wflib.weight_std(im, beta=10),
        'std_b50': lambda im: wflib.weight_std(im, beta=50),
        'std_b100': lambda im: wflib.weight_std(im, beta=100),
        'inv_b100o1': lambda im: wflib.weight_inv(im, beta=100, offset=1),
        # 'pdiff_r1b10': lambda im: wflib.weight_patch_diff(im, r0=1, beta=10),
        # 'pdiff_r2b10': lambda im: wflib.weight_patch_diff(im, r0=2, beta=10),
        # 'pdiff_r1b50' : lambda im: wflib.weight_patch_diff(im, r0=1, beta=50),
    }

    prior_models = {
        'constant': models.Constant,
        'entropy': models.Entropy_no_D,
        'intensity': models.Intensity,
    }

    ## indices of w
    nlaplacian = len(weight_functions)
    nprior = len(prior_models)
    indices_laplacians = np.arange(nlaplacian)
    indices_priors = np.arange(nlaplacian, nlaplacian + nprior)

    laplacian_functions = weight_functions.values()
    laplacian_names = weight_functions.keys()
    prior_functions = prior_models.values()
    prior_names = prior_models.keys()

    #from svm_rw_api import MetaAnchor
    #anchor_api = MetaAnchor(
    #    prior,
    #    prior_functions,
    #    weights_priors,
    #    image=im,
    #    )

    from rwsegment import rwsegment
    for fname in weight_functions:
        wf = weight_functions[fname]
        en_rw = rwsegment.energy_rw(nim,
                                    y,
                                    seeds=seeds,
                                    weight_function=wf,
                                    **rwparams)
        print fname, en_rw

    for mname in prior_models:
        pm = prior_models[mname](prior, 1., image=nim)
        en_anchor = rwsegment.energy_anchor(nim,
                                            y,
                                            pm,
                                            seeds=seeds,
                                            **rwparams)
        print mname, en_anchor
Example #22
def load_or_compute_prior_and_mask(test,
                                   force_recompute=False,
                                   pca=False,
                                   fold=None):
    if fold is not None:
        test_name = 'f{}_{}/'.format(fold[0][:2], test)
    else:
        test_name = test
        fold = [test]

    #import ipdb; ipdb.set_trace()
    labelset = np.asarray(config.labelset)
    if pca:
        outdir = config.dir_pca_prior + test_name
    else:
        outdir = config.dir_prior + test_name
    if not os.path.isdir(outdir):
        os.makedirs(outdir)

    ## load mask and prior
    prior = None
    file_mask = outdir + 'mask.hdr'
    file_prior = outdir + 'prior.npz'
    file_U = outdir + 'eigenvectors.npy'
    file_segprior = outdir + 'segprior.hdr'
    file_entropymap = outdir + 'entropymap.hdr'

    if force_recompute or not (os.path.exists(file_prior)
                               and os.path.exists(file_mask)):
        if pca:
            _prior, mask = load_or_compute_prior_and_mask(test, fold=fold)
            generator = rwsegment_pca_prior.PriorGenerator(labelset, mask=mask)
        else:
            generator = rwsegment_prior.PriorGenerator(labelset)

        ntrain = 0
        for train in config.vols:
            if train in fold: continue
            logger.debug('load training img: {}'.format(train))

            ## segmentation
            file_seg = config.dir_reg + test + train + 'regseg.hdr'
            seg = io_analyze.load(file_seg)

            ## image (for intensity prior)
            file_im = config.dir_reg + test + train + 'reggray.hdr'
            im = io_analyze.load(file_im)

            generator.add_training_data(seg, image=im, nrandom=5)
            ntrain += 1

        if not pca:
            from scipy import ndimage
            mask = generator.get_mask()
            struct = np.ones((7, ) * mask.ndim)
            mask = ndimage.binary_dilation(
                mask.astype(bool),
                structure=struct,
            ).astype(bool)

        prior = generator.get_prior(mask)
        #import ipdb; ipdb.set_trace()

        nlabel = len(labelset)
        segprior = np.zeros(mask.shape)
        segprior.flat[prior['imask']] = labelset[np.argmax(prior['data'],
                                                           axis=0)]

        entropymap = np.zeros(mask.shape)
        entropymap.flat[prior['imask']] = np.sum(
            np.log(prior['data'] + 1e-10) * prior['data'], axis=0)
        entropymap = entropymap / np.log(nlabel) * 2**15

        if 'eigenvectors' in prior:
            U = prior.pop('eigenvectors')
            print 'size of U {}, dtype={}'.format(U.size, U.dtype)
            np.save(file_U, U)
            prior['eigenvectors'] = U
        np.savez(file_prior, **prior)

        io_analyze.save(file_mask, mask.astype(np.int32))
        io_analyze.save(file_segprior, segprior.astype(np.int32))
        #io_analyze.save(file_entropymap, entropymap.astype(np.int32))

    mask = io_analyze.load(file_mask).astype(bool)
    prior = dict(np.load(file_prior))
    if pca:
        U = np.load(file_U)
        prior['eigenvectors'] = U
    return prior, mask
    def make_training_set(self, test, fold=None):
        if fold is None:
            fold = [test]

        ## training images and segmentations
        if self.isroot:
            slice_border = 20  # do not consider top and bottom slices
            images = []
            segmentations = []
            metadata = []

            for train in self.training_vols:
                if train in fold: continue
                logger.info('loading training data: {}'.format(train))

                ## file names
                file_seg = self.dir_reg + test + train + 'regseg.hdr'
                file_im = self.dir_reg + test + train + 'reggray.hdr'

                ## load image
                im = io_analyze.load(file_im)
                im = im / np.std(im)  # normalize image by std

                ## load segmentation
                seg = io_analyze.load(file_seg).astype(int)
                seg.flat[~np.in1d(seg.ravel(), self.labelset)] = self.labelset[0]

                if self.crop:
                    ## split training images into smaller sets of slices
                    pmask = -1 * np.ones(seg.shape, dtype=int)
                    pmask.flat[self.prior['imask']] = np.arange(len(self.prior['imask']))
                    nslice = im.shape[0]
                    for i in range(nslice / self.slice_step):
                        istart = i * self.slice_step
                        iend = np.minimum(nslice, i * self.slice_step + self.slice_size)
                        if istart < slice_border or istart > (im.shape[0] - slice_border):
                            continue
                        islices = np.arange(istart, iend)
                        if np.all(seg[islices] == self.labelset[0]) or \
                           np.all(self.seeds[islices] >= 0):
                            continue
                        logger.debug('ivol {}, slices: start end: {} {}'.format(
                            len(images), istart, iend))
                        bin = (seg[islices].ravel() == np.c_[self.labelset])  # make bin vector z
                        pmaski = pmask[islices]
                        imask = np.where(pmaski.ravel() >= 0)[0]  # prior voxels in this slab (-1 marks voxels outside the prior)
                        iimask = pmaski.flat[imask]

                        ## append to training set
                        images.append(im[islices])
                        segmentations.append(bin)
                        metadata.append({'islices': islices, 'imask': imask, 'iimask': iimask})

                        ## break loop
                        if len(images) == self.select_vol.stop:
                            break

                else:
                    bin = (seg.ravel() == np.c_[self.labelset])  # make bin vector z
                    ## append to training set
                    images.append(im)
                    segmentations.append(bin)
                    metadata.append({})

                ## break loop
                if len(images) == self.select_vol.stop:
                    break

            ## subsample at most nmaxvol distinct training examples
            nmaxvol = 100
            if len(images) > nmaxvol:
                iselect = np.sort(np.random.permutation(len(images))[:nmaxvol])
                logger.info('selected training: {}'.format(iselect))
                images = [images[i] for i in iselect]
                segmentations = [segmentations[i] for i in iselect]
                metadata = [metadata[i] for i in iselect]

            ntrain = len(images)
            logger.info('Learning with {} training examples'.format(ntrain))
            self.training_set = (images, segmentations, metadata)
Example #27
        if '--gt' in sys.argv:
           use_gt = True
        if '--method' in sys.argv:
            i = sys.argv.index('--method')
            method = sys.argv[i+1]
        if '--slice' in sys.argv:
            i = sys.argv.index('--slice')
            islice = int(sys.argv[i+1])
        if '--test' in sys.argv:
            i = sys.argv.index('--test')
            test = sys.argv[i+1] + '/'
        print sys.argv


        from rwsegment import io_analyze
        dir = methods[method][2] + methods[method][1] + '/'
        print dir
        print test
        print islice
        sol = io_analyze.load(dir + test + 'sol.hdr')[islice]
        seg = io_analyze.load(config.dir_reg + test + 'seg.hdr')[islice]
        gray = io_analyze.load(config.dir_reg + test + 'gray.hdr')[islice]
        if use_gt:
            method = 'gt'
            plot_cross(gray, seg, seg)
        else:
            plot_cross(gray, sol, seg)
        pyplot.savefig('/home/baudinpy/plots/cross_{}_{}_{}.png'.format(method, test[:-1], islice))
        print 'saving to: {}'.format('/home/baudinpy/plots/cross_{}_{}_{}.png'.format(method, test[:-1], islice))

    def run_svm_inference(self, test, w, test_dir):
        logger.info('running inference on: {}'.format(test))
        
        ## normalize w
        # w = w / np.sqrt(np.dot(w,w))
        strw = ' '.join('{:.3}'.format(val) for val in np.asarray(w)*self.psi_scale)
        logger.debug('scaled w=[{}]'.format(strw))
    
        weights_laplacians = np.asarray(w)[self.indices_laplacians]
        weights_laplacians_h = np.asarray(self.hand_tuned_w)[self.indices_laplacians]
        weights_priors = np.asarray(w)[self.indices_priors]
        weights_priors_h = np.asarray(self.hand_tuned_w)[self.indices_priors]
    
        ## segment test image with trained w
        '''
        def meta_weight_functions(im,i,j,_w):    
            data = 0
            for iwf,wf in enumerate(self.laplacian_functions):
                _data = wf(im,i,j)
                data += _w[iwf]*_data
            return data
        weight_function = lambda im: meta_weight_functions(im,i,j,weights_laplacians)
        weight_function_h = lambda im: meta_weight_functions(im,i,j,weights_laplacians_h)
        '''
        weight_function = MetaLaplacianFunction(
            weights_laplacians,
            self.laplacian_functions)
        
        weight_function_h = MetaLaplacianFunction(
            weights_laplacians_h,
            self.laplacian_functions)
        
        ## load images and ground truth
        file_seg = self.dir_reg + test + 'seg.hdr'
        file_im  = self.dir_reg + test + 'gray.hdr'
        im  = io_analyze.load(file_im)
        seg = io_analyze.load(file_seg)
        seg.flat[~np.in1d(seg.ravel(),self.labelset)] = self.labelset[0]
        
        nim = im/np.std(im) # normalize image by std

        ## test training data ?
        inference_train = True
        if inference_train:
            train_ims, train_segs, train_metas = self.training_set
            for tim, tz, tmeta in zip(train_ims, train_segs, train_metas):
                ## retrieve metadata
                islices = tmeta.pop('islices',None)
                imask = tmeta.pop('imask', None)
                iimask = tmeta.pop('iimask',None)
                if islices is not None:
                    tseeds = self.seeds[islices]
                    tprior = {
                        'data': np.asarray(self.prior['data'])[:,iimask],
                        'imask': imask,
                        'variance': np.asarray(self.prior['variance'])[:,iimask],
                        'labelset': self.labelset,
                        }
                    if 'intensity' in self.prior: 
                        tprior['intensity'] = self.prior['intensity']
                else:
                    tseeds = self.seeds
                    tprior = self.prior

                ## prior
                tseg = self.labelset[np.argmax(tz, axis=0)].reshape(tim.shape)
                tanchor_api = MetaAnchor(
                    tprior,
                    self.prior_functions,
                    weights_priors,
                    image=tim,
                    )
                tsol,ty = rwsegment.segment(
                    tim, 
                    tanchor_api, 
                    seeds=tseeds,
                    weight_function=weight_function,
                    **self.rwparams_inf
                    )
                ## compute Dice coefficient
                tdice = compute_dice_coef(tsol, tseg, labelset=self.labelset)
                logger.info('Dice coefficients for train: \n{}'.format(tdice))
                nlabel = len(self.labelset)
                tflatmask = np.zeros(ty.shape, dtype=bool)
                tflatmask[:,imask] = True
                loss0 = loss_functions.ideal_loss(tz, ty, mask=tflatmask)
                logger.info('Tloss = {}'.format(loss0))
                ## loss 1: squared difference with ztilde (anchor loss)
                loss1 = loss_functions.anchor_loss(tz, ty, mask=tflatmask)
                logger.info('SDloss = {}'.format(loss1))
                ## loss 2: laplacian loss
                loss2 = loss_functions.laplacian_loss(tz, ty, mask=tflatmask)
                logger.info('LAPloss = {}'.format(loss2))


                tanchor_api_h = MetaAnchor(
                    tprior,
                    self.prior_functions,
                    weights_priors_h,
                    image=tim,
                    )
            
                tsol,ty = rwsegment.segment(
                    tim, 
                    tanchor_api_h, 
                    seeds=tseeds,
                    weight_function=weight_function_h,
                    **self.rwparams_inf
                    )
                ## compute Dice coefficient
                tdice = compute_dice_coef(tsol, tseg, labelset=self.labelset)
                logger.info('Dice coefficients for train (hand-tuned): \n{}'.format(tdice))
                loss0 = loss_functions.ideal_loss(tz, ty, mask=tflatmask)
                logger.info('Tloss (hand-tuned) = {}'.format(loss0))
                ## loss 1: squared difference with ztilde (anchor loss)
                loss1 = loss_functions.anchor_loss(tz, ty, mask=tflatmask)
                logger.info('SDloss (hand-tuned) = {}'.format(loss1))
                ## loss 2: laplacian loss
                loss2 = loss_functions.laplacian_loss(tz, ty, mask=tflatmask)
                logger.info('LAPloss (hand-tuned) = {}'.format(loss2))
                break  # only evaluate the first training example
 
        ## prior
        anchor_api = MetaAnchor(
            self.prior,
            self.prior_functions,
            weights_priors,
            image=nim,
            )
    
        sol,y = rwsegment.segment(
            nim, 
            anchor_api, 
            seeds=self.seeds,
            weight_function=weight_function,
            **self.rwparams_inf
            )
        
        ## compute Dice coefficient
        dice = compute_dice_coef(sol, seg, labelset=self.labelset)
        logger.info('Dice coefficients: \n{}'.format(dice))

        ## objective
        en_rw = rwsegment.energy_rw(
            nim, y, seeds=self.seeds,weight_function=weight_function, **self.rwparams_inf)
        en_anchor = rwsegment.energy_anchor(
            nim, y, anchor_api, seeds=self.seeds, **self.rwparams_inf)
        obj = en_rw + en_anchor
        logger.info('Objective = {:.3}'.format(obj))

        
        ## compute losses
        z = seg.ravel() == np.c_[self.labelset]
        mask = self.seeds < 0
        flatmask = mask.ravel() * np.ones((len(self.labelset), 1))

        ## loss 0: 1 - Dice(y,z)
        loss0 = loss_functions.ideal_loss(z, y, mask=flatmask)
        logger.info('Tloss = {}'.format(loss0))

        ## loss 1: squared difference with ztilde (anchor loss)
        loss1 = loss_functions.anchor_loss(z, y, mask=flatmask)
        logger.info('SDloss = {}'.format(loss1))

        ## loss 2: laplacian loss
        loss2 = loss_functions.laplacian_loss(z, y, mask=flatmask)
        logger.info('LAPloss = {}'.format(loss2))

        ## loss 3: linear loss
        loss3 = loss_functions.linear_loss(z, y, mask=flatmask)
        logger.info('LINloss = {}'.format(loss3))
       
        ## saving
        if self.debug:
            pass  # skip saving in debug runs
        elif self.isroot:
            outdir = self.dir_inf + test_dir
            logger.info('saving data in: {}'.format(outdir))
            if not os.path.isdir(outdir):
                os.makedirs(outdir)
                
            #io_analyze.save(outdir + 'im.hdr',im.astype(np.int32))
            #np.save(outdir + 'y.npy',y)        
            #io_analyze.save(outdir + 'sol.hdr',sol.astype(np.int32))
            np.savetxt(outdir + 'objective.txt', [obj])
            np.savetxt(outdir + 'dice.txt',
                       np.c_[dice.keys(), dice.values()],
                       fmt='%d %f')
        
            f = open(outdir + 'losses.txt', 'w')
            f.write('ideal_loss\t{}\n'.format(loss0))
            f.write('anchor_loss\t{}\n'.format(loss1))
            f.write('laplacian_loss\t{}\n'.format(loss2))
            f.write('linear_loss\t{}\n'.format(loss3))
            f.close()
Example #29
    def load_or_compute_classifier(self, train, test, mask=None):
        from rwsegment import boundary_utils
        reload(boundary_utils)

        #idir_prior = config.dir_prior_edges + test
        #if not os.path.isdir(dir_prior):
        #    os.makedirs(dir_prior)

        ## Train classifier
        logger.info('train classifier with train {} for test {}'.format(
            train, test))

        ##load image and seg
        im = io_analyze.load(config.dir_reg + test + train +
                             'reggray.hdr').astype(float)
        nim = im / np.std(im)
        seg = io_analyze.load(config.dir_reg + test + train + 'regseg.hdr')
        seg.flat[~np.in1d(seg, self.labelset)] = self.labelset[0]

        ## sample points
        points = boundary_utils.sample_points(im, self.step, mask=mask)
        logger.debug('number of sampled points = {}'.format(len(points)))

        #impoints = np.zeros(im.shape,dtype=int)
        #impoints[tuple(points.T)] = np.arange(len(points)) + 1

        ## compute edges
        edges, edgev, labels = boundary_utils.get_edges(im, points, mask=mask)
        logger.debug('number of edges = {}'.format(len(edges)))

        ## extract profiles
        profiles, emap, dists = boundary_utils.get_profiles(nim,
                                                            points,
                                                            edges,
                                                            rad=0)
        logger.debug('extracted profiles')

        ## make features
        x = boundary_utils.make_features(
            profiles,
            size=self.sizex,
            additional=[dists, edgev, edgev / dists])
        logger.debug('features made, size = {}'.format(len(x[0])))

        ## make annotations
        z = boundary_utils.is_boundary(points, edges, seg)
        logger.debug('annotations made')

        ## learn profiles
        logger.debug('training classifier')
        classifier = boundary_utils.Classifier()
        classifier.train(x, z)

        ## test classification
        logger.debug('testing classifier')
        cl, scores = classifier.classify(x)

        logger.info('non boundary correct rate: {:.3}'.format(
            np.sum((np.r_[cl] == 0) & (np.r_[z] == 0)) /
            np.sum(np.r_[z] == 0).astype(float)))
        logger.info('boundary correct rate: {:.3}'.format(
            np.sum((np.r_[cl] == 1) & (np.r_[z] == 1)) /
            np.sum(np.r_[z] == 1).astype(float)))

        ## store classifier
        #np.savetxt(dir_prior + 'classifier.txt', classifier.w)
        return classifier