Example #1
    def run_svm_inference(self, test, w):
        logger.info('running inference on: {}'.format(test))

        outdir = self.dir_inf + test
        if not os.path.isdir(outdir):
            os.makedirs(outdir)

        ## segment test image with trained w
        def wwf(im, _w):
            ''' meta weight function'''
            data = 0
            for iwf, wf in enumerate(self.weight_functions.values()):
                ij, _data = wf(im)
                data += _w[iwf] * _data
            return ij, data

        ## load images and ground truth
        file_seg = self.dir_reg + test + 'seg.hdr'
        file_im = self.dir_reg + test + 'gray.hdr'
        im = io_analyze.load(file_im)
        seg = io_analyze.load(file_seg)
        seg.flat[~np.in1d(seg.ravel(), self.labelset)] = self.labelset[0]

        ## normalize image by its standard deviation
        im = im / np.std(im)

        ## prior
        anchor_api = BaseAnchorAPI(
            self.prior,
            anchor_weight=w[-1],
        )

        sol, y = rwsegment.segment(im,
                                   anchor_api,
                                   seeds=self.seeds,
                                   weight_function=lambda im: wwf(im, w),
                                   **self.rwparams_inf)

        np.save(outdir + 'y.test.npy', y)
        io_analyze.save(outdir + 'sol.test.hdr', sol.astype(np.int32))

        ## compute Dice coefficient
        dice = compute_dice_coef(sol, seg, labelset=self.labelset)
        np.savetxt(outdir + 'dice.test.txt',
                   np.c_[dice.keys(), dice.values()],
                   fmt='%d %.8f')

        ## compare inference with the gold standard
        dice_gold = np.loadtxt(outdir + 'dice.gold.txt')
        y_gold = np.load(outdir + 'y.gold.npy')
        sol_gold = io_analyze.load(outdir + 'sol.gold.hdr')

        np.testing.assert_allclose(dice.values(),
                                   dict(dice_gold).values(),
                                   err_msg='FAIL: dice coef mismatch',
                                   atol=1e-8)
        np.testing.assert_allclose(y, y_gold, err_msg='FAIL: y mismatch')
        np.testing.assert_equal(sol, sol_gold, err_msg='FAIL: sol mismatch')

        print('PASS: inference tests')
    def compute_exact_aci(self, w, x, z, y0, **kwargs):
        islices = kwargs.pop('islices', None)
        iimask = kwargs.pop('iimask', None)
        imask = kwargs.pop('imask', None)
        if islices is not None:
            seeds = self.seeds[islices]
            mask = [
                self.immask[islices].ravel() for i in range(len(self.labelset))
            ]
            prior = {
                'data': np.asarray(self.prior['data'])[:, iimask],
                'imask': imask,
                'variance': np.asarray(self.prior['variance'])[:, iimask],
                'labelset': self.labelset,
            }
            if 'intensity' in self.prior:
                prior['intensity'] = self.prior['intensity']
        else:
            mask = self.mask
            seeds = self.seeds
            prior = self.prior

        weight_function = MetaLaplacianFunction(
            np.asarray(w)[self.indices_laplacians],
            self.laplacian_functions,
        )

        ## combine all prior models
        anchor_api = MetaAnchor(
            prior=prior,
            prior_models=self.prior_models,
            prior_weights=np.asarray(w)[self.indices_priors],
            image=x,
        )

        ## annotation consistent inference
        y = rwsegment.segment(
            x,
            anchor_api,
            seeds=seeds,
            weight_function=weight_function,
            return_arguments=['y'],
            ground_truth=z,
            ground_truth_init=y0,
            #laplacian_label_weights=,
            **self.rwparams)
        return y
Example #3
    def compute_exact_aci(self,w,x,z,y0,**kwargs):
        islices = kwargs.pop('islices',None)
        iimask = kwargs.pop('iimask',None)
        imask = kwargs.pop('imask',None)
        if islices is not None:
            seeds = self.seeds[islices]
            mask = [self.immask[islices].ravel() for i in range(len(self.labelset))]
            prior = {
                'data': np.asarray(self.prior['data'])[:,iimask],
                'imask': imask,
                'variance': np.asarray(self.prior['variance'])[:,iimask],
                'labelset': self.labelset,
                }
            if 'intensity' in self.prior:
                prior['intensity'] = self.prior['intensity']
        else:
            mask = self.mask
            seeds = self.seeds
            prior = self.prior

        weight_function = MetaLaplacianFunction(
            np.asarray(w)[self.indices_laplacians],
            self.laplacian_functions,
            )

        ## combine all prior models
        anchor_api = MetaAnchor(
            prior=prior,
            prior_models=self.prior_models,
            prior_weights=np.asarray(w)[self.indices_priors],
            image=x,
            )

        ## annotation consistent inference
        y = rwsegment.segment(
            x,
            anchor_api,
            seeds=seeds,
            weight_function=weight_function,
            return_arguments=['y'],
            ground_truth=z,
            ground_truth_init=y0,
            #laplacian_label_weights=,
            **self.rwparams
            )
        return y
    def run_svm_inference(self,test,w, test_dir):
        logger.info('running inference on: {}'.format(test))
        
        ## normalize w
        # w = w / np.sqrt(np.dot(w,w))
        strw = ' '.join('{:.3}'.format(val) for val in np.asarray(w)*self.psi_scale)
        logger.debug('scaled w=[{}]'.format(strw))
    
        weights_laplacians = np.asarray(w)[self.indices_laplacians]
        weights_laplacians_h = np.asarray(self.hand_tuned_w)[self.indices_laplacians]
        weights_priors = np.asarray(w)[self.indices_priors]
        weights_priors_h = np.asarray(self.hand_tuned_w)[self.indices_priors]
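        ## split w into pairwise (Laplacian) weights and prior (anchor) weights
        ## via the precomputed index lists; the hand-tuned weights (_h) serve as
        ## a reference for the comparison on training data below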
    
        ## segment test image with trained w
        '''
        def meta_weight_functions(im,i,j,_w):    
            data = 0
            for iwf,wf in enumerate(self.laplacian_functions):
                _data = wf(im,i,j)
                data += _w[iwf]*_data
            return data
        weight_function = lambda im: meta_weight_functions(im,i,j,weights_laplacians)
        weight_function_h = lambda im: meta_weight_functions(im,i,j,weights_laplacians_h)
        '''
        weight_function = MetaLaplacianFunction(
            weights_laplacians,
            self.laplacian_functions)
        
        weight_function_h = MetaLaplacianFunction(
            weights_laplacians_h,
            self.laplacian_functions)
        
        ## load images and ground truth
        file_seg = self.dir_reg + test + 'seg.hdr'
        file_im  = self.dir_reg + test + 'gray.hdr'
        im  = io_analyze.load(file_im)
        seg = io_analyze.load(file_seg)
        seg.flat[~np.in1d(seg.ravel(),self.labelset)] = self.labelset[0]
        
        nim = im/np.std(im) # normalize image by std

        ## also evaluate on the training data?
        inference_train = True
        if inference_train:
            train_ims, train_segs, train_metas = self.training_set
            for tim, tz, tmeta in zip(train_ims, train_segs, train_metas):
                ## retrieve metadata
                islices = tmeta.pop('islices',None)
                imask = tmeta.pop('imask', None)
                iimask = tmeta.pop('iimask',None)
                if islices is not None:
                    tseeds = self.seeds[islices]
                    tprior = {
                        'data': np.asarray(self.prior['data'])[:,iimask],
                        'imask': imask,
                        'variance': np.asarray(self.prior['variance'])[:,iimask],
                        'labelset': self.labelset,
                        }
                    if 'intensity' in self.prior: 
                        tprior['intensity'] = self.prior['intensity']
                else:
                    tseeds = self.seeds
                    tprior = self.prior

                ## prior
                tseg = self.labelset[np.argmax(tz, axis=0)].reshape(tim.shape)
                tanchor_api = MetaAnchor(
                    tprior,
                    self.prior_functions,
                    weights_priors,
                    image=tim,
                    )
                tsol,ty = rwsegment.segment(
                    tim, 
                    tanchor_api, 
                    seeds=tseeds,
                    weight_function=weight_function,
                    **self.rwparams_inf
                    )
                ## compute Dice coefficient
                tdice = compute_dice_coef(tsol, tseg, labelset=self.labelset)
                logger.info('Dice coefficients for train: \n{}'.format(tdice))
                nlabel = len(self.labelset)
                tflatmask = np.zeros(ty.shape, dtype=bool)
                tflatmask[:,imask] = True
                loss0 = loss_functions.ideal_loss(tz,ty,mask=tflatmask)
                logger.info('Tloss = {}'.format(loss0))
                ## loss2: squared difference with ztilde
                loss1 = loss_functions.anchor_loss(tz,ty,mask=tflatmask)
                logger.info('SDloss = {}'.format(loss1))
                ## loss3: laplacian loss
                loss2 = loss_functions.laplacian_loss(tz,ty,mask=tflatmask)
                logger.info('LAPloss = {}'.format(loss2))


                tanchor_api_h = MetaAnchor(
                    tprior,
                    self.prior_functions,
                    weights_priors_h,
                    image=tim,
                    )
            
                tsol,ty = rwsegment.segment(
                    tim, 
                    tanchor_api_h, 
                    seeds=tseeds,
                    weight_function=weight_function_h,
                    **self.rwparams_inf
                    )
                ## compute Dice coefficient
                tdice = compute_dice_coef(tsol, tseg, labelset=self.labelset)
                logger.info('Dice coefficients for train (hand-tuned): \n{}'.format(tdice))
                loss0 = loss_functions.ideal_loss(tz,ty,mask=tflatmask)
                logger.info('Tloss (hand-tuned) = {}'.format(loss0))
                ## loss2: squared difference with ztilde
                loss1 = loss_functions.anchor_loss(tz,ty,mask=tflatmask)
                logger.info('SDloss (hand-tuned) = {}'.format(loss1))
                ## loss3: laplacian loss
                loss2 = loss_functions.laplacian_loss(tz,ty,mask=tflatmask)
                logger.info('LAPloss (hand-tuned) = {}'.format(loss2))
                break
 
        ## prior
        anchor_api = MetaAnchor(
            self.prior,
            self.prior_functions,
            weights_priors,
            image=nim,
            )
    
        sol,y = rwsegment.segment(
            nim, 
            anchor_api, 
            seeds=self.seeds,
            weight_function=weight_function,
            **self.rwparams_inf
            )
        
        ## compute Dice coefficient
        dice = compute_dice_coef(sol, seg,labelset=self.labelset)
        logger.info('Dice coefficients: \n{}'.format(dice))

        ## objective
        en_rw = rwsegment.energy_rw(
            nim, y, seeds=self.seeds,weight_function=weight_function, **self.rwparams_inf)
        en_anchor = rwsegment.energy_anchor(
            nim, y, anchor_api, seeds=self.seeds, **self.rwparams_inf)
        obj = en_rw + en_anchor
        logger.info('Objective = {:.3}'.format(obj))

        
        ## compute losses
        z = seg.ravel()==np.c_[self.labelset]
        mask = self.seeds < 0
        flatmask = mask.ravel()*np.ones((len(self.labelset),1))
        
        ## loss 0 : 1 - Dice(y,z)
        loss0 = loss_functions.ideal_loss(z,y,mask=flatmask)
        logger.info('Tloss = {}'.format(loss0))
        
        ## loss2: squared difference with ztilde
        loss1 = loss_functions.anchor_loss(z,y,mask=flatmask)
        logger.info('SDloss = {}'.format(loss1))
        
        ## loss3: laplacian loss
        loss2 = loss_functions.laplacian_loss(z,y,mask=flatmask)
        logger.info('LAPloss = {}'.format(loss2))

        ## loss4: linear loss
        loss3 = loss_functions.linear_loss(z,y,mask=flatmask)
        logger.info('LINloss = {}'.format(loss3))
       
        ## saving
        if self.debug:
            pass
        elif self.isroot:
            outdir = self.dir_inf + test_dir
            logger.info('saving data in: {}'.format(outdir))
            if not os.path.isdir(outdir):
                os.makedirs(outdir)
                
            #io_analyze.save(outdir + 'im.hdr',im.astype(np.int32))
            #np.save(outdir + 'y.npy',y)        
            #io_analyze.save(outdir + 'sol.hdr',sol.astype(np.int32))
            np.savetxt(outdir + 'objective.txt', [obj])
            np.savetxt(
                outdir + 'dice.txt', 
                np.c_[dice.keys(),dice.values()],fmt='%d %f')
        
            f = open(outdir + 'losses.txt', 'w')
            f.write('ideal_loss\t{}\n'.format(loss0))
            f.write('anchor_loss\t{}\n'.format(loss1))
            f.write('laplacian_loss\t{}\n'.format(loss2))
            f.close()
Example #5
    def compute_approximate_aci(self, w,x,z,y0,**kwargs):
        logger.info("using approximate aci (Danny's)")
        islices = kwargs.pop('islices',None)
        imask = kwargs.pop('imask',None)
        iimask = kwargs.pop('iimask',None)
        if islices is not None:
            seeds = self.seeds[islices]
            mask = [self.immask[islices].ravel() for i in range(len(self.labelset))]
            prior = {
                'data': np.asarray(self.prior['data'])[:,iimask],
                'imask': imask,
                'variance': np.asarray(self.prior['variance'])[:,iimask],
                'labelset': self.labelset,
                }
            if 'intensity' in self.prior: prior['intensity'] = self.prior['intensity']
        else:
            mask = self.mask
            seeds = self.seeds
            prior = self.prior
        
        weight_function = MetaLaplacianFunction(
            np.asarray(w)[self.indices_laplacians],
            self.laplacian_functions,
            )
        
        ## combine all prior models
        anchor_api = MetaAnchor(
            prior=prior,
            prior_models=self.prior_models,
            prior_weights=np.asarray(w)[self.indices_priors],
            image=x,
            )

        class GroundTruthAnchor(object):
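            '''Anchor API wrapper that blends the wrapped prior anchor with the
            ground truth, weighted per node by gt_weights.'''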
            def __init__(self, anchor_api, gt, gt_weights):
                self.anchor_api = anchor_api
                self.gt = gt
                self.gt_weights = gt_weights
            def get_labelset(self): 
                return self.anchor_api.get_labelset()

            def get_anchor_and_weights(self, D, indices):
                anchor, weights = self.anchor_api.get_anchor_and_weights(D,indices)
                gt_weights = self.gt_weights[:,indices]
                gt = self.gt[:,indices]
                new_weights = weights + gt_weights
                new_anchor = (anchor * weights + gt*gt_weights) / new_weights
                return new_anchor, new_weights
                
        self.approx_aci_maxiter = 200
        self.approx_aci_maxstep = 1e-2
        z_weights = np.zeros(np.asarray(z).shape)
        z_label = np.argmax(z,axis=0)
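        ## iteratively grow the per-node ground-truth anchor weights where the
        ## inferred labeling disagrees with z, until the ideal loss vanishes
        ## (or maxiter is reached)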
        for i in range(self.approx_aci_maxiter):
            logger.debug("approx aci, iter={}".format(i))
    
            ## add ground truth to anchor api
            modified_api = GroundTruthAnchor(anchor_api, z, z_weights)

            ## inference
            y_ = rwsegment.segment(
                x, 
                modified_api,
                seeds=seeds,
                weight_function=weight_function,
                return_arguments=['y'],
                **self.rwparams
                )

            ## loss            
            #loss = self.compute_loss(z,y_, islices=islices)
            loss = loss_functions.ideal_loss(z,y_,mask=mask)
            logger.debug('loss = {}'.format(loss))
            if loss < 1e-8: 
                break
            
            ## update weights
            delta = np.max(y_ - y_[z_label, np.arange(y_.shape[1])], axis=0)
            delta = np.clip(delta, 0, self.approx_aci_maxstep)
            z_weights += delta

        return y_        
Example #6
    def compute_approximate_aci2(self, w,x,z,y0,**kwargs):
        logger.info('using approximate aci')
        islices = kwargs.pop('islices',None)
        imask = kwargs.pop('imask',None)
        iimask = kwargs.pop('iimask',None)
        if islices is not None:
            seeds = self.seeds[islices]
            mask = [self.immask[islices].ravel() for i in range(len(self.labelset))]
            prior = {
                'data': np.asarray(self.prior['data'])[:,iimask],
                'imask': imask,
                'variance': np.asarray(self.prior['variance'])[:,iimask],
                'labelset': self.labelset,
                }
            if 'intensity' in self.prior: prior['intensity'] = self.prior['intensity']
        else:
            mask = self.mask
            seeds = self.seeds
            prior = self.prior
        
        weight_function = MetaLaplacianFunction(
            np.asarray(w)[self.indices_laplacians],
            self.laplacian_functions,
            )
        
        ## combine all prior models
        anchor_api = MetaAnchor(
            prior=prior,
            prior_models=self.prior_models,
            prior_weights=np.asarray(w)[self.indices_priors],
            image=x,
            )

        ## unconstrained inference
        y_ = rwsegment.segment(
            x, 
            anchor_api,
            seeds=seeds,
            weight_function=weight_function,
            return_arguments=['y'],
            #laplacian_label_weights=,
            **self.rwparams
            )

        ## fix correct labels
        gt = np.argmax(z,axis=0)
        icorrect = np.argmax(y_,axis=0)==gt
        seeds_correct = -np.ones(seeds.shape, dtype=int)
        seeds_correct.flat[icorrect] = self.labelset[gt[icorrect]]
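        ## nodes already labeled correctly by the unconstrained pass are clamped
        ## as hard seeds for the constrained pass below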

        ## annotation consistent inference
        #import ipdb; ipdb.set_trace()
        y = rwsegment.segment(
            x, 
            anchor_api,
            seeds=seeds_correct,
            weight_function=weight_function,
            return_arguments=['y'],
            ground_truth=z,
            ground_truth_init=y0,
            seeds_prob=y_,
            #laplacian_label_weights=,
            **self.rwparams
            )
        y[:,icorrect] = y_[:,icorrect]
        #import ipdb; ipdb.set_trace()
        return y                
Example #7
    def full_lai(self, w,x,z, switch_loss=False, iter=-1, **kwargs):
        ''' full Loss Augmented Inference
         y_ = arg min <w|-psi(x,y_)> - loss(y,y_) '''

        if np.max(np.abs(w)) < 1e-10:
            # if w is numerically zero, return a random per-node distribution
            # with zero mass on the ground-truth label
            y_ = np.random.random((len(z),len(z[0])))
            y_[np.argmax(z, axis=0), np.arange(len(z[0]))] = 0
            y_ = y_ /np.sum(y_,axis=0)
            return y_
         
        islices = kwargs.pop('islices',None)
        imask = kwargs.pop('imask',None)
        iimask = kwargs.pop('iimask',None)
        if islices is not None:   
            im = x
            seeds = self.seeds[islices]
            mask = [self.immask[islices].ravel() for i in range(len(self.labelset))]
            prior = {
                'data': np.asarray(self.prior['data'])[:,iimask],
                'imask':imask,
                'variance': np.asarray(self.prior['variance'])[:,iimask],
                'labelset': self.labelset,
                }
            if 'intensity' in self.prior: prior['intensity'] = self.prior['intensity']
            seg = z
        else:
            im = x
            mask = self.mask
            seeds = self.seeds
            prior = self.prior
            seg = z
           
        ## combine all weight functions
        weight_function = MetaLaplacianFunction(
            np.asarray(w)[self.indices_laplacians],
            self.laplacian_functions,
            )
                
        ## loss type
        addlin      = None
        loss        = None
        loss_weight = None
        L_loss      = None
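        ## depending on loss_type, the loss enters the objective through the
        ## anchor term (squareddiff), an extra Laplacian term (laplacian), or an
        ## extra linear term (linear); 'ideal' and 'none' add no loss term here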
        
        loss_type = self.loss_type
        if loss_type in ['ideal', 'none']:
            pass
        elif loss_type=='squareddiff':
            loss, loss_weight = loss_functions.compute_loss_anchor(seg, mask=mask)
            loss_weight *= self.loss_factor
        elif loss_type=='laplacian':
            L_loss = - loss_functions.compute_loss_laplacian(seg, mask=mask) *\
                 self.loss_factor
        elif loss_type=='linear':
            addlin, linw = loss_functions.compute_loss_linear(seg, mask=mask)
            addlin *= linw * self.loss_factor
        else:
            raise Exception('did not recognize loss type {}'.format(loss_type))

        ## loss function        
        anchor_api = MetaAnchor(
            prior=prior,
            prior_models=self.prior_models,
            prior_weights=np.asarray(w)[self.indices_priors],
            loss=loss,
            loss_weight=loss_weight,
            image=im,
            ) 
        ## best y_ most different from y
        y_ = rwsegment.segment(
            im, 
            anchor_api,
            seeds=seeds,
            weight_function=weight_function,
            return_arguments=['y'],
            additional_laplacian=L_loss,
            additional_linear=addlin,
            #laplacian_label_weights=,
            **self.rwparams
            )
            
        return y_
Example #8
    def process_sample(self, train, test):
        outdir = config.dir_work + 'autoseeds/' + config.basis + '/' + train + '/' + test
        logger.info('saving data in: {}'.format(outdir))
        if not os.path.isdir(outdir):
            os.makedirs(outdir)

        ## get prior
        from scipy import ndimage
        segtrain = io_analyze.load(config.dir_reg + test + train +
                                   '/regseg.hdr')
        segtrain.flat[~np.in1d(segtrain, self.labelset)] = self.labelset[0]
        struct = np.ones((10, ) * segtrain.ndim)
        mask = ndimage.binary_dilation(
            segtrain > 0,
            structure=struct,
        ).astype(bool)

        #prior, mask = load_or_compute_prior_and_mask(
        #    test,force_recompute=self.force_recompute_prior)
        #mask = mask.astype(bool)

        ## load image
        file_name = config.dir_reg + test + 'gray.hdr'
        logger.info('segmenting data: {}'.format(file_name))
        im = io_analyze.load(file_name).astype(float)
        file_gt = config.dir_reg + test + 'seg.hdr'
        seg = io_analyze.load(file_gt)
        seg.flat[~np.in1d(seg, self.labelset)] = self.labelset[0]

        ## normalize image
        nim = im / np.std(im)

        #orient_scores = self.load_or_compute_orientations(train,test, mask=mask)

        if 1:  #not os.path.isfile(outdir + 'points.npy'):

            from rwsegment import boundary_utils
            reload(boundary_utils)
            ## sample points
            points = boundary_utils.sample_points(im, self.step, mask=mask)
            points = points[mask[tuple(points.T)]]
            impoints = np.zeros(im.shape, dtype=int)
            impoints[tuple(points.T)] = np.arange(len(points)) + 1
            ipoints = np.where(impoints.ravel())[0]
            points = np.argwhere(impoints)
            np.save(outdir + 'points.npy', points)
            impoints[tuple(points.T)] = np.arange(len(points)) + 1

            ## set unary potentials from prior: array of unary costs
            nlabel = len(self.labelset)
            dist = self.distance_to_train(segtrain, points)
            T = 10.0
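            ## turn the point-to-training-label distances into per-point label
            ## probabilities using a Gaussian kernel of width T (rows sum to 1)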
            prob_pts = np.exp(-(dist / T)**2) / np.c_[np.sum(
                np.exp(-(dist / T)**2), axis=1)]
            #prob = np.c_[np.ones(im.size), np.zeros((im.size, nlabel-1))]
            #prob[mask.ravel(),:] = prior['data'].T
            #prob_pts = prob[ipoints,:]
            np.save(outdir + 'prob_points.npy', prob_pts)

            ## binary potentials
            ## compute edges
            edges, edgev, labels = boundary_utils.get_edges(im,
                                                            points,
                                                            mask=mask)
            edges = np.sort(edges, axis=1)
            np.save(outdir + 'edges.npy', edges)

            ## get orientation hist
            orient_scores, hist = self.load_or_compute_orientations(train,
                                                                    test,
                                                                    mask=mask)

            ##classify edges
            vecs = points[edges[:, 1]] - points[edges[:, 0]]
            vecs = vecs / np.c_[np.sqrt(np.sum(vecs**2, axis=1))]
            scores = self.get_orient_scores(vecs)
            prob_orient = np.dot(scores, orient_scores)
            #prob_orient = prob_orient/np.c_[np.sum(prob_orient, axis=1)]
            np.save(outdir + 'prob_orient.npy', prob_orient)
            ''' 
            ## load classifier
            classifier = self.load_or_compute_classifier(train,test, mask=mask)
 
            ## extract profiles
            profiles,emap,dists = boundary_utils.get_profiles(nim, points, edges, rad=0)
   
            ## make features  
            x = boundary_utils.make_features(
                profiles, 
                size=self.sizex, 
                additional=[dists,edgev,edgev/dists],
                )
            
            ## classify
            cl, scores = classifier.classify(x)

            ## ground truth
            z = boundary_utils.is_boundary(points, edges, seg)

            logger.info('non boundary classification: {}%'\
                .format(np.sum((np.r_[z]==0)*(np.r_[cl]==0))/float(np.sum(np.r_[z]==0))*100))
            logger.info('boundary classification: {}%'\
                .format(np.sum((np.r_[z]==1)*(np.r_[cl]==1))/float(np.sum(np.r_[z]==1))*100))
            np.save(outdir + 'classified.npy', cl) 

            ## probabilities
            prob_edges = 1.  - scores/np.c_[np.sum(scores, axis=1)]
      
            ##save probs
            np.save(outdir + 'prob_edges.npy',prob_edges)
            '''
        else:
            points = np.load(outdir + 'points.npy')
            edges = np.load(outdir + 'edges.npy')
            cl = np.load(outdir + 'classified.npy')
            prob_pts = np.load(outdir + 'prob_points.npy')
            #prob_edges = np.load(outdir + 'prob_edges.npy')
            prob_orient = np.load(outdir + 'prob_orient.npy')

        ## make potentials
        unary = -np.log(prob_pts + 1e-10)
        #binary = - np.log(prob_edges + 1e-10)
        #thresh = (prob_orient.shape[1] - 1.0)/prob_orient.shape[1]
        thresh = (len(self.orients) - 1.0) / len(self.orients)
        orient_cost = -np.log(np.clip(prob_orient + thresh, 0, 1) +
                              1e-10) * 100
        orient_cost = np.clip(orient_cost, 0, 1e10)
        #import ipdb; ipdb.set_trace()

        ## solve MRF:
        import ipdb
        ipdb.set_trace()
        '''
        from rwsegment.mrf import fastPD
        class CostFunction(object):
            def __init__(self,**kwargs):
                self.binary = kwargs.pop('binary',0)
                self.orient_indices = kwargs.pop('orient_indices')
                self.orient_cost = kwargs.pop('orient_cost')

            def __call__(self,e,l1,l2):
                idpair = self.orient_indices[l1,l2]
                pair_cost = self.orient_cost[e,idpair]
                cost = (l1!=l2)*pair_cost
                #return (l1!=l2)*(1-cl[e])*0.1
                #return (l1!=l2)*self.binary[e,1]*0.1
                #y = l1!=l2
                #return self.binary[e, y]*pair_cost
                print e, l1, l2, cost
                return cost
 
        #sol, en = fastPD.fastPD_callback(unary, edges, cost_function(binary), debug=True)  
        cost_function = CostFunction(
            #binary=binary,
            orient_indices=self.orient_indices,
            orient_cost=orient_cost,
            )
        sol, en = fastPD.fastPD_callback(unary, edges, cost_function, debug=True)  
        '''
        wpairs = orient_cost
        from rwsegment.mrf import trw
        sol, en = trw.TRW_general(unary,
                                  edges,
                                  wpairs,
                                  niters=1000,
                                  verbose=True)

        labels = self.labelset[sol]
        imsol = np.ones(im.shape, dtype=np.int32) * 20
        imsol[tuple(points.T)] = labels
        io_analyze.save(outdir + 'imseeds.hdr', imsol)

        ## classify sol
        gtlabels = seg[tuple(points.T)]
        priorlabels = self.labelset[np.argmin(unary, axis=1)]

        err_prior = 1 - np.sum(gtlabels == priorlabels) / float(len(points))
        err = 1 - np.sum(gtlabels == labels) / float(len(points))

        logger.info('error in prior sol: {}%'.format(err_prior * 100))
        logger.info('error in sol: {}%'.format(err * 100))

        import ipdb
        ipdb.set_trace()

        ## start segmenting
        sol, y = rwsegment.segment(nim,
                                   seeds=seeds,
                                   labelset=self.labelset,
                                   weight_function=self.weight_function,
                                   **self.params)

        ## compute Dice coefficient per label
        dice = compute_dice_coef(sol, seg, labelset=self.labelset)
        logger.info('Dice: {}'.format(dice))

        if not config.debug:
            io_analyze.save(outdir + 'sol.hdr', sol.astype(np.int32))
            np.savetxt(outdir + 'dice.txt',
                       np.c_[dice.keys(), dice.values()],
                       fmt='%d %.8f')
    def run_svm_inference(self, test, w, test_dir):
        logger.info('running inference on: {}'.format(test))

        ## normalize w
        # w = w / np.sqrt(np.dot(w,w))
        strw = ' '.join('{:.3}'.format(val)
                        for val in np.asarray(w) * self.psi_scale)
        logger.debug('scaled w=[{}]'.format(strw))

        weights_laplacians = np.asarray(w)[self.indices_laplacians]
        weights_laplacians_h = np.asarray(
            self.hand_tuned_w)[self.indices_laplacians]
        weights_priors = np.asarray(w)[self.indices_priors]
        weights_priors_h = np.asarray(self.hand_tuned_w)[self.indices_priors]
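        ## split w into pairwise (Laplacian) weights and prior (anchor) weights
        ## via the precomputed index lists; the hand-tuned weights (_h) serve as
        ## a reference for the comparison on training data below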

        ## segment test image with trained w
        '''
        def meta_weight_functions(im,i,j,_w):    
            data = 0
            for iwf,wf in enumerate(self.laplacian_functions):
                _data = wf(im,i,j)
                data += _w[iwf]*_data
            return data
        weight_function = lambda im: meta_weight_functions(im,i,j,weights_laplacians)
        weight_function_h = lambda im: meta_weight_functions(im,i,j,weights_laplacians_h)
        '''
        weight_function = MetaLaplacianFunction(weights_laplacians,
                                                self.laplacian_functions)

        weight_function_h = MetaLaplacianFunction(weights_laplacians_h,
                                                  self.laplacian_functions)

        ## load images and ground truth
        file_seg = self.dir_reg + test + 'seg.hdr'
        file_im = self.dir_reg + test + 'gray.hdr'
        im = io_analyze.load(file_im)
        seg = io_analyze.load(file_seg)
        seg.flat[~np.in1d(seg.ravel(), self.labelset)] = self.labelset[0]

        nim = im / np.std(im)  # normalize image by std

        ## also evaluate on the training data?
        inference_train = True
        if inference_train:
            train_ims, train_segs, train_metas = self.training_set
            for tim, tz, tmeta in zip(train_ims, train_segs, train_metas):
                ## retrieve metadata
                islices = tmeta.pop('islices', None)
                imask = tmeta.pop('imask', None)
                iimask = tmeta.pop('iimask', None)
                if islices is not None:
                    tseeds = self.seeds[islices]
                    tprior = {
                        'data': np.asarray(self.prior['data'])[:, iimask],
                        'imask': imask,
                        'variance': np.asarray(self.prior['variance'])[:,
                                                                       iimask],
                        'labelset': self.labelset,
                    }
                    if 'intensity' in self.prior:
                        tprior['intensity'] = self.prior['intensity']
                else:
                    tseeds = self.seeds
                    tprior = self.prior

                ## prior
                tseg = self.labelset[np.argmax(tz, axis=0)].reshape(tim.shape)
                tanchor_api = MetaAnchor(
                    tprior,
                    self.prior_functions,
                    weights_priors,
                    image=tim,
                )
                tsol, ty = rwsegment.segment(tim,
                                             tanchor_api,
                                             seeds=tseeds,
                                             weight_function=weight_function,
                                             **self.rwparams_inf)
                ## compute Dice coefficient
                tdice = compute_dice_coef(tsol, tseg, labelset=self.labelset)
                logger.info('Dice coefficients for train: \n{}'.format(tdice))
                nlabel = len(self.labelset)
                tflatmask = np.zeros(ty.shape, dtype=bool)
                tflatmask[:, imask] = True
                loss0 = loss_functions.ideal_loss(tz, ty, mask=tflatmask)
                logger.info('Tloss = {}'.format(loss0))
                ## loss2: squared difference with ztilde
                loss1 = loss_functions.anchor_loss(tz, ty, mask=tflatmask)
                logger.info('SDloss = {}'.format(loss1))
                ## loss3: laplacian loss
                loss2 = loss_functions.laplacian_loss(tz, ty, mask=tflatmask)
                logger.info('LAPloss = {}'.format(loss2))

                tanchor_api_h = MetaAnchor(
                    tprior,
                    self.prior_functions,
                    weights_priors_h,
                    image=tim,
                )

                tsol, ty = rwsegment.segment(tim,
                                             tanchor_api_h,
                                             seeds=tseeds,
                                             weight_function=weight_function_h,
                                             **self.rwparams_inf)
                ## compute Dice coefficient
                tdice = compute_dice_coef(tsol, tseg, labelset=self.labelset)
                logger.info(
                    'Dice coefficients for train (hand-tuned): \n{}'.format(
                        tdice))
                loss0 = loss_functions.ideal_loss(tz, ty, mask=tflatmask)
                logger.info('Tloss (hand-tuned) = {}'.format(loss0))
                ## loss2: squared difference with ztilde
                loss1 = loss_functions.anchor_loss(tz, ty, mask=tflatmask)
                logger.info('SDloss (hand-tuned) = {}'.format(loss1))
                ## loss3: laplacian loss
                loss2 = loss_functions.laplacian_loss(tz, ty, mask=tflatmask)
                logger.info('LAPloss (hand-tuned) = {}'.format(loss2))
                break

        ## prior
        anchor_api = MetaAnchor(
            self.prior,
            self.prior_functions,
            weights_priors,
            image=nim,
        )

        sol, y = rwsegment.segment(nim,
                                   anchor_api,
                                   seeds=self.seeds,
                                   weight_function=weight_function,
                                   **self.rwparams_inf)

        ## compute Dice coefficient
        dice = compute_dice_coef(sol, seg, labelset=self.labelset)
        logger.info('Dice coefficients: \n{}'.format(dice))

        ## objective
        en_rw = rwsegment.energy_rw(nim,
                                    y,
                                    seeds=self.seeds,
                                    weight_function=weight_function,
                                    **self.rwparams_inf)
        en_anchor = rwsegment.energy_anchor(nim,
                                            y,
                                            anchor_api,
                                            seeds=self.seeds,
                                            **self.rwparams_inf)
        obj = en_rw + en_anchor
        logger.info('Objective = {:.3}'.format(obj))

        ## compute losses
        z = seg.ravel() == np.c_[self.labelset]
        mask = self.seeds < 0
        flatmask = mask.ravel() * np.ones((len(self.labelset), 1))

        ## loss 0 : 1 - Dice(y,z)
        loss0 = loss_functions.ideal_loss(z, y, mask=flatmask)
        logger.info('Tloss = {}'.format(loss0))

        ## loss2: squared difference with ztilde
        loss1 = loss_functions.anchor_loss(z, y, mask=flatmask)
        logger.info('SDloss = {}'.format(loss1))

        ## loss3: laplacian loss
        loss2 = loss_functions.laplacian_loss(z, y, mask=flatmask)
        logger.info('LAPloss = {}'.format(loss2))

        ## loss4: linear loss
        loss3 = loss_functions.linear_loss(z, y, mask=flatmask)
        logger.info('LINloss = {}'.format(loss3))

        ## saving
        if self.debug:
            pass
        elif self.isroot:
            outdir = self.dir_inf + test_dir
            logger.info('saving data in: {}'.format(outdir))
            if not os.path.isdir(outdir):
                os.makedirs(outdir)

            #io_analyze.save(outdir + 'im.hdr',im.astype(np.int32))
            #np.save(outdir + 'y.npy',y)
            #io_analyze.save(outdir + 'sol.hdr',sol.astype(np.int32))
            np.savetxt(outdir + 'objective.txt', [obj])
            np.savetxt(outdir + 'dice.txt',
                       np.c_[dice.keys(), dice.values()],
                       fmt='%d %f')

            f = open(outdir + 'losses.txt', 'w')
            f.write('ideal_loss\t{}\n'.format(loss0))
            f.write('anchor_loss\t{}\n'.format(loss1))
            f.write('laplacian_loss\t{}\n'.format(loss2))
            f.close()
    def run_svm_inference(self,test,w):
        logger.info('running inference on: {}'.format(test))

        outdir = self.dir_inf + test
        if not os.path.isdir(outdir):
            os.makedirs(outdir)

        ## segment test image with trained w
        def wwf(im,_w):
            ''' meta weight function'''
            data = 0
            for iwf,wf in enumerate(self.weight_functions.values()):
                ij,_data = wf(im)
                data += _w[iwf]*_data
            return ij, data

        ## load images and ground truth
        file_seg = self.dir_reg + test + 'seg.hdr'
        file_im  = self.dir_reg + test + 'gray.hdr'
        im  = io_analyze.load(file_im)
        seg = io_analyze.load(file_seg)
        seg.flat[~np.in1d(seg.ravel(),self.labelset)] = self.labelset[0]

        ## normalize image by its standard deviation
        im = im/np.std(im)

        ## prior
        anchor_api = BaseAnchorAPI(
            self.prior,
            anchor_weight=w[-1],
            )

        sol,y = rwsegment.segment(
            im,
            anchor_api,
            seeds=self.seeds,
            weight_function=lambda im: wwf(im, w),
            **self.rwparams_inf
            )

        np.save(outdir + 'y.test.npy',y)
        io_analyze.save(outdir + 'sol.test.hdr',sol.astype(np.int32))

        ## compute Dice coefficient
        dice = compute_dice_coef(sol, seg,labelset=self.labelset)
        np.savetxt(
            outdir + 'dice.test.txt', np.c_[dice.keys(),dice.values()],fmt='%d %.8f')

        ## compare inference with the gold standard
        dice_gold = np.loadtxt(outdir + 'dice.gold.txt')
        y_gold    = np.load(outdir + 'y.gold.npy')
        sol_gold  = io_analyze.load(outdir + 'sol.gold.hdr')

        np.testing.assert_allclose(
            dice.values(),
            dict(dice_gold).values(),
            err_msg='FAIL: dice coef mismatch',
            atol=1e-8)
        np.testing.assert_allclose(y, y_gold,  err_msg='FAIL: y mismatch')
        np.testing.assert_equal(sol, sol_gold, err_msg='FAIL: sol mismatch')

        print('PASS: inference tests')
    def process_sample(self,test,fold=None):

        ## get prior
        prior, mask = load_or_compute_prior_and_mask(
            test,
            fold=fold,
            force_recompute=self.force_recompute_prior)
        seeds   = (-1)*mask
        
        ## load image
        file_name = config.dir_reg + test + 'gray.hdr'        
        logger.info('segmenting data: {}'.format(file_name))
        im      = io_analyze.load(file_name)
        file_gt = config.dir_reg + test + 'seg.hdr'
        seg     = io_analyze.load(file_gt)
        seg.flat[~np.in1d(seg, self.labelset)] = self.labelset[0]
        
           
        ## normalize image
        nim = im/np.std(im)
            
        ## init anchor_api
        anchor_api = MetaAnchor(
            prior=prior,
            prior_models=self.prior_models,
            prior_weights=self.prior_weights,
            image=nim,
            )
           
        ## start segmenting
        #import ipdb; ipdb.set_trace()
        sol,y = rwsegment.segment(
            nim, 
            anchor_api,
            seeds=seeds, 
            labelset=self.labelset, 
            weight_function=self.weight_function,
            **self.params
            )

        ## compute losses
        z = seg.ravel()==np.c_[self.labelset]
        flatmask = mask.ravel()*np.ones((len(self.labelset),1))
        
        ## loss 0 : 1 - Dice(y,z)
        loss0 = loss_functions.ideal_loss(z,y,mask=flatmask)
        logger.info('Tloss = {}'.format(loss0))
        
        ## loss2: squared difference with ztilde
        loss1 = loss_functions.anchor_loss(z,y,mask=flatmask)
        logger.info('SDloss = {}'.format(loss1))
        
        ## loss3: laplacian loss
        loss2 = loss_functions.laplacian_loss(z,y,mask=flatmask)
        logger.info('LAPloss = {}'.format(loss2))
 
        ## loss4: linear loss
        loss3 = loss_functions.linear_loss(z,y,mask=flatmask)
        logger.info('LINloss = {}'.format(loss3))
        
        ## compute Dice coefficient per label
        dice    = compute_dice_coef(sol, seg,labelset=self.labelset)
        logger.info('Dice: {}'.format(dice))
        
        if not config.debug:
            if fold is not None:
                test_name = 'f{}_{}'.format(fold[0][:2], test)
            else:
                test_name = test
            outdir = config.dir_seg + \
                '/{}/{}'.format(self.model_name,test_name)
            logger.info('saving data in: {}'.format(outdir))
            if not os.path.isdir(outdir):
                os.makedirs(outdir)
        
            f = open(outdir + 'losses.txt', 'w')
            f.write('ideal_loss\t{}\n'.format(loss0))
            f.write('anchor_loss\t{}\n'.format(loss1))
            f.write('laplacian_loss\t{}\n'.format(loss2))
            f.close()
            
            io_analyze.save(outdir + 'sol.hdr', sol.astype(np.int32)) 
            np.savetxt(
                outdir + 'dice.txt', np.c_[dice.keys(),dice.values()],fmt='%d %.8f')
Example #12
    def process_sample(self,train, test):
        outdir = config.dir_work + 'autoseeds/' + config.basis + '/' + train + '/' + test
        logger.info('saving data in: {}'.format(outdir))
        if not os.path.isdir(outdir):
            os.makedirs(outdir)
        
        ## get prior
        from scipy import ndimage
        segtrain = io_analyze.load(config.dir_reg + test + train + '/regseg.hdr')
        segtrain.flat[~np.in1d(segtrain, self.labelset)] = self.labelset[0]
        struct  = np.ones((10,)*segtrain.ndim)
        mask    = ndimage.binary_dilation(
                segtrain>0,
                structure=struct,
                ).astype(bool)
 
        #prior, mask = load_or_compute_prior_and_mask(
        #    test,force_recompute=self.force_recompute_prior)
        #mask = mask.astype(bool)

        ## load image
        file_name = config.dir_reg + test + 'gray.hdr'        
        logger.info('segmenting data: {}'.format(file_name))
        im      = io_analyze.load(file_name).astype(float)
        file_gt = config.dir_reg + test + 'seg.hdr'
        seg     = io_analyze.load(file_gt)
        seg.flat[~np.in1d(seg, self.labelset)] = self.labelset[0]
        
           
        ## normalize image
        nim = im/np.std(im)
     

        #orient_scores = self.load_or_compute_orientations(train,test, mask=mask)
 
        if 1:#not os.path.isfile(outdir + 'points.npy'):
  
            from rwsegment import boundary_utils
            reload(boundary_utils)
            ## sample points
            points = boundary_utils.sample_points(im, self.step,  mask=mask)
            points = points[mask[tuple(points.T)]]
            impoints = np.zeros(im.shape,dtype=int)
            impoints[tuple(points.T)] = np.arange(len(points)) + 1
            ipoints = np.where(impoints.ravel())[0]
            points = np.argwhere(impoints) 
            np.save(outdir + 'points.npy', points)
            impoints[tuple(points.T)] = np.arange(len(points)) + 1

            ## set unary potentials from prior: array of unary costs
            nlabel = len(self.labelset)
            dist = self.distance_to_train(segtrain, points)
            T = 10.0
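            ## turn the point-to-training-label distances into per-point label
            ## probabilities using a Gaussian kernel of width T (rows sum to 1)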
            prob_pts = np.exp(-(dist/T)**2) / np.c_[np.sum(np.exp(-(dist/T)**2),axis=1)]
            #prob = np.c_[np.ones(im.size), np.zeros((im.size, nlabel-1))]
            #prob[mask.ravel(),:] = prior['data'].T
            #prob_pts = prob[ipoints,:]
            np.save(outdir + 'prob_points.npy', prob_pts) 
    
            ## binary potentials
            ## compute edges
            edges,edgev,labels = boundary_utils.get_edges(im, points,  mask=mask)
            edges = np.sort(edges,axis=1)
            np.save(outdir + 'edges.npy', edges)

            ## get orientation hist
            orient_scores,hist = self.load_or_compute_orientations(train,test, mask=mask)

            ##classify edges
            vecs = points[edges[:,1]] - points[edges[:,0]]
            vecs = vecs / np.c_[np.sqrt(np.sum(vecs**2,axis=1))] 
            scores = self.get_orient_scores(vecs)
            prob_orient = np.dot(scores, orient_scores)
            #prob_orient = prob_orient/np.c_[np.sum(prob_orient, axis=1)]
            np.save(outdir + 'prob_orient.npy', prob_orient) 

            ''' 
            ## load classifier
            classifier = self.load_or_compute_classifier(train,test, mask=mask)
 
            ## extract profiles
            profiles,emap,dists = boundary_utils.get_profiles(nim, points, edges, rad=0)
   
            ## make features  
            x = boundary_utils.make_features(
                profiles, 
                size=self.sizex, 
                additional=[dists,edgev,edgev/dists],
                )
            
            ## classify
            cl, scores = classifier.classify(x)

            ## ground truth
            z = boundary_utils.is_boundary(points, edges, seg)

            logger.info('non boundary classification: {}%'\
                .format(np.sum((np.r_[z]==0)*(np.r_[cl]==0))/float(np.sum(np.r_[z]==0))*100))
            logger.info('boundary classification: {}%'\
                .format(np.sum((np.r_[z]==1)*(np.r_[cl]==1))/float(np.sum(np.r_[z]==1))*100))
            np.save(outdir + 'classified.npy', cl) 

            ## probabilities
            prob_edges = 1.  - scores/np.c_[np.sum(scores, axis=1)]
      
            ##save probs
            np.save(outdir + 'prob_edges.npy',prob_edges)
            '''
        else:
            points     = np.load(outdir + 'points.npy')
            edges      = np.load(outdir + 'edges.npy')
            cl         = np.load(outdir + 'classified.npy') 
            prob_pts   = np.load(outdir + 'prob_points.npy')
            #prob_edges = np.load(outdir + 'prob_edges.npy')
            prob_orient = np.load(outdir + 'prob_orient.npy') 

        ## make potentials
        unary  = - np.log(prob_pts + 1e-10)
        #binary = - np.log(prob_edges + 1e-10)
        #thresh = (prob_orient.shape[1] - 1.0)/prob_orient.shape[1]
        thresh = (len(self.orients) - 1.0) / len(self.orients)
        orient_cost = - np.log(np.clip(prob_orient + thresh,0,1) + 1e-10)*100
        orient_cost = np.clip(orient_cost, 0, 1e10)
        #import ipdb; ipdb.set_trace()

        ## solve MRF:
        import ipdb; ipdb.set_trace()
        '''
        from rwsegment.mrf import fastPD
        class CostFunction(object):
            def __init__(self,**kwargs):
                self.binary = kwargs.pop('binary',0)
                self.orient_indices = kwargs.pop('orient_indices')
                self.orient_cost = kwargs.pop('orient_cost')

            def __call__(self,e,l1,l2):
                idpair = self.orient_indices[l1,l2]
                pair_cost = self.orient_cost[e,idpair]
                cost = (l1!=l2)*pair_cost
                #return (l1!=l2)*(1-cl[e])*0.1
                #return (l1!=l2)*self.binary[e,1]*0.1
                #y = l1!=l2
                #return self.binary[e, y]*pair_cost
                print e, l1, l2, cost
                return cost
 
        #sol, en = fastPD.fastPD_callback(unary, edges, cost_function(binary), debug=True)  
        cost_function = CostFunction(
            #binary=binary,
            orient_indices=self.orient_indices,
            orient_cost=orient_cost,
            )
        sol, en = fastPD.fastPD_callback(unary, edges, cost_function, debug=True)  
        '''
        wpairs = orient_cost
        from rwsegment.mrf import trw
        sol, en = trw.TRW_general(
            unary, edges, wpairs, niters=1000, verbose=True)

        labels = self.labelset[sol]
        imsol = np.ones(im.shape, dtype=np.int32)*20
        imsol[tuple(points.T)] = labels
        io_analyze.save(outdir + 'imseeds.hdr', imsol)

        ## classify sol
        gtlabels    = seg[tuple(points.T)]
        priorlabels = self.labelset[np.argmin(unary,axis=1)]

        err_prior = 1 - np.sum(gtlabels==priorlabels)/float(len(points))
        err       = 1 - np.sum(gtlabels==labels)/float(len(points))

        logger.info('error in prior sol: {}%'.format(err_prior*100))
        logger.info('error in sol: {}%'.format(err*100))

        import ipdb; ipdb.set_trace()

        ## start segmenting
        sol,y = rwsegment.segment(
            nim, 
            seeds=seeds, 
            labelset=self.labelset, 
            weight_function=self.weight_function,
            **self.params
            )

       
        ## compute Dice coefficient per label
        dice    = compute_dice_coef(sol, seg,labelset=self.labelset)
        logger.info('Dice: {}'.format(dice))
        
        if not config.debug:
            io_analyze.save(outdir + 'sol.hdr', sol.astype(np.int32))
            np.savetxt(
                outdir + 'dice.txt', np.c_[dice.keys(),dice.values()],fmt='%d %.8f')
Example #13
    def compute_approximate_aci(self, w, x, z, y0, **kwargs):
        logger.info("using approximate aci (Danny's)")
        islices = kwargs.pop('islices', None)
        imask = kwargs.pop('imask', None)
        iimask = kwargs.pop('iimask', None)
        if islices is not None:
            seeds = self.seeds[islices]
            mask = [
                self.immask[islices].ravel() for i in range(len(self.labelset))
            ]
            prior = {
                'data': np.asarray(self.prior['data'])[:, iimask],
                'imask': imask,
                'variance': np.asarray(self.prior['variance'])[:, iimask],
                'labelset': self.labelset,
            }
            if 'intensity' in self.prior:
                prior['intensity'] = self.prior['intensity']
        else:
            mask = self.mask
            seeds = self.seeds
            prior = self.prior

        weight_function = MetaLaplacianFunction(
            np.asarray(w)[self.indices_laplacians],
            self.laplacian_functions,
        )

        ## combine all prior models
        anchor_api = MetaAnchor(
            prior=prior,
            prior_models=self.prior_models,
            prior_weights=np.asarray(w)[self.indices_priors],
            image=x,
        )

        class GroundTruthAnchor(object):
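            '''Anchor API wrapper that blends the wrapped prior anchor with the
            ground truth, weighted per node by gt_weights.'''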
            def __init__(self, anchor_api, gt, gt_weights):
                self.anchor_api = anchor_api
                self.gt = gt
                self.gt_weights = gt_weights

            def get_labelset(self):
                return self.anchor_api.get_labelset()

            def get_anchor_and_weights(self, D, indices):
                anchor, weights = self.anchor_api.get_anchor_and_weights(
                    D, indices)
                gt_weights = self.gt_weights[:, indices]
                gt = self.gt[:, indices]
                new_weights = weights + gt_weights
                new_anchor = (anchor * weights + gt * gt_weights) / new_weights
                return new_anchor, new_weights

        self.approx_aci_maxiter = 200
        self.approx_aci_maxstep = 1e-2
        z_weights = np.zeros(np.asarray(z).shape)
        z_label = np.argmax(z, axis=0)
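        ## iteratively grow the per-node ground-truth anchor weights where the
        ## inferred labeling disagrees with z, until the ideal loss vanishes
        ## (or maxiter is reached)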
        for i in range(self.approx_aci_maxiter):
            logger.debug("approx aci, iter={}".format(i))

            ## add ground truth to anchor api
            modified_api = GroundTruthAnchor(anchor_api, z, z_weights)

            ## inference
            y_ = rwsegment.segment(x,
                                   modified_api,
                                   seeds=seeds,
                                   weight_function=weight_function,
                                   return_arguments=['y'],
                                   **self.rwparams)

            ## loss
            #loss = self.compute_loss(z,y_, islices=islices)
            loss = loss_functions.ideal_loss(z, y_, mask=mask)
            logger.debug('loss = {}'.format(loss))
            if loss < 1e-8:
                break

            ## update weights
            delta = np.max(y_ - y_[z_label, np.arange(y_.shape[1])], axis=0)
            delta = np.clip(delta, 0, self.approx_aci_maxstep)
            z_weights += delta

        return y_
Example #14
    def compute_approximate_aci2(self, w, x, z, y0, **kwargs):
        logger.info('using approximate aci')
        islices = kwargs.pop('islices', None)
        imask = kwargs.pop('imask', None)
        iimask = kwargs.pop('iimask', None)
        if islices is not None:
            seeds = self.seeds[islices]
            mask = [
                self.immask[islices].ravel() for i in range(len(self.labelset))
            ]
            prior = {
                'data': np.asarray(self.prior['data'])[:, iimask],
                'imask': imask,
                'variance': np.asarray(self.prior['variance'])[:, iimask],
                'labelset': self.labelset,
            }
            if 'intensity' in self.prior:
                prior['intensity'] = self.prior['intensity']
        else:
            mask = self.mask
            seeds = self.seeds
            prior = self.prior

        weight_function = MetaLaplacianFunction(
            np.asarray(w)[self.indices_laplacians],
            self.laplacian_functions,
        )

        ## combine all prior models
        anchor_api = MetaAnchor(
            prior=prior,
            prior_models=self.prior_models,
            prior_weights=np.asarray(w)[self.indices_priors],
            image=x,
        )

        ## unconstrained inference
        y_ = rwsegment.segment(
            x,
            anchor_api,
            seeds=seeds,
            weight_function=weight_function,
            return_arguments=['y'],
            #laplacian_label_weights=,
            **self.rwparams)

        ## fix correct labels
        gt = np.argmax(z, axis=0)
        icorrect = np.argmax(y_, axis=0) == gt
        seeds_correct = -np.ones(seeds.shape, dtype=int)
        seeds_correct.flat[icorrect] = self.labelset[gt[icorrect]]
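        ## nodes already labeled correctly by the unconstrained pass are clamped
        ## as hard seeds for the constrained pass below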

        ## annotation consistent inference
        #import ipdb; ipdb.set_trace()
        y = rwsegment.segment(
            x,
            anchor_api,
            seeds=seeds_correct,
            weight_function=weight_function,
            return_arguments=['y'],
            ground_truth=z,
            ground_truth_init=y0,
            seeds_prob=y_,
            #laplacian_label_weights=,
            **self.rwparams)
        y[:, icorrect] = y_[:, icorrect]
        #import ipdb; ipdb.set_trace()
        return y
Example #15
    def full_lai(self, w, x, z, switch_loss=False, iter=-1, **kwargs):
        ''' full Loss Augmented Inference
         y_ = arg min <w|-psi(x,y_)> - loss(y,y_) '''

        if np.max(np.abs(w)) < 1e-10:
            # if w is numerically zero, return a random per-node distribution
            # with zero mass on the ground-truth label
            y_ = np.random.random((len(z), len(z[0])))
            y_[np.argmax(z, axis=0), np.arange(len(z[0]))] = 0
            y_ = y_ / np.sum(y_, axis=0)
            return y_

        islices = kwargs.pop('islices', None)
        imask = kwargs.pop('imask', None)
        iimask = kwargs.pop('iimask', None)
        if islices is not None:
            im = x
            seeds = self.seeds[islices]
            mask = [
                self.immask[islices].ravel() for i in range(len(self.labelset))
            ]
            prior = {
                'data': np.asarray(self.prior['data'])[:, iimask],
                'imask': imask,
                'variance': np.asarray(self.prior['variance'])[:, iimask],
                'labelset': self.labelset,
            }
            if 'intensity' in self.prior:
                prior['intensity'] = self.prior['intensity']
            seg = z
        else:
            im = x
            mask = self.mask
            seeds = self.seeds
            prior = self.prior
            seg = z

        ## combine all weight functions
        weight_function = MetaLaplacianFunction(
            np.asarray(w)[self.indices_laplacians],
            self.laplacian_functions,
        )

        ## loss type
        addlin = None
        loss = None
        loss_weight = None
        L_loss = None
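        ## depending on loss_type, the loss enters the objective through the
        ## anchor term (squareddiff), an extra Laplacian term (laplacian), or an
        ## extra linear term (linear); 'ideal' and 'none' add no loss term here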

        loss_type = self.loss_type
        if loss_type in ['ideal', 'none']:
            pass
        elif loss_type == 'squareddiff':
            loss, loss_weight = loss_functions.compute_loss_anchor(seg,
                                                                   mask=mask)
            loss_weight *= self.loss_factor
        elif loss_type == 'laplacian':
            L_loss = - loss_functions.compute_loss_laplacian(seg, mask=mask) *\
                 self.loss_factor
        elif loss_type == 'linear':
            addlin, linw = loss_functions.compute_loss_linear(seg, mask=mask)
            addlin *= linw * self.loss_factor
        else:
            raise Exception('did not recognize loss type {}'.format(loss_type))

        ## loss function
        anchor_api = MetaAnchor(
            prior=prior,
            prior_models=self.prior_models,
            prior_weights=np.asarray(w)[self.indices_priors],
            loss=loss,
            loss_weight=loss_weight,
            image=im,
        )
        ## best y_ most different from y
        y_ = rwsegment.segment(
            im,
            anchor_api,
            seeds=seeds,
            weight_function=weight_function,
            return_arguments=['y'],
            additional_laplacian=L_loss,
            additional_linear=addlin,
            #laplacian_label_weights=,
            **self.rwparams)

        return y_