def run_svm_inference(self, test, w):
    logger.info('running inference on: {}'.format(test))

    outdir = self.dir_inf + test
    if not os.path.isdir(outdir):
        os.makedirs(outdir)

    ## segment test image with trained w
    def wwf(im, _w):
        ''' meta weight function: linear combination of the weight functions '''
        data = 0
        for iwf, wf in enumerate(self.weight_functions.values()):
            ij, _data = wf(im)
            data += _w[iwf] * _data
        return ij, data

    ## load image and ground truth
    file_seg = self.dir_reg + test + 'seg.hdr'
    file_im = self.dir_reg + test + 'gray.hdr'
    im = io_analyze.load(file_im)
    seg = io_analyze.load(file_seg)
    seg.flat[~np.in1d(seg.ravel(), self.labelset)] = self.labelset[0]

    ## normalize image by its standard deviation
    im = im / np.std(im)

    ## prior: the last entry of w is the anchor weight
    anchor_api = BaseAnchorAPI(self.prior, anchor_weight=w[-1])

    sol, y = rwsegment.segment(
        im,
        anchor_api,
        seeds=self.seeds,
        weight_function=lambda im: wwf(im, w),
        **self.rwparams_inf)

    np.save(outdir + 'y.test.npy', y)
    io_analyze.save(outdir + 'sol.test.hdr', sol.astype(np.int32))

    ## compute Dice coefficient
    dice = compute_dice_coef(sol, seg, labelset=self.labelset)
    np.savetxt(outdir + 'dice.test.txt',
               np.c_[list(dice.keys()), list(dice.values())], fmt='%d %.8f')

    ## compare inference against the gold standard
    dice_gold = np.loadtxt(outdir + 'dice.gold.txt')
    y_gold = np.load(outdir + 'y.gold.npy')
    sol_gold = io_analyze.load(outdir + 'sol.gold.hdr')

    np.testing.assert_allclose(
        list(dice.values()), list(dict(dice_gold).values()),
        err_msg='FAIL: dice coef mismatch', atol=1e-8)
    np.testing.assert_allclose(y, y_gold, err_msg='FAIL: y mismatch')
    np.testing.assert_equal(sol, sol_gold, err_msg='FAIL: sol mismatch')
    print('PASS: inference tests')
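# NOTE (editor's sketch): compute_dice_coef is called throughout these snippets
# but not shown. A minimal implementation consistent with its call sites (a
# dict mapping each label to its Dice score, saved with fmt='%d %.8f') might
# look like the following; the project's own version may differ.
import numpy as np

def compute_dice_coef_sketch(sol, seg, labelset):
    ''' per-label Dice coefficient: 2|A & B| / (|A| + |B|) '''
    dice = {}
    for label in labelset:
        a = (sol == label)
        b = (seg == label)
        denom = a.sum() + b.sum()
        dice[label] = 2.0 * np.logical_and(a, b).sum() / denom if denom else 1.0
    return dice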
def process_sample(self, test, fold=None):
    ## get prior
    prior, mask = load_or_compute_prior_and_mask(
        test,
        force_recompute=self.force_recompute_prior,
        pca=True,
        fold=fold)
    seeds = (-1) * mask
    mask = mask.astype(bool)

    ## load image
    file_name = config.dir_reg + test + 'gray.hdr'
    logger.info('segmenting data: {}'.format(file_name))
    im = io_analyze.load(file_name)
    file_gt = config.dir_reg + test + 'seg.hdr'
    seg = io_analyze.load(file_gt)
    seg.flat[~np.in1d(seg, self.labelset)] = self.labelset[0]

    ## normalize image
    nim = im / np.std(im)

    ## init anchor_api
    anchor_api = MetaAnchor(
        prior=prior,
        prior_models=self.prior_models,
        prior_weights=self.prior_weights,
        image=nim)

    ## start segmenting
    sol, impca = rwsegment_pca.segment(
        nim,
        anchor_api,
        seeds=seeds,
        labelset=self.labelset,
        **self.params)

    ## compute Dice coefficient per label
    dice = compute_dice_coef(sol, seg, labelset=self.labelset)
    logger.info('Dice: {}'.format(dice))
    dice_pca = compute_dice_coef(impca, seg, labelset=self.labelset)
    logger.info('Dice pca only: {}'.format(dice_pca))

    if not config.debug:
        if fold is not None:
            test_name = 'f{}_{}'.format(fold[0][:2], test)
        else:
            test_name = test
        outdir = config.dir_seg + '/{}/{}'.format(self.model_name, test_name)
        logger.info('saving data in: {}'.format(outdir))
        if not os.path.isdir(outdir):
            os.makedirs(outdir)
        io_analyze.save(outdir + 'sol.hdr', sol.astype(np.int32))
        io_analyze.save(outdir + 'solpca.hdr', impca.astype(np.int32))
        np.savetxt(outdir + 'dice.txt',
                   np.c_[list(dice.keys()), list(dice.values())],
                   fmt='%d %.8f')
        np.savetxt(outdir + 'dice_pca.txt',
                   np.c_[list(dice.keys()), list(dice_pca.values())],
                   fmt='%d %.8f')
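# NOTE (editor's sketch): MetaAnchor is not defined in these snippets. From its
# call sites it bundles a prior with several prior models and their scalar
# weights into a single anchor term for rwsegment. The skeleton below is a
# hypothetical illustration only: the class name, the get_anchor_and_weights
# method and its signature are assumptions, not the project's API.
class MetaAnchorSketch(object):
    def __init__(self, prior, prior_models, prior_weights, image):
        self.prior = prior
        self.prior_models = prior_models
        self.prior_weights = prior_weights
        self.image = image

    def get_anchor_and_weights(self):
        ''' weighted sum of the anchors and weights of all prior models '''
        anchor, weights = 0, 0
        for model, w in zip(self.prior_models, self.prior_weights):
            a, wts = model.get_anchor_and_weights(self.prior, self.image)
            anchor += w * a
            weights += w * wts
        return anchor, weights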
def load_or_compute_prior_and_mask(test, force_recompute=False, pca=False, fold=None):
    if fold is not None:
        test_name = 'f{}_{}/'.format(fold[0][:2], test)
    else:
        test_name = test
        fold = [test]

    labelset = np.asarray(config.labelset)
    if pca:
        outdir = config.dir_pca_prior + test_name
    else:
        outdir = config.dir_prior + test_name
    if not os.path.isdir(outdir):
        os.makedirs(outdir)

    ## load mask and prior
    prior = None
    file_mask = outdir + 'mask.hdr'
    file_prior = outdir + 'prior.npz'
    file_U = outdir + 'eigenvectors.npy'
    file_segprior = outdir + 'segprior.hdr'
    file_entropymap = outdir + 'entropymap.hdr'

    if force_recompute or not (os.path.exists(file_prior) and
                               os.path.exists(file_mask)):
        if pca:
            _prior, mask = load_or_compute_prior_and_mask(test, fold=fold)
            generator = rwsegment_pca_prior.PriorGenerator(labelset, mask=mask)
        else:
            generator = rwsegment_prior.PriorGenerator(labelset)

        ntrain = 0
        for train in config.vols:
            if train in fold:
                continue
            logger.debug('load training img: {}'.format(train))
            ## segmentation
            file_seg = config.dir_reg + test + train + 'regseg.hdr'
            seg = io_analyze.load(file_seg)
            ## image (for intensity prior)
            file_im = config.dir_reg + test + train + 'reggray.hdr'
            im = io_analyze.load(file_im)
            generator.add_training_data(seg, image=im, nrandom=5)
            ntrain += 1

        if not pca:
            from scipy import ndimage
            mask = generator.get_mask()
            struct = np.ones((7,) * mask.ndim)
            mask = ndimage.binary_dilation(
                mask.astype(bool),
                structure=struct,
            ).astype(bool)

        prior = generator.get_prior(mask)

        nlabel = len(labelset)
        segprior = np.zeros(mask.shape)
        segprior.flat[prior['imask']] = labelset[np.argmax(prior['data'], axis=0)]

        ## negative entropy of the prior, sum_l p_l log p_l, scaled to int16 range
        entropymap = np.zeros(mask.shape)
        entropymap.flat[prior['imask']] = np.sum(
            np.log(prior['data'] + 1e-10) * prior['data'],
            axis=0)
        entropymap = entropymap / np.log(nlabel) * 2**15

        if 'eigenvectors' in prior:
            U = prior.pop('eigenvectors')
            print('size of U: {}, dtype: {}'.format(U.size, U.dtype))
            np.save(file_U, U)
            prior['eigenvectors'] = U

        np.savez(file_prior, **prior)
        io_analyze.save(file_mask, mask.astype(np.int32))
        io_analyze.save(file_segprior, segprior.astype(np.int32))
        #io_analyze.save(file_entropymap, entropymap.astype(np.int32))

    mask = io_analyze.load(file_mask).astype(bool)
    prior = dict(np.load(file_prior))
    if pca:
        U = np.load(file_U)
        prior['eigenvectors'] = U
    return prior, mask
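# Usage sketch for load_or_compute_prior_and_mask (the volume id '01/' is
# illustrative; as the path concatenations above imply, volume names are
# assumed to carry a trailing slash):
#
#   prior, mask = load_or_compute_prior_and_mask('01/', pca=True, fold=['01/'])
#
# prior['data'] is an (nlabel, nmasked) array of per-label probabilities and
# prior['imask'] holds the flat indices of the in-mask voxels, as used by
# segprior.flat[prior['imask']] = labelset[np.argmax(prior['data'], axis=0)].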
def process_sample(self, train, test):
    outdir = config.dir_work + 'autoseeds/' + config.basis + '/' + train + '/' + test
    logger.info('saving data in: {}'.format(outdir))
    if not os.path.isdir(outdir):
        os.makedirs(outdir)

    ## get prior: dilate the registered training segmentation into a mask
    from scipy import ndimage
    segtrain = io_analyze.load(config.dir_reg + test + train + '/regseg.hdr')
    segtrain.flat[~np.in1d(segtrain, self.labelset)] = self.labelset[0]
    struct = np.ones((10,) * segtrain.ndim)
    mask = ndimage.binary_dilation(
        segtrain > 0,
        structure=struct,
    ).astype(bool)
    #prior, mask = load_or_compute_prior_and_mask(
    #    test, force_recompute=self.force_recompute_prior)
    #mask = mask.astype(bool)

    ## load image
    file_name = config.dir_reg + test + 'gray.hdr'
    logger.info('segmenting data: {}'.format(file_name))
    im = io_analyze.load(file_name).astype(float)
    file_gt = config.dir_reg + test + 'seg.hdr'
    seg = io_analyze.load(file_gt)
    seg.flat[~np.in1d(seg, self.labelset)] = self.labelset[0]

    ## normalize image
    nim = im / np.std(im)

    if 1:  # not os.path.isfile(outdir + 'points.npy'):
        from rwsegment import boundary_utils
        reload(boundary_utils)  # builtin in Python 2; importlib.reload in Python 3

        ## sample points
        points = boundary_utils.sample_points(im, self.step, mask=mask)
        points = points[mask[tuple(points.T)]]
        impoints = np.zeros(im.shape, dtype=int)
        impoints[tuple(points.T)] = np.arange(len(points)) + 1
        ipoints = np.where(impoints.ravel())[0]
        points = np.argwhere(impoints)
        np.save(outdir + 'points.npy', points)
        impoints[tuple(points.T)] = np.arange(len(points)) + 1

        ## unary potentials from the distance to the training segmentation
        nlabel = len(self.labelset)
        dist = self.distance_to_train(segtrain, points)
        T = 10.0
        prob_pts = np.exp(-(dist / T)**2) \
            / np.c_[np.sum(np.exp(-(dist / T)**2), axis=1)]
        np.save(outdir + 'prob_points.npy', prob_pts)

        ## binary potentials: compute edges
        edges, edgev, labels = boundary_utils.get_edges(im, points, mask=mask)
        edges = np.sort(edges, axis=1)
        np.save(outdir + 'edges.npy', edges)

        ## get orientation histogram
        orient_scores, hist = self.load_or_compute_orientations(train, test, mask=mask)

        ## classify edges by orientation
        vecs = points[edges[:, 1]] - points[edges[:, 0]]
        vecs = vecs / np.c_[np.sqrt(np.sum(vecs**2, axis=1))]
        scores = self.get_orient_scores(vecs)
        prob_orient = np.dot(scores, orient_scores)
        np.save(outdir + 'prob_orient.npy', prob_orient)

        ''' disabled: edge classification with a trained classifier
        ## load classifier
        classifier = self.load_or_compute_classifier(train, test, mask=mask)
        ## extract profiles
        profiles, emap, dists = boundary_utils.get_profiles(nim, points, edges, rad=0)
        ## make features
        x = boundary_utils.make_features(
            profiles,
            size=self.sizex,
            additional=[dists, edgev, edgev/dists],
        )
        ## classify
        cl, scores = classifier.classify(x)
        ## ground truth
        z = boundary_utils.is_boundary(points, edges, seg)
        logger.info('non boundary classification: {}%'.format(
            np.sum((np.r_[z]==0)*(np.r_[cl]==0))/float(np.sum(np.r_[z]==0))*100))
        logger.info('boundary classification: {}%'.format(
            np.sum((np.r_[z]==1)*(np.r_[cl]==1))/float(np.sum(np.r_[z]==1))*100))
        np.save(outdir + 'classified.npy', cl)
        ## probabilities
        prob_edges = 1. - scores/np.c_[np.sum(scores, axis=1)]
        ## save probs
        np.save(outdir + 'prob_edges.npy', prob_edges)
        '''
    else:
        points = np.load(outdir + 'points.npy')
        edges = np.load(outdir + 'edges.npy')
        cl = np.load(outdir + 'classified.npy')
        prob_pts = np.load(outdir + 'prob_points.npy')
        #prob_edges = np.load(outdir + 'prob_edges.npy')
        prob_orient = np.load(outdir + 'prob_orient.npy')

    ## make potentials
    unary = -np.log(prob_pts + 1e-10)
    #binary = -np.log(prob_edges + 1e-10)
    thresh = (len(self.orients) - 1.0) / len(self.orients)
    orient_cost = -np.log(np.clip(prob_orient + thresh, 0, 1) + 1e-10) * 100
    orient_cost = np.clip(orient_cost, 0, 1e10)

    ## solve MRF with TRW (the fastPD variant below is disabled)
    ''' disabled: fastPD with a pairwise cost callback
    from rwsegment.mrf import fastPD
    class CostFunction(object):
        def __init__(self, **kwargs):
            self.binary = kwargs.pop('binary', 0)
            self.orient_indices = kwargs.pop('orient_indices')
            self.orient_cost = kwargs.pop('orient_cost')
        def __call__(self, e, l1, l2):
            idpair = self.orient_indices[l1, l2]
            pair_cost = self.orient_cost[e, idpair]
            cost = (l1 != l2) * pair_cost
            print(e, l1, l2, cost)
            return cost
    cost_function = CostFunction(
        orient_indices=self.orient_indices,
        orient_cost=orient_cost,
    )
    sol, en = fastPD.fastPD_callback(unary, edges, cost_function, debug=True)
    '''
    wpairs = orient_cost
    from rwsegment.mrf import trw
    sol, en = trw.TRW_general(unary, edges, wpairs, niters=1000, verbose=True)
    labels = self.labelset[sol]
    imsol = np.ones(im.shape, dtype=np.int32) * 20
    imsol[tuple(points.T)] = labels
    io_analyze.save(outdir + 'imseeds.hdr', imsol)

    ## evaluate the MRF solution against the ground truth at the sampled points
    gtlabels = seg[tuple(points.T)]
    priorlabels = self.labelset[np.argmin(unary, axis=1)]
    err_prior = 1 - np.sum(gtlabels == priorlabels) / float(len(points))
    err = 1 - np.sum(gtlabels == labels) / float(len(points))
    logger.info('error in prior sol: {}%'.format(err_prior * 100))
    logger.info('error in sol: {}%'.format(err * 100))

    ## start segmenting
    ## NOTE: `seeds` is not defined in this function as written; it presumably
    ## came from the commented-out load_or_compute_prior_and_mask call above.
    sol, y = rwsegment.segment(
        nim,
        seeds=seeds,
        labelset=self.labelset,
        weight_function=self.weight_function,
        **self.params)

    ## compute Dice coefficient per label
    dice = compute_dice_coef(sol, seg, labelset=self.labelset)
    logger.info('Dice: {}'.format(dice))
    if not config.debug:
        io_analyze.save(outdir + 'sol.hdr', sol.astype(np.int32))
        np.savetxt(outdir + 'dice.txt',
                   np.c_[list(dice.keys()), list(dice.values())],
                   fmt='%d %.8f')
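# The unary term above converts each point's distances to the training
# segmentation into a label distribution with a Gaussian kernel of bandwidth
# T, then takes negative log-probabilities. A standalone demo (the distance
# values are hypothetical):
import numpy as np

dist = np.array([[0.0, 5.0, 20.0],   # point 0: distance to each of 3 labels
                 [8.0, 2.0, 12.0]])  # point 1
T = 10.0
k = np.exp(-(dist / T)**2)                   # Gaussian affinity per label
prob_pts = k / k.sum(axis=1, keepdims=True)  # rows sum to 1
unary = -np.log(prob_pts + 1e-10)            # lowest cost for the closest label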
def process_sample(self, test, fold=None):
    ## get prior
    prior, mask = load_or_compute_prior_and_mask(
        test,
        fold=fold,
        force_recompute=self.force_recompute_prior)
    seeds = (-1) * mask

    ## load image
    file_name = config.dir_reg + test + 'gray.hdr'
    logger.info('segmenting data: {}'.format(file_name))
    im = io_analyze.load(file_name)
    file_gt = config.dir_reg + test + 'seg.hdr'
    seg = io_analyze.load(file_gt)
    seg.flat[~np.in1d(seg, self.labelset)] = self.labelset[0]

    ## normalize image
    nim = im / np.std(im)

    ## init anchor_api
    anchor_api = MetaAnchor(
        prior=prior,
        prior_models=self.prior_models,
        prior_weights=self.prior_weights,
        image=nim,
    )

    ## start segmenting
    sol, y = rwsegment.segment(
        nim,
        anchor_api,
        seeds=seeds,
        labelset=self.labelset,
        weight_function=self.weight_function,
        **self.params)

    ## compute losses
    z = seg.ravel() == np.c_[self.labelset]  # one-hot ground truth, (nlabel, nvoxel)
    flatmask = mask.ravel() * np.ones((len(self.labelset), 1))

    ## loss 0: 1 - Dice(y, z)
    loss0 = loss_functions.ideal_loss(z, y, mask=flatmask)
    logger.info('Tloss = {}'.format(loss0))
    ## loss 1: squared difference with ztilde
    loss1 = loss_functions.anchor_loss(z, y, mask=flatmask)
    logger.info('SDloss = {}'.format(loss1))
    ## loss 2: laplacian loss
    loss2 = loss_functions.laplacian_loss(z, y, mask=flatmask)
    logger.info('LAPloss = {}'.format(loss2))
    ## loss 3: linear loss
    loss3 = loss_functions.linear_loss(z, y, mask=flatmask)
    logger.info('LINloss = {}'.format(loss3))

    ## compute Dice coefficient per label
    dice = compute_dice_coef(sol, seg, labelset=self.labelset)
    logger.info('Dice: {}'.format(dice))

    if not config.debug:
        if fold is not None:
            test_name = 'f{}_{}'.format(fold[0][:2], test)
        else:
            test_name = test
        outdir = config.dir_seg + '/{}/{}'.format(self.model_name, test_name)
        logger.info('saving data in: {}'.format(outdir))
        if not os.path.isdir(outdir):
            os.makedirs(outdir)

        with open(outdir + 'losses.txt', 'w') as f:
            f.write('ideal_loss\t{}\n'.format(loss0))
            f.write('anchor_loss\t{}\n'.format(loss1))
            f.write('laplacian_loss\t{}\n'.format(loss2))
            f.write('linear_loss\t{}\n'.format(loss3))
        io_analyze.save(outdir + 'sol.hdr', sol.astype(np.int32))
        np.savetxt(outdir + 'dice.txt',
                   np.c_[list(dice.keys()), list(dice.values())],
                   fmt='%d %.8f')
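# `z = seg.ravel() == np.c_[self.labelset]` builds the one-hot ground-truth
# matrix by broadcasting: row l is True wherever the voxel carries labelset[l].
# A small standalone demo with a hypothetical labelset:
import numpy as np

labelset = np.array([0, 13, 14])
seg = np.array([[13, 0],
                [14, 13]])
z = seg.ravel() == np.c_[labelset]  # shape (nlabel, nvoxel)
# z == [[False,  True, False, False],
#       [ True, False, False,  True],
#       [False, False,  True, False]]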
def compute_mean_segmentation(self, tests):
    for test in tests:
        file_gt = config.dir_reg + test + 'seg.hdr'
        seg = io_analyze.load(file_gt)
        seg.flat[~np.in1d(seg, self.labelset)] = self.labelset[0]

        ## get prior
        prior, mask = load_or_compute_prior_and_mask(
            test, force_recompute=self.force_recompute_prior)
        mask = mask.astype(bool)

        y = np.zeros((len(self.labelset), seg.size))
        y[:, 0] = 1  # NOTE: as written this marks only voxel 0 for all labels
        y.flat[prior['imask']] = prior['data']

        sol = np.zeros(seg.shape, dtype=np.int32)
        sol[mask] = self.labelset[np.argmax(prior['data'], axis=0)]

        ## compute losses
        z = seg.ravel() == np.c_[self.labelset]
        flatmask = mask.ravel() * np.ones((len(self.labelset), 1))

        ## loss 0: 1 - Dice(y, z)
        loss0 = loss_functions.ideal_loss(z, y, mask=flatmask)
        logger.info('Tloss = {}'.format(loss0))
        ## the anchor, laplacian and linear losses are disabled here
        #loss1 = loss_functions.anchor_loss(z, y, mask=flatmask)
        #loss2 = loss_functions.laplacian_loss(z, y, mask=flatmask)
        #loss3 = loss_functions.linear_loss(z, y, mask=flatmask)

        ## compute Dice coefficient per label
        dice = compute_dice_coef(sol, seg, labelset=self.labelset)
        logger.info('Dice: {}'.format(dice))

        if not config.debug:
            outdir = config.dir_seg + '/{}/{}'.format('mean', test)
            logger.info('saving data in: {}'.format(outdir))
            if not os.path.isdir(outdir):
                os.makedirs(outdir)
            io_analyze.save(outdir + 'sol.hdr', sol.astype(np.int32))
            np.savetxt(outdir + 'dice.txt',
                       np.c_[list(dice.keys()), list(dice.values())],
                       fmt='%d %.8f')
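# NOTE (editor's sketch): loss_functions.ideal_loss is not shown in these
# snippets. Per the "1 - Dice(y, z)" comment above, a minimal version could
# harden y to its per-voxel argmax and average 1 - Dice over the labels inside
# the mask; the helper name and the masking convention are assumptions.
import numpy as np

def ideal_loss_sketch(z, y, mask=None):
    ''' z, y: (nlabel, nvoxel) arrays; mask: same shape, nonzero in the domain '''
    if mask is not None:
        keep = mask[0].astype(bool)      # voxel columns inside the domain
        z, y = z[:, keep], y[:, keep]
    hard = np.argmax(y, axis=0) == np.c_[np.arange(len(y))]  # one-hot argmax
    inter = np.sum(hard & (z > 0), axis=1)
    sizes = hard.sum(axis=1) + np.sum(z > 0, axis=1)
    dice = 2.0 * inter / np.maximum(sizes, 1e-10)
    return float(np.mean(1.0 - dice))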