def segmentGC(pred, beta):
    '''
    This function implements a call to the standard Graph Cut segmentation
    in the OpenGM library (http://hci.iwr.uni-heidelberg.de/opengm2/).
    A Potts model is assumed, with a 4-neighborhood for 2D data and a
    6-neighborhood for 3D data to define the pairwise terms.
    Parameters:
    -- pred - the unary terms, used directly (no log applied, do it outside if needed).
              This input is assumed to be 3D!
    -- beta - the weight of the pairwise potentials, usually called lambda
    Return:
    -- binary volume, as produced by OpenGM
    '''
    nz, ny, nx = pred.shape
    numVar = pred.size
    numLabels = 2

    numberOfStates = np.ones(numVar, dtype=opengm.index_type) * numLabels
    gm = opengm.graphicalModel(numberOfStates, operator='adder')

    # Adding unary function and factors
    functions = np.zeros((numVar, 2))
    predflat = pred.reshape((numVar, 1))
    if (predflat.dtype == np.uint8):
        predflat = predflat.astype(np.float32)
        predflat = predflat / 256.

    functions[:, 0] = predflat[:, 0]
    functions[:, 1] = 1 - predflat[:, 0]

    unary_fids = gm.addFunctions(functions)
    gm.addFactors(unary_fids, np.arange(0, numVar))

    # add one binary function (Potts function)
    potts = opengm.PottsFunction([2, 2], 0.0, beta)
    binary_fid = gm.addFunction(potts)

    # add binary factors
    indices = np.arange(numVar, dtype=np.uint32).reshape((nz, ny, nx))
    z_edges = np.concatenate([indices[:nz - 1, :, :], indices[1:, :, :]]
                             ).reshape((2, (nz - 1) * ny * nx)).transpose()
    y_edges = np.concatenate([indices[:, :ny - 1, :], indices[:, 1:, :]]
                             ).reshape((2, nz * (ny - 1) * nx)).transpose()
    x_edges = np.concatenate([indices[:, :, :nx - 1], indices[:, :, 1:]]
                             ).reshape((2, nz * ny * (nx - 1))).transpose()

    gm.addFactors(binary_fid, z_edges)
    gm.addFactors(binary_fid, y_edges)
    gm.addFactors(binary_fid, x_edges)

    grcut = opengm.inference.GraphCut(gm)
    grcut.infer()
    argmin = grcut.arg()

    res = argmin.reshape((nz, ny, nx))
    if hasattr(pred, 'axistags'):
        res = vigra.taggedView(res, pred.axistags)
    return res
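# A minimal usage sketch for segmentGC above (not from the original source): it assumes the
# function lives in a module that already imports numpy as np, opengm and vigra, and the
# probability volume and the beta value below are invented purely for illustration.
import numpy as np

pred = np.random.rand(8, 64, 64).astype(np.float32)  # hypothetical foreground probabilities in [0, 1], (z, y, x)
labels = segmentGC(pred, beta=0.5)                   # beta chosen arbitrarily
print(labels.shape)       # (8, 64, 64)
print(np.unique(labels))  # binary result, e.g. [0 1]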
def segment_adjacency_graph(segment_unaries, segment_map, segment_regularizer=None):
    """
    Creates a region adjacency graph. Each segment has a variable and
    adjacent segments are linked by edges.
    """
    _check_valid_segment_map(segment_map)
    _check_compatible_segment_map_unaries(segment_map, segment_unaries)

    n_vars = segment_unaries.shape[0]
    n_labels = segment_unaries.shape[-1]

    edges = segment_map_to_rag_edges(segment_map)
    n_edges = edges.shape[0]

    # allocate space for the model and all its variables
    gm = opengm.graphicalModel([n_labels] * n_vars)
    gm.reserveFunctions(n_vars + n_edges, 'explicit')  # the unary functions plus the 3 types of regularizer
    gm.reserveFactors(n_vars + n_edges)

    gm = add_layer(gm, segment_unaries, edges, segment_regularizer)

    gm.finalize()
    return gm
def test_constructor_list(self):
    numberOfStates = [2, 3, 4]
    gm = opengm.graphicalModel(numberOfStates, operator="adder")
    assert(gm.numberOfVariables == 3)
    assert(gm.numberOfLabels(0) == 2)
    assert(gm.numberOfLabels(1) == 3)
    assert(gm.numberOfLabels(2) == 4)
def _constructOpenGMModel(self):
    openGMModel = opengm.graphicalModel(self.cardinalities, operator="adder")
    for factor in self.factors:
        members = tuple(map(int, list(factor.members)))
        func = openGMModel.addFunction(factor.values)
        openGMModel.addFactor(func, members)
    return openGMModel
def test_constructor_numpy(self):
    numberOfStates = numpy.ones(3, dtype=numpy.uint64)
    numberOfStates[0] = 2
    numberOfStates[1] = 3
    numberOfStates[2] = 4
    gm = opengm.graphicalModel(numberOfStates)
    assert(gm.numberOfVariables == 3)
    assert(gm.numberOfLabels(0) == 2)
    assert(gm.numberOfLabels(1) == 3)
    assert(gm.numberOfLabels(2) == 4)
def fit(self, mapc, th, enc, lambda0):
    ''' create a model for the decision for the word '''
    import opengm
    N = len(mapc)
    K = self.K
    numLabel = [K + 1] * N
    self.gm = opengm.graphicalModel(numLabel)
    i0 = np.argsort(mapc, axis=0)[:, 0]
    self.indice = i0
    v = []
    unary = []
    for i in i0:
        p = mapc[i][1]
        Eu = []
        for k in range(self.K):
            Eu.append(1 - p[k])
        Eu.append(mapc[i][2])
        #Eu.append(max(p))
        v.append([mapc[i][0], Eu, mapc[i][3]])
        unary.append(Eu)
    self.vertices = v
    unary = np.array(unary)
    assert (unary.shape == (N, K + 1))
    fid = self.gm.addFunctions(unary)
    vis = np.arange(0, N, dtype=np.uint64)
    self.gm.addFactors(fid, vis)
    self.edges = []
    self.overlap = np.zeros((N, N))
    for i, v1 in enumerate(self.vertices):
        for j, v2 in enumerate(self.vertices[i + 1:]):
            dx = abs(v2[0][0] - v1[0][0])
            dy = abs(v2[0][1] - v1[0][1])
            w = min(v1[2][0], v2[2][0])
            h = min(v1[2][1], v2[2][1])  # compare heights (the original compared v1's height against v2's width)
            if dx < th * w and dy < th * h:
                intersec = (w - min(w, abs(v2[0][0] - v1[0][0])))
                intersec *= (h - min(h, abs(v2[0][1] - v1[0][1])))
                intersec *= 100. / (w * h)
                v0 = lambda0 * np.exp(-(100 - intersec)**2)
                BinaryE = np.ones((K + 1, K + 1)) * v0
                BinaryE += self.prior
                BinaryE[K, K] = 0
                fid = self.gm.addFunction(BinaryE)
                self.edges.append((i, j + i + 1, intersec))
                self.gm.addFactor(fid, [i, j + i + 1])
def potts_lattice_graph(pixel_unaries, beta):
    n_vars = pixel_unaries.shape[0] * pixel_unaries.shape[1]
    n_labels = pixel_unaries.shape[-1]
    n_edges = calc_n_pixel_edges(pixel_unaries.shape)

    gm = opengm.graphicalModel([n_labels] * n_vars)
    gm.reserveFunctions(n_vars + 1, 'explicit')  # the unary functions plus the 1 type of regularizer
    gm.reserveFactors(n_vars + n_edges)

    gm = add_potts_lattice_layer(gm, pixel_unaries, beta)

    gm.finalize()
    return gm
def makeModel(img, gt):
    shape = gt.shape[0:2]
    numVar = shape[0] * shape[1]

    # make model
    gm = graphicalModel(numpy.ones(numVar) * numberOfLabels)

    # compute features
    unaryFeat = getFeat(fUnary, img)
    unaryFeat = numpy.nan_to_num(
        numpy.concatenate(unaryFeat, axis=2).view(numpy.ndarray))
    unaryFeat = unaryFeat.reshape([numVar, -1])

    # add unaries
    lUnaries = lUnaryFunctions(
        weights=weights, numberOfLabels=numberOfLabels,
        features=unaryFeat, weightIds=uWeightIds,
        featurePolicy=FeaturePolicy.sharedBetweenLabels,
        makeFirstEntryConst=numberOfLabels == 2,
        addConstFeature=addConstFeature)
    fids = gm.addFunctions(lUnaries)
    gm.addFactors(fids, numpy.arange(numVar))

    if len(fBinary) > 0:
        binaryFeat = getFeat(fBinary, img)
        binaryFeat = numpy.nan_to_num(
            numpy.concatenate(binaryFeat, axis=2).view(numpy.ndarray))
        binaryFeat = binaryFeat.reshape([numVar, -1])

        # add second order
        vis2Order = gridVis(shape[0:2], True)

        fU = binaryFeat[vis2Order[:, 0], :]
        fV = binaryFeat[vis2Order[:, 1], :]
        fB = (fU + fV / 2.0)

        lp = lPottsFunctions(weights=weights, numberOfLabels=numberOfLabels,
                             features=fB, weightIds=bWeightIds,
                             addConstFeature=addConstFeature)
        gm.addFactors(gm.addFunctions(lp), vis2Order)

    return gm
def buildGM(img, rag, dataImage, numLabels, boundaryPixels, regionPixels, beta, sigma, verbose=False):
    print("get region clustering")
    regionFeatures = numpy.ones([rag.numberOfRegions(), 3], dtype=numpy.float64)
    print("lab type in gm", type(img))
    print("lab type in gm shape", img.shape)
    npimg = numpy.ones(img.shape)
    npimg[:, :, :] = img[:, :, :]
    print("npimg type in gm shape", npimg.shape)
    for r in range(rag.numberOfRegions()):
        for c in range(3):
            regionFeatures[r, c] = numpy.mean(npimg[regionPixels[r][:, 0], regionPixels[r][:, 1]][c])
    print("do clustering")
    code, dists = doClustering(regionFeatures, k=numLabels, steps=100)
    dists = (dists - dists.min()) / (dists.max() - dists.min())

    if verbose:
        print("get boundary evidence")
    boundaryEvidence = numpy.ones(rag.numberOfBoundaries(), dtype=numpy.float64)
    be = numpy.ones([rag.numberOfBoundaries(), 2], dtype=numpy.float64)
    energy = numpy.ones([rag.numberOfBoundaries(), 2], dtype=numpy.float64)
    for b in range(rag.numberOfBoundaries()):
        boundaryEvidence[b] = numpy.mean(dataImage[boundaryPixels[b][:, 0], boundaryPixels[b][:, 1]])
        r = rag.adjacentRegions(b)
    boundaryEvidence = (boundaryEvidence - boundaryEvidence.min()) / (boundaryEvidence.max() - boundaryEvidence.min()) * (1.0 - 2.0 * epsilon) + epsilon
    be[:, 1] = numpy.exp(-1.0 * boundaryEvidence[:] * sigma)
    be[:, 0] = 1.0 - be[:, 1]
    energy[:, 0] = (-1.0 * numpy.log((1) * (1.0 - beta))) + be[:, 0]
    energy[:, 1] = (-1.0 * numpy.log((1) * (beta))) + be[:, 1]

    if verbose:
        print("build gm")
    gm = opengm.graphicalModel(numpy.ones(rag.numberOfRegions(), dtype=numpy.uint64) * numLabels)
    shapePotts = [numLabels, numLabels]
    print("add unaries")
    for r in range(rag.numberOfRegions()):
        f = dists[r, :] * gamma
        vis = [r]
        gm.addFactor(gm.addFunction(f), vis)
    print("add 2.order")
    for b in range(rag.numberOfBoundaries()):
        f = opengm.pottsFunction(shapePotts, energy[b, 0], energy[b, 1])
        vis = rag.adjacentRegions(b)
        gm.addFactor(gm.addFunction(f), vis)
    return gm
def getGraphicalModel(words):
    #print 1
    noNodes = sum(map(lambda x: 1 if x is not None else 0, words))
    word_factors_list = []
    no_of_states = []
    for word in words:
        if word is not None:
            factors = get_unary_factors(word)
            word_factors_list.append(factors)
            no_of_states.append(len(factors.keys()))
    #print 2
    gm = opengm.graphicalModel(no_of_states)

    # Add unary factor nodes for each word factor.
    for i, word_factors in enumerate(word_factors_list):
        factor_handle = gm.addFunction(np.array(word_factors.values()))
        gm.addFactor(factor_handle, i)

    #print 3
    # TODO: Assuming that relation exists only left to right
    for i in range(len(word_factors_list) - 1):
        # TODO: Just getting the similarity score.
        words_i = word_factors_list[i].keys()
        words_i1 = word_factors_list[i + 1].keys()
        binary_func = []
        for word_a in words_i:
            word_a_values = []
            for word_b in words_i1:
                word_a_values.append(cos_sim.get_sim(final_model.get_row(word_a),
                                                     final_model.get_row(word_b)))
            binary_func.append(word_a_values)
        factor_handle = gm.addFunction(np.array(binary_func))
        gm.addFactor(factor_handle, [i, i + 1])
    #print 4
    #opengm.visualizeGm(gm)
    inf = opengm.inference.BeliefPropagation(gm, parameter=opengm.InfParam(damping=0.05))
    inf.infer()
    #print 5
    return inf
def makeModel(img, sp, gt):
    assert sp.min() == 0
    shape = img.shape[0:2]

    gg = vigra.graphs.gridGraph(shape)
    rag = vigra.graphs.regionAdjacencyGraph(gg, sp)
    numVar = rag.nodeNum
    assert rag.nodeNum == rag.maxNodeId + 1

    # make model
    gm = graphicalModel(numpy.ones(numVar) * numberOfLabels)
    assert gm.numberOfVariables == rag.nodeNum
    assert gm.numberOfVariables == rag.maxNodeId + 1

    # compute features
    unaryFeat = getFeat(fUnary, img)
    unaryFeat = numpy.nan_to_num(
        numpy.concatenate(unaryFeat, axis=2).view(numpy.ndarray)).astype('float32')
    unaryFeat = vigra.taggedView(unaryFeat, 'xyc')
    accList = []

    #for c in range(unaryFeat.shape[-1]):
    #    cUnaryFeat = unaryFeat[:,:,c]
    #    cAccFeat = rag.accumulateNodeFeatures(cUnaryFeat)[:,None]
    #    accList.append(cAccFeat)
    #accUnaryFeat = numpy.concatenate(accList,axis=1)

    accUnaryFeat = rag.accumulateNodeFeatures(unaryFeat)  #[:,None]
    #print accUnaryFeat.shape
    #accUnaryFeat = rag.accumulateNodeFeatures(unaryFeat[:,:,:])
    #accUnaryFeat = vigra.taggedView(accUnaryFeat,'nc')
    #accUnaryFeat = accUnaryFeat[1:accUnaryFeat.shape[0],:]

    #binaryFeat = binaryFeat.reshape([numVar,-1])

    # add unaries
    lUnaries = lUnaryFunctions(
        weights=weights, numberOfLabels=numberOfLabels,
        features=accUnaryFeat, weightIds=uWeightIds,
        featurePolicy=FeaturePolicy.sharedBetweenLabels,
        makeFirstEntryConst=numberOfLabels == 2,
        addConstFeature=addConstFeature)
    fids = gm.addFunctions(lUnaries)
    gm.addFactors(fids, numpy.arange(numVar))

    if len(fBinary) > 0:
        binaryFeat = getFeat(fBinary, img, topoShape=False)
        binaryFeat = numpy.nan_to_num(
            numpy.concatenate(binaryFeat, axis=2).view(numpy.ndarray)).astype('float32')
        edgeFeat = vigra.graphs.edgeFeaturesFromImage(gg, binaryFeat)
        accBinaryFeat = rag.accumulateEdgeFeatures(edgeFeat)

        uvIds = numpy.sort(rag.uvIds(), axis=1)
        assert uvIds.min() == 0
        assert uvIds.max() == gm.numberOfVariables - 1

        lp = lPottsFunctions(weights=weights, numberOfLabels=numberOfLabels,
                             features=accBinaryFeat, weightIds=bWeightIds,
                             addConstFeature=addConstFeature)
        fids = gm.addFunctions(lp)
        gm.addFactors(fids, uvIds)

    return gm
import numpy
import opengm

img = numpy.random.rand(4, 4)
dimx = img.shape[0]
dimy = img.shape[1]
numVar = dimx * dimy
numLabels = 2
beta = 0.3

numberOfStates = numpy.ones(numVar, dtype=opengm.index_type) * numLabels
gm = opengm.graphicalModel(numberOfStates, operator='adder')

# Adding unary function and factors
for y in range(dimy):
    for x in range(dimx):
        f = numpy.ones(2, dtype=numpy.float32)
        f[0] = img[x, y]
        f[1] = 1.0 - img[x, y]
        fid = gm.addFunction(f)
        gm.addFactor(fid, (x * dimy + y, ))

# Adding binary function and factors
vis = numpy.ones(5, dtype=opengm.index_type)
# add one binary function (Potts function)
f = numpy.ones(pow(numLabels, 2), dtype=numpy.float32).reshape(
    numLabels, numLabels) * beta
for l in range(numLabels):
    f[l, l] = 0
fid = gm.addFunction(f)
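# The excerpt above stops right after the shared Potts-like function is created. What follows
# is a hedged sketch (not part of the original snippet) of how the pairwise factors are
# typically attached to the 4-neighborhood and the model solved; it reuses gm, fid, dimx and
# dimy from above, and the choice of ICM as solver is illustrative only.
for y in range(dimy):
    for x in range(dimx):
        # variable index of pixel (x, y) is x * dimy + y, as in the unary loop above
        if x + 1 < dimx:
            gm.addFactor(fid, [x * dimy + y, (x + 1) * dimy + y])   # horizontal edge
        if y + 1 < dimy:
            gm.addFactor(fid, [x * dimy + y, x * dimy + (y + 1)])   # vertical edge

inf = opengm.inference.Icm(gm)   # any OpenGM solver would do here
inf.infer()
labels = inf.arg().reshape(dimx, dimy)
print(labels)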
dimx = 100
dimy = 100
numVar = dimx * dimy
numLabels = 20
beta = 0.8

# ---------------------------------
# reserving factors and functions
# can save a lot of time
# ---------------------------------
t = time.time()

numberOfStates = numpy.ones(numVar, dtype=opengm.index_type) * numLabels
gm = opengm.graphicalModel(numberOfStates, operator='adder')

# Adding unary function and factors
for y in range(dimy):
    for x in range(dimx):
        f1 = numpy.random.random(numLabels).astype(numpy.float32)
        fid = gm.addFunction(f1)
        gm.addFactor(fid, (x + dimx * y, ))

# Adding binary function and factors
vis = numpy.ones(5, dtype=opengm.index_type)
# add one binary function (Potts function)
f = numpy.ones(pow(numLabels, 2)).reshape(numLabels, numLabels) * beta
for l in range(numLabels):
    f[l, l] = 0
fid = gm.addFunction(f)

# add binary factors
for y in range(dimy):
def segment_overlap_graph(pixel_unaries, segment_map, segment_unaries,
                          pixel_regularizer=None, segment_regularizer=None,
                          inter_layer_regularizer=None):
    """
    Creates a graphical model comprised of two layers.
        - The first layer is a pixel lattice.
        - The second layer is a region adjacency graph over segments.
        - Connections exist between pixels where they are overlapped by a segment.

    Parameters:
        - pixel_unaries - a 3D array of shape (width, height, n_labels).
        - segment_map - a 2D array of shape (width, height). Each element >= 0 is a segment id
          that maps the corresponding pixel to that id. -1 represents no segment, and no
          corresponding node will be added.
        - segment_unaries - a 2D array of shape (n_segments, n_labels).
        - pixel_regularizer (optional) - a pairwise opengm function, e.g.
          opengm.PottsFunction([2,2],0.0,beta), or a list of opengm functions of length n_pixels.
        - segment_regularizer (optional) - a pairwise opengm function, same requirements as
          pixel_regularizer.
        - inter_layer_regularizer (optional) - a pairwise opengm function, same requirements as
          pixel_regularizer.
    """
    _check_valid_segment_map(segment_map)
    _check_compatible_segment_map_unaries(segment_map, segment_unaries)

    # calculate how many variables and factors will be required
    n_pixels = pixel_unaries.shape[0] * pixel_unaries.shape[1]
    n_segments = segment_unaries.shape[0]
    n_variables = n_pixels + n_segments

    n_labels_pixels = pixel_unaries.shape[-1]
    n_labels_segments = segment_unaries.shape[-1]

    # calculate the region adjacency graph for the segments
    rag_edges = segment_map_to_rag_edges(segment_map)
    rag_edges += n_pixels  # segment indices start at n_pixels, remember!

    n_pixel_edges = (pixel_unaries.shape[0] - 1) * pixel_unaries.shape[1] + \
                    (pixel_unaries.shape[1] - 1) * pixel_unaries.shape[0]
    n_segment_edges = rag_edges.shape[0]  # check this is right
    n_inter_edges = n_pixels
    n_edges = n_pixel_edges + n_segment_edges + n_inter_edges

    # allocate space for the model and all its variables
    gm = opengm.graphicalModel([n_labels_pixels] * n_pixels +
                               [n_labels_segments] * n_segments)
    gm.reserveFunctions(n_variables + 3, 'explicit')  # the unary functions plus the 3 types of regularizer
    gm.reserveFactors(n_variables + n_edges)

    # add unary functions and factors
    fids = gm.addFunctions(pixel_unaries.reshape([n_pixels, n_labels_pixels]))
    gm.addFactors(fids, np.arange(n_pixels), finalize=False)

    fids = gm.addFunctions(segment_unaries)
    gm.addFactors(fids, n_pixels + np.arange(n_segments), finalize=False)

    ## add pairwise functions
    # pixel lattice
    if pixel_regularizer is not None:
        fid = gm.addFunction(pixel_regularizer)
        vis = opengm.secondOrderGridVis(pixel_unaries.shape[0], pixel_unaries.shape[1])
        gm.addFactors(fid, vis, finalize=False)

    # segment rag
    if segment_regularizer is not None:
        fid = gm.addFunction(segment_regularizer)
        gm.addFactors(fid, np.sort(rag_edges, axis=1), finalize=False)

    # inter-layer
    if inter_layer_regularizer is not None:
        fid = gm.addFunction(inter_layer_regularizer)
        vis = np.dstack([
            np.arange(n_pixels).reshape(pixel_unaries.shape[:2]),
            segment_map
        ]).reshape((-1, 2))
        vis = _remove_rows_with_negative(vis)
        vis[:, 1] += n_pixels
        gm.addFactors(fid, vis, finalize=False)

    gm.finalize()
    return gm
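# A hedged usage sketch for segment_overlap_graph above (not from the original source). It
# assumes the helpers it calls (_check_valid_segment_map, segment_map_to_rag_edges,
# _remove_rows_with_negative, ...) are available in the same module; all shapes and the Potts
# regularizers below are invented for illustration.
import numpy as np
import opengm

pixel_unaries = np.random.rand(20, 30, 2)              # 20x30 image, 2 pixel labels
segment_map = np.random.randint(0, 4, size=(20, 30))   # 4 segments; -1 would mean "no segment"
segment_unaries = np.random.rand(4, 2)                 # 4 segments, 2 segment labels

gm = segment_overlap_graph(
    pixel_unaries, segment_map, segment_unaries,
    pixel_regularizer=opengm.PottsFunction([2, 2], 0.0, 0.5),
    segment_regularizer=opengm.PottsFunction([2, 2], 0.0, 0.5),
    inter_layer_regularizer=opengm.PottsFunction([2, 2], 0.0, 1.0))
print(gm.numberOfVariables)   # 20 * 30 pixels + 4 segments = 604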
def build_factor_graph(G, nodes, edges, n_annots, n_names, lookup_annot_idx,
                       use_unaries=True, edge_probs=None, operator='multiplier'):
    node_state_card = np.ones(n_annots, dtype=index_type) * n_names
    numberOfStates = node_state_card
    # n_edges = len(edges)
    # n_edge_states = 2
    # edge_state_card = np.ones(n_edges, dtype=index_type) * n_edge_states
    # numberOfStates = np.hstack([node_state_card, edge_state_card])
    # gm = opengm.graphicalModel(numberOfStates, operator='adder')
    gm = opengm.graphicalModel(numberOfStates, operator=operator)

    annot_idxs = list(range(n_annots))
    # edge_idxs = list(range(n_annots, n_annots + n_edges))
    import scipy.special

    if use_unaries:
        unaries = np.ones((n_annots, n_names)) / n_names
        # unaries[0][0] = 1
        # unaries[0][1:] = 0
        for annot_idx in annot_idxs:
            fid = gm.addFunction(unaries[annot_idx])
            gm.addFactor(fid, annot_idx)

    # Add a Potts function for each edge
    pairwise_factor_idxs = []
    for count, (aid1, aid2) in enumerate(edges, start=len(list(gm.factors()))):
        varx1, varx2 = ut.take(lookup_annot_idx, [aid1, aid2])
        var_indicies = np.array([varx1, varx2])

        if edge_probs is None:
            p_same, p_diff = get_edge_id_probs(G, aid1, aid2, n_names)
        else:
            p_same, p_diff = edge_probs[count]

        use_logit = operator == 'adder'
        if use_logit:
            eps = 1E-9
            p_same = np.clip(p_same, eps, 1.0 - eps)
            same_weight = scipy.special.logit(p_same)
            # valueEqual = -same_weight
            valueEqual = 0
            valueNotEqual = same_weight
            if not np.isfinite(valueNotEqual):
                """
                python -m plottool.draw_func2 --exec-plot_func --show --range=-1,1 --func=scipy.special.logit
                """
                print('valueNotEqual = %r' % (valueNotEqual, ))
                print('p_same = %r' % (p_same, ))
                raise ValueError('valueNotEqual')
        else:
            valueEqual = p_same
            valueNotEqual = p_diff

        p_same, p_diff = get_edge_id_probs(G, aid1, aid2, n_names)

        pairwise_factor_idxs.append(count)

        potts_func = opengm.PottsFunction((n_names, n_names),
                                          valueEqual=valueEqual,
                                          valueNotEqual=valueNotEqual)
        potts_func_id = gm.addFunction(potts_func)
        gm.addFactor(potts_func_id, var_indicies)

    gm.pairwise_factor_idxs = pairwise_factor_idxs
    gm.G = G
    return gm
def infer(self):
    logging.info("Running Inference")

    ###########################
    # define binary variables #
    ###########################
    # two binary variables:
    # var 0 (stage S1): dimension is 2
    # var 1 (event E1): dimension is 2
    variables = [2, 2]

    ################################
    #
    # construct the Factor Graph
    #
    ################################
    gm = opengm.graphicalModel(variables, operator='multiplier')

    ########################################################################################
    # TODO: Fill in values in g_func, and g_var, according to the provided tables          #
    ########################################################################################
    f_func = np.array([0.1, 0.9])  # priors f
    f_var = [0]                    # f(S1), using S1 as variable 0

    g_func = np.array([[0, 0.2], [0, 0.5]])  # factor function g
    g_var = [0, 1]                           # g(S1, S2)
    ############
    # END TODO #
    ############

    ##################################
    # connect factor functions to FG #
    ##################################
    gm.addFactor(gm.addFunction(f_func), f_var)  # add prior to event
    gm.addFactor(gm.addFunction(g_func), g_var)  # add factor function to event (E1) and stage (S1)

    ##################################
    #
    # belief propagation inference
    #
    ##################################
    inf = opengm.inference.BeliefPropagation(gm, accumulator='maximizer')
    inf.infer()

    ##############
    # get argmax #
    ##############
    arg = inf.arg()
    print("Inference result: ", arg)

    ##############################
    # get marginal probabilities #
    ##############################
    marginals = inf.marginals(range(len(variables)))

    #############################################################
    #
    # get marginal of the state variable (variable index = 0)
    #
    #############################################################
    vars = [0]
    max_marg = marginals[0]
    max_val = 0
    for i in vars:
        marginals_xi = marginals[i]
        if np.max(marginals_xi) > np.max(max_marg):  # compare peak values; elementwise '>' on arrays is ambiguous in an if
            max_val = i
            max_marg = marginals_xi
        marginals_xi /= np.sum(marginals_xi)
        print("x_{} marginal: {}".format(i, marginals_xi))
def classifyTissue(heatingParam):
    numLabels = 3
    dxdy, noParam = heatingParam.shape
    heatingParam = np.reshape(heatingParam, (640, 480, noParam))
    shape = heatingParam.shape
    dimx, dimy, noParam = shape[0], shape[1], shape[2]
    numVar = dimx * dimy
    dimx, dimy = shape[0], shape[1]

    numberOfStates = np.ones(numVar, dtype=opengm.index_type) * numLabels
    gm = opengm.graphicalModel(numberOfStates)

    # create unary potentials for CRF
    f = np.ones(numLabels, dtype=np.float32)
    for y in range(dimy):
        for x in range(dimx):
            f = np.ones(numLabels, dtype=np.float32)
            beta = abs(heatingParam[x, y, 2] - heatingParam[x, y, 3])
            f[0] = abs(heatingParam[x, y, 2])      # * (1/beta)
            f[1] = beta
            f[2] = abs(1 + heatingParam[x, y, 2])  # * (1/beta)
            fid = gm.addFunction(f)
            gm.addFactor(fid, (x * dimy + y,))

    # create pairwise potentials
    for y in range(dimy):
        for x in range(dimx):
            f_pw = np.ones(numLabels * numLabels, dtype=np.float32).reshape(numLabels, numLabels)
            if (x + 1 < dimx):
                beta_i = abs(heatingParam[x, y, 2] - heatingParam[x, y, 3])
                beta_j = abs(heatingParam[x + 1, y, 2] - heatingParam[x + 1, y, 3])
                scaling_f = 1 / abs(beta_i - beta_j)
                f_pw[0, 0] = 0
                f_pw[1, 1] = 0
                f_pw[2, 2] = 0
                f_pw[0, 1] = scaling_f
                f_pw[0, 2] = scaling_f * 2
                f_pw[1, 0] = scaling_f
                f_pw[1, 2] = scaling_f
                f_pw[2, 0] = f_pw[0, 2]
                f_pw[2, 1] = f_pw[1, 2]
                fid = gm.addFunction(f_pw)
                gm.addFactor(fid, np.array([x * dimy + y, (x + 1) * dimy + y], dtype=opengm.index_type))
            if (y + 1 < dimy):
                beta_i = abs(heatingParam[x, y, 2] - heatingParam[x, y, 3])
                beta_j = abs(heatingParam[x, y + 1, 2] - heatingParam[x, y + 1, 3])
                scaling_f = 1 / abs(beta_i - beta_j)
                f_pw[0, 0] = 0
                f_pw[1, 1] = 0
                f_pw[2, 2] = 0
                f_pw[0, 1] = scaling_f
                f_pw[0, 2] = scaling_f * 2
                f_pw[1, 0] = scaling_f
                f_pw[1, 2] = scaling_f
                f_pw[2, 0] = f_pw[0, 2]
                f_pw[2, 1] = f_pw[1, 2]
                fid = gm.addFunction(f_pw)
                gm.addFactor(fid, [x * dimy + y, x * dimy + (y + 1)])

    parameterBP = opengm.InfParam(steps=noIter, damping=0.5)
    parameter = opengm.InfParam(steps=noIter)
    # inf = opengm.inference.GraphCut(gm)
    # inf = opengm.inference.TrwsExternal(gm, parameter=parameter)
    # inf = opengm.inference.TreeReweightedBp(gm, parameter=parameter)
    inf = opengm.inference.BeliefPropagation(gm, parameter=parameterBP)

    callback = PyCallback((dimx, dimy), numLabels)
    # visitor = inf.pythonVisitor(callback, visitNth=1)
    # inf.infer(visitor)

    print("*** INFERENCE")
    startTime = time.time()
    inf.infer()
    endTime = time.time()

    labelVector = inf.arg()
    E = gm.evaluate(labelVector)
    print("FINAL E " + str(E) + " dt " + str(endTime - startTime) + "s")

    return labelVector
num_unary_feats = num_labels * X[0][0].shape[1]
num_weights = num_unary_feats + num_edge_feats

# create and initialize weights
print('num_weights =', num_weights)
print('num_instances =', len(X))
ogm_ds = learning.createDataset(num_weights, numInstances=len(X),
                                loss="generalized-hamming")
weights = ogm_ds.getWeights()

for idx, (x, y) in enumerate(zip(X, Y)):
    y[y == -1] = 0  # FIXME: introduce a void label, so long: make the void label background
    unary_feats, edges, edge_feats = x
    num_vars = unary_feats.shape[0]
    states = np.ones(num_vars, dtype=opengm.index_type) * num_labels
    gm = opengm.graphicalModel(states, operator='adder')

    lossParam = learning.GeneralizedHammingLossParameter()
    lossParam.setLabelLossMultiplier(np.array(label_weights))

    # add unary factors
    weight_ids = np.arange(0, num_labels * unary_feats.shape[1]).reshape((num_labels, -1))
    for feat_idx, unary_feat in enumerate(unary_feats):
        # make sure each label sees all features, but uses its own weights
        unary_feat_array = np.repeat(unary_feat.reshape((-1, 1)), num_labels, axis=1)
        f = learning.lUnaryFunction(weights, num_labels, unary_feat_array, weight_ids)
        var_idxs = np.array([feat_idx], dtype=np.uint64)
        fid = gm.addFunction(f)
        gm.addFactor(fid, var_idxs)
    #var_idxs = np.arange(0, num_vars, dtype=np.uint64)
    #gm.addFactors(fids, var_idxs)
# model parameter
gridSize = [3, 3]  # size of grid
beta = 0.7         # bias to choose between under- and over-segmentation
high = 100         # closedness-enforcing soft-constraint value for forbidden configurations

# size of the topological grid
tGridSize = [2 * gridSize[0] - 1, 2 * gridSize[1] - 1]
nrOfVariables = gridSize[1] * (gridSize[0] - 1) + gridSize[0] * (gridSize[1] - 1)
cToVi = TopologicalCoordinateToIndex(gridSize)

# some random data on a grid
data = numpy.random.random(gridSize[0] * gridSize[1]).astype(
    numpy.float32).reshape(gridSize[0], gridSize[1])

# construct gm
numberOfLabels = numpy.ones(nrOfVariables, dtype=opengm.label_type) * 2
gm = opengm.graphicalModel(numberOfLabels)

# 4th-order closedness function
fClosedness = numpy.zeros(pow(2, 4), dtype=numpy.float32).reshape(2, 2, 2, 2)
for x1 in range(2):
    for x2 in range(2):
        for x3 in range(2):
            for x4 in range(2):
                labelsum = x1 + x2 + x3 + x4
                if labelsum != 2 and labelsum != 0:
                    fClosedness[x1, x2, x3, x4] = high
fidClosedness = gm.addFunction(fClosedness)

# for each boundary in the grid, i.e. for each variable
# of the model, add one 1st-order function
# and one 1st-order factor,
# and for each junction of four inter-pixel edges on the grid,
sys.stdout.write("\n")

# model parameter
gridSize = [10, 10]  # size of grid
beta = 0.7           # bias to choose between under- and over-segmentation
high = 100           # closedness-enforcing soft-constraint value for forbidden configurations

# size of the topological grid
tGridSize = [2 * gridSize[0] - 1, 2 * gridSize[1] - 1]
nrOfVariables = gridSize[1] * (gridSize[0] - 1) + gridSize[0] * (gridSize[1] - 1)
cToVi = TopologicalCoordinateToIndex(gridSize)

# some random data on a grid
data = numpy.random.random(gridSize[0] * gridSize[1]).astype(numpy.float32).reshape(gridSize[0], gridSize[1])

# construct gm
numberOfLabels = numpy.ones(nrOfVariables, dtype=numpy.uint64) * 2
gm = opengm.graphicalModel(numberOfLabels)

# 4th-order closedness function
fClosedness = numpy.zeros(pow(2, 4), dtype=numpy.float32).reshape(2, 2, 2, 2)
for x1 in range(2):
    for x2 in range(2):
        for x3 in range(2):
            for x4 in range(2):
                labelsum = x1 + x2 + x3 + x4
                if labelsum != 2 and labelsum != 0:
                    fClosedness[x1, x2, x3, x4] = high
fidClosedness = gm.addFunction(fClosedness)

# for each boundary in the grid, i.e. for each variable
# of the model, add one 1st-order function
# and one 1st-order factor,
# and for each junction of four inter-pixel edges on the grid,
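# Both grid snippets above end with a comment describing the remaining construction. The sketch
# below (not the original script) shows one way to finish it, continuing from gridSize, data,
# beta, gm and fidClosedness of the snippet above; it uses its own boundary-variable indexing
# instead of the cToVi helper (whose definition is not shown here), and the 1st-order data term
# is illustrative only (label 0 = boundary off, label 1 = boundary on).
nx, ny = gridSize
nVert = (nx - 1) * ny   # number of boundaries between pixels (x, y) and (x + 1, y)

def vVar(x, y):         # index of the vertical boundary between (x, y) and (x + 1, y)
    return x * ny + y

def hVar(x, y):         # index of the horizontal boundary between (x, y) and (x, y + 1)
    return nVert + x * (ny - 1) + y

# one 1st-order function and factor per boundary variable
for x in range(nx - 1):
    for y in range(ny):
        grad = abs(data[x, y] - data[x + 1, y])
        e = numpy.array([grad, beta * (1.0 - grad)], dtype=numpy.float32)  # [off-cost, on-cost]
        gm.addFactor(gm.addFunction(e), [vVar(x, y)])
for x in range(nx):
    for y in range(ny - 1):
        grad = abs(data[x, y] - data[x, y + 1])
        e = numpy.array([grad, beta * (1.0 - grad)], dtype=numpy.float32)
        gm.addFactor(gm.addFunction(e), [hVar(x, y)])

# one closedness factor per interior junction, wired to the four boundary
# variables that meet there (variable indices must be in ascending order)
for x in range(nx - 1):
    for y in range(ny - 1):
        vis = sorted([vVar(x, y), vVar(x, y + 1), hVar(x, y), hVar(x + 1, y)])
        gm.addFactor(fidClosedness, vis)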
def _update_state_opengm(model, weight_key='cut_prob', name_label_key='name_label'):
    import opengm
    import scipy.special
    graph = model.graph
    n_annots = len(model.graph)
    n_names = n_annots

    nodes = sorted(graph.nodes())
    edges = [tuple(sorted(e)) for e in graph.edges()]
    edges = ut.sortedby2(edges, edges)

    index_type = opengm.index_type
    node_state_card = np.ones(n_annots, dtype=index_type) * n_names
    numberOfStates = node_state_card
    annot_idxs = list(range(n_annots))
    lookup_annot_idx = ut.dzip(nodes, annot_idxs)
    gm = opengm.graphicalModel(numberOfStates, operator='adder')

    # annot_idxs = list(range(n_annots))
    # edge_idxs = list(range(n_annots, n_annots + n_edges))

    # if use_unaries:
    #     unaries = np.ones((n_annots, n_names)) / n_names
    #     # unaries[0][0] = 1
    #     # unaries[0][1:] = 0
    #     for annot_idx in annot_idxs:
    #         fid = gm.addFunction(unaries[annot_idx])
    #         gm.addFactor(fid, annot_idx)

    # Add a Potts function for each edge
    pairwise_factor_idxs = []
    for count, (aid1, aid2) in enumerate(edges, start=len(list(gm.factors()))):
        varx1, varx2 = ut.take(lookup_annot_idx, [aid1, aid2])
        var_indicies = np.array([varx1, varx2])

        p_same = graph.get_edge_data(aid1, aid2)['cut_prob']
        # p_diff = 1 - p_same

        eps = 1E-9
        p_same = np.clip(p_same, eps, 1.0 - eps)
        same_weight = scipy.special.logit(p_same)
        # valueEqual = -same_weight
        valueEqual = 0
        valueNotEqual = same_weight
        if not np.isfinite(valueNotEqual):
            """
            python -m plottool.draw_func2 --exec-plot_func --show --range=-1,1 --func=scipy.special.logit
            """
            print('valueNotEqual = %r' % (valueNotEqual, ))
            print('p_same = %r' % (p_same, ))
            raise ValueError('valueNotEqual')

        pairwise_factor_idxs.append(count)

        potts_func = opengm.PottsFunction((n_names, n_names),
                                          valueEqual=valueEqual,
                                          valueNotEqual=valueNotEqual)
        potts_func_id = gm.addFunction(potts_func)
        gm.addFactor(potts_func_id, var_indicies)

    gm.pairwise_factor_idxs = pairwise_factor_idxs
    model.gm = gm
matplot.subplot(2, 2, 3)
matplot.imshow(N_spec2)
matplot.title('specular theta 2')
matplot.show()

N_solutions = np.dstack((N_diff_solutions, N_spec_solutions))
Diff_flag = np.hstack((Diff_flag1, Diff_flag2))

rows, cols = mask1.shape
# matplot.imshow(N)
# matplot.show()

##=================> Optimisation based on opengm
noofNodes = np.sum(mask1)
nodeStates = np.ones(noofNodes, dtype=opengm.index_type) * 4  # possible answers are N or T*N
gm = opengm.graphicalModel(nodeStates)
# gm = opengm.adder.GraphicalModel(np.ones(noofNodes, dtype=opengm.index_type), reserveNumFactorsPerVariable=3)

spec_threshold = 0.99
specmask_valid = specmask[mask1 == 1]
flip_factor = 7
w_u = 1

# 1. add node factor
for i in range(noofNodes):
    nState = np.int32(nodeStates[i])
    f = np.zeros(nState, dtype=np.float32)
    Ng_i = N_guide_valid[i, :]
    if np.any(np.isnan(Ng_i)):
        for k in range(nState):
numVar = dimx * dimy
numLabels = 2
numberOfStates = numpy.ones(numVar, dtype=opengm.index_type) * numLabels

vis2Order = opengm.secondOrderGridVis(dimx, dimy)
numFac = len(vis2Order)
randf = numpy.random.rand(numFac, numLabels, numLabels).astype(numpy.float64)

print(randf.shape)
print("numVar", numVar, "numFac", numFac)

print("# METHOD A")
with opengm.Timer():
    gm = opengm.graphicalModel(numberOfStates, operator="adder",
                               reserveNumFactorsPerVariable=4)
    gm.reserveFunctions(numFac, "explicit")
    fids = gm.addFunctions(randf)
    gm.addFactors(fids, vis2Order)

print("# METHOD B")
with opengm.Timer():
    # (reserveNumFactorsPerVariable does not make sense if we do not "finalize" the factors directly)
    gm = opengm.graphicalModel(numberOfStates, operator="adder")
    gm.reserveFactors(numFac)
    gm.reserveFunctions(numFac, "explicit")
    fids = gm.addFunctions(randf)
    gm.addFactors(fids, vis2Order, finalize=False)
    gm.finalize()