Example #1
    def test_add_factors_generic(self):
        def mygen():
            yield 0
            yield 1

        gm = opengm.gm([2, 4])
        f = opengm.PottsFunction([2, 4], 0.0, 1.0)
        fid = gm.addFunction(f)
        vis_list = [
            [0, 1],
            (0, 1),
            (x for x in xrange(2)),
            mygen(),
            opengm.IndexVector(x for x in xrange(0, 2)),
            numpy.arange(0, 2, dtype=numpy.uint64),
        ]
        for i, vis in enumerate(vis_list):
            fIndex = gm.addFactor(fid, vis)
            assert gm.numberOfFactors == i + 1
            assert fIndex == i
            assert gm[fIndex].numberOfVariables == 2
            assert gm[fIndex].shape[0] == 2
            assert gm[fIndex].shape[1] == 4
            assert gm[fIndex].variableIndices[0] == 0
            assert gm[fIndex].variableIndices[1] == 1
Example #2
def create_graph(gm_db):
    print("Creating Graph ...")
    all_name_pairs = list(gm_db['pairwises'].keys())
    all_names = set()
    for n1, n2 in all_name_pairs:
        all_names.add(n1)
        all_names.add(n2)
    node2id = {}
    id2node = {}
    num_labels = gm_db['pairwises'][all_name_pairs[0]].shape[0]
    for i, n in enumerate(all_names):
        node2id[n] = i
        id2node[i] = n
    num_nodes = len(all_names)
    print("Num nodes: {}".format(num_nodes))
    gm = opengm.gm([num_labels] * num_nodes, operator='adder')
    if len(gm_db['unaries']) > 0:
        for i in range(num_nodes):
            gm.addFactor(gm.addFunction(gm_db['unaries'][id2node[i]]), [i])
    for n1, n2 in all_name_pairs:
        id1 = node2id[n1]
        id2 = node2id[n2]
        t = [id1, id2]
        p = gm_db['pairwises'][(n1, n2)]
        if id2 < id1:
            t = [id2, id1]
            p = p.T
        if id1 == id2:
            continue
        gm.addFactor(gm.addFunction(p), t)
    print(" - Done")
    return gm, node2id, id2node
Example #3
File: bv_viewer.py Project: timoMa/vigra
    def onClickedMulticut(self):

        p1 = self.probs.copy()
        p1 = numpy.clip(p1, 0.005, 1.0-0.005)
        p0 = 1.0 - p1

        weights = numpy.log(p0/p1)

        nVar = self.rag.maxNodeId + 1
        nos = numpy.ones(nVar)*nVar
        gm = opengm.gm(nos)


        uv = self.rag.uvIds()
        uv = numpy.sort(uv,axis=1)
        pf = opengm.pottsFunctions([nVar,nVar], numpy.array([0]),weights)
        fid = gm.addFunctions(pf)
        gm.addFactors(fid,uv)

        pparam = opengm.InfParam(seedFraction=0.05)
        parameter = opengm.InfParam(generator='randomizedWatershed',proposalParam=pparam,numStopIt=10,numIt=3000)
        inf = opengm.inference.IntersectionBased(gm, parameter=parameter)




        inf = opengm.inference.Multicut(gm)
        inf.infer(inf.verboseVisitor())
        arg = inf.arg()

        self.eArg = arg[uv[:,0]]!=arg[uv[:,1]]

        self.ctrlWidget.modeSelectorComboBox.setCurrentIndex(6)
Example #4
    def test_add_multiple_functions_order2a(self):
        nVar = 4
        nLabels = 2
        gm = opengm.gm([nLabels] * nVar)

        # add functionS
        fShape =[2,2,2]
        f = opengm.randomFunction(fShape)

        vis=numpy.ones([4,2])
        vis[0,0]=0
        vis[0,1]=1

        vis[1,0]=1
        vis[1,1]=2



        fid = gm.addFunction(f)

        gm.addFactor(fid,[0,1,2])


        assert gm[0][0,0,0]==f[0,0,0]
        assert gm[0][1,0,0]==f[1,0,0]

        assert gm[0][0,1,0]==f[0,1,0]
        assert gm[0][1,1,0]==f[1,1,0]
Example #5
def generate_grid(dimx, dimy, labels, beta1, beta2, operator="adder"):
    nos = numpy.ones(dimx * dimy, dtype=numpy.uint64) * labels
    gm = opengm.gm(nos, operator, 0)

    for vi in range(dimx * dimy):
        f1 = numpy.random.random((labels,)).astype(numpy.float64) * 0.6 + 0.2
        assert len(f1.shape) == 1
        assert f1.shape[0] == labels
        fid1 = gm.addFunction(f1)
        gm.addFactor(fid1, (vi,))
    f2 = numpy.ones([labels, labels], dtype=numpy.float64)
    for l in range(labels):
        f2[l, l] = beta1
    fid2 = gm.addFunction(f2)
    for y in range(dimy):
        for x in range(dimx):
            if x + 1 < dimx:
                vis = [x + y * dimx, x + 1 + y * dimx]
                vis.sort()
                gm.addFactor(fid2, vis)
            if y + 1 < dimy:
                vis = [x + y * dimx, x + (y + 1) * dimx]
                vis.sort()
                gm.addFactor(fid2, vis)
    return gm
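A minimal driver for the grid model above (my sketch, not part of the original test file; it assumes opengm and numpy are imported as in the snippet and reuses only inference calls that already appear in this collection):

# sketch: build a small grid and run belief propagation on it
gm = generate_grid(dimx=4, dimy=4, labels=2, beta1=0.2, beta2=0.8)
inf = opengm.inference.BeliefPropagation(gm, accumulator='minimizer')
inf.infer()
print(inf.arg())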
Example #6
    def test_add_multiple_functions_order1(self):
        nVar = 4
        nLabels = 2
        gm = opengm.gm([nLabels] * nVar)

        # add functionS
        fShape =[4,2]
        f = opengm.randomFunction(fShape)

        vis=numpy.ones([4,1])
        vis[0,0]=0
        vis[1,0]=1
        vis[2,0]=2
        vis[3,0]=3


        fids = gm.addFunctions(f)
        gm.addFactors(fids,vis)


        assert gm[1][(0,)]==f[1,0]
        assert gm[1][(1,)]==f[1,1]

        for x in xrange(4):
            assert gm[x][(0,)]==f[x,0]
            assert gm[x][(1,)]==f[x,1]
Example #7
def instantiate_sentence(sentence_data):
    # Get the number of tokens:
    numVar = len(sentence_data)
    # print 'numVar', numVar

    # The domain of the variables is the length of the y_list
    # Use the multiplier, the adder doesn't seem to work
    gm = opengm.gm([len(y_list)] * numVar, "multiplier")
    # gm=opengm.gm([len(y_list)]*numVar,'adder')
    # print gm.numberOfVariables
    # exit()

    # Create the 1st factors
    gm = create_first_order(gm, sentence_data)
    # print gm.factors()
    # for fac in gm.factors():
    #     print fac

    # Create the 2nd order factors (True for shared factors)
    gm = create_second_order(gm, sentence_data, True)
    # print gm.factors()
    # for fac in gm.factors():
    #     print fac

    # exit()

    return gm
Example #8
	def segment(self,weights,warmStart=None,verbose=False):

		#try :
		#	self.cgc.changeWeights(weights)

		#except :
		nVar 	    = self.cgp.numCells(2)
		nFac 		= self.cgp.numCells(1)
		cell1Bounds = self.cgp.cell1BoundsArray()-1
		self.gm2 = opengm.gm(numpy.ones(nVar,dtype=opengm.label_type)*nVar)

		assert self.gm2.numberOfVariables == nVar
		# init with zero potts functions
		#print weights

		pf = opengm.pottsFunctions([nVar,nVar],numpy.zeros(nFac),weights )
		fids = self.gm2.addFunctions(pf)


		# add factors 
		self.gm2.addFactors(fids,cell1Bounds)

		self.cgc2 = opengm.inference.Cgc(gm=self.gm2,parameter=opengm.InfParam(planar=True)) 



		if verbose :
			self.cgc2.infer(self.cgc2.verboseVisitor())
		else :
			self.cgc2.infer()
		
		self.labels[:]=self.cgc2.arg()
Example #9
    def __init__(self,cgp,beta=0.5):
        self.cgp=cgp
        self.probability =  numpy.ones(cgp.numCells(1),dtype=numpy.float64)
        self.weights     =  numpy.ones(cgp.numCells(1),dtype=numpy.float64)
        self.mean        =  numpy.ones(cgp.numCells(1),dtype=numpy.float64)
        self.std         =  numpy.ones(cgp.numCells(1),dtype=numpy.float64)

        # generate graphical model 
        self.cgc        = None
        self.beta=beta

        boundArray = self.cgp.cell1BoundsArray()-1

        nVar = cgp.numCells(2)
        nFac = cgp.numCells(1)
        space = numpy.ones(nVar,dtype=opengm.label_type)*nVar
        self.gm = opengm.gm(space)

        wZero  = numpy.zeros(nFac,dtype=opengm.value_type)

        pf=opengm.pottsFunctions([nVar,nVar],wZero,wZero)

        fids = self.gm.addFunctions(pf)
        self.gm.addFactors(fids,boundArray)
        self.cgc = opengm.inference.Cgc(gm=self.gm,parameter=opengm.InfParam(planar=True))
Example #10
        def predict():
            global features,rag, sp,arg

            trainingInstances = numpy.array(userLabels.keys(),dtype='uint64')
            labels = numpy.array([userLabels[k] for k in trainingInstances],dtype='uint32')[:,None]
            
            trainingInstances = numpy.clip(trainingInstances, 0, rag.maxEdgeId-2)

            assert trainingInstances.max() <= rag.maxEdgeId

            if len(labels)>10 and labels.min()==0 and labels.max()==1: 
                feat = features[trainingInstances,:]
                rf = vigra.learning.RandomForest(treeCount=255)
                oob = rf.learnRF(feat, labels)
                print "oob", oob
                probs = rf.predictProbabilities(features)[:,1]
                p1 = probs.copy()
                p1 = numpy.clip(p1, 0.005, 1.0-0.005)
                p0 = 1.0 - p1

                weights = numpy.log(p0/p1)
                nVar = rag.maxNodeId + 1
                nos = numpy.ones(nVar)*nVar
                gm = opengm.gm(nos)


                uv = rag.uvIds()

                if weights.shape[0] < uv.shape[0]:
                    diff  = uv.shape[0] - weights.shape[0]
                    val = numpy.zeros(diff)
                    weights = numpy.concatenate([weights,val])

                
                uv = numpy.sort(uv,axis=1)
                pf = opengm.pottsFunctions([nVar,nVar], numpy.array([0]),weights)
                fid = gm.addFunctions(pf)
                gm.addFactors(fid,uv)

                param = opengm.InfParam(planar=False)
                inf = opengm.inference.Cgc(gm,parameter=param)
                if arg is not None:
                    inf.setStartingPoint(arg)
                visitor = inf.timingVisitor(timeLimit=60.0)
                inf.infer(visitor)
                arg = inf.arg()
                eArg = arg[uv[:,0]]!=arg[uv[:,1]]


                return arg
                
            else:
                return None
Example #11
  def infer(self, pairwise_scores, unary_scores, k, mrf_type, filenames, target_class, bcd_scores):
    '''
    Returns:
      - argmin: a list of size k where each element is the label selected for the
        corresponding node.
      - t: running time for inference
    '''
    assert(bcd_scores is not None)
    import opengm
    num_internal_nodes = self.num_nodes
    num_external_nodes = bcd_scores.shape[0]
    num_labels = int(np.sqrt(pairwise_scores.shape[1]))
    indices = self.get_indices()
    gm = opengm.gm([num_labels]*num_internal_nodes + [1]*num_external_nodes, operator='adder')
    if unary_scores is not None:
      assert unary_scores.shape[0] == k
      assert unary_scores.shape[1] == num_labels
      for i in range(num_internal_nodes):
        gm.addFactor(gm.addFunction(-unary_scores[i]), [i])
    for i,e in enumerate(indices):
      gm.addFactor(gm.addFunction(-pairwise_scores[i].reshape(
                                      (num_labels, num_labels))), [e[0],e[1]])

    for i,j in itertools.product(range(num_external_nodes), range(num_internal_nodes)):
      gm.addFactor(gm.addFunction(-bcd_scores[i,j][...,None]),
                                    [j, i+num_internal_nodes])

    if mrf_type.endswith('astar'):
      inf = opengm.inference.AStar(gm, accumulator='minimizer')
    elif mrf_type.endswith('trws'):
      inf = opengm.inference.TrwsExternal(gm, accumulator='minimizer')
    elif mrf_type.endswith('trbp'):
      inf = opengm.inference.TreeReweightedBp(gm, accumulator='minimizer')
    elif mrf_type.endswith('map'):
      inf = opengm.inference.BeliefPropagation(gm, accumulator='minimizer')
    else:
      raise ValueError('Inference for mrf type {} is not implemented'.format(mrf_type))
    #br_inf = opengm.inference.Bruteforce(gm, accumulator='minimizer')
    #br_inf.infer()
    t0 = time.time()
    inf.infer()
    argmin = inf.arg()
    t1 = time.time() - t0
    energy = inf.value()
    self._debug = dict(gm=gm, pairwise_scores=pairwise_scores,
                       unary_scores=unary_scores,
                       k=k, mrf_type=mrf_type,
                       filenames=filenames, target_class=target_class,
                       bcd_scores=bcd_scores,
                       argmin=argmin,
                       min_energy=energy)
    return argmin[:k], t1, energy
Example #12
def intra_encounter_matching():
    import numpy as np
    from scipy.sparse import coo_matrix, csgraph
    qreq_, cm_list = testdata_workflow()
    # qaids = [cm.qaid for cm in cm_list]
    # top_aids = [cm.get_top_aids(5) for cm in cm_list]
    aid_pairs = np.array([(cm.qaid, daid)
                          for cm in cm_list for daid in cm.get_top_aids(5)])
    top_scores = ut.flatten([cm.get_top_scores(5) for cm in cm_list])

    N = aid_pairs.max() + 1
    mat = coo_matrix((top_scores, aid_pairs.T), shape=(N, N))
    csgraph.connected_components(mat)
    tree = csgraph.minimum_spanning_tree(mat)  # NOQA
    import plottool as pt
    dense = mat.todense()
    pt.imshow(dense / dense.max() * 255)
    pt.show_if_requested()

    # baseline jobid
    import opengm
    # https://github.com/opengm/opengm/blob/master/src/interfaces/python/examples/tutorial/OpenGM%20tutorial.ipynb
    numVar = 10
    unaries = np.ones([numVar, 3], dtype=opengm.value_type)
    gm = opengm.gm(np.ones(numVar, dtype=opengm.label_type) * 3)
    unary_fids = gm.addFunctions(unaries)
    gm.addFactors(unary_fids, np.arange(numVar))
    infParam = opengm.InfParam(
        workflow=ut.ensure_ascii('(IC)(TTC-I,CC-I)'),
    )
    inf = opengm.inference.Multicut(gm, parameter=infParam)
    visitor = inf.verboseVisitor(printNth=1, multiline=False)
    inf.infer(visitor)
    arg = inf.arg()

    # gridVariableIndices = opengm.secondOrderGridVis(img.shape[0], img.shape[1])
    # fid = gm.addFunction(regularizer)
    # gm.addFactors(fid, gridVariableIndices)
    # regularizer = opengm.pottsFunction([3, 3], 0.0, beta)
    # gridVariableIndices = opengm.secondOrderGridVis(img.shape[0], img.shape[1])
    # fid = gm.addFunction(regularizer)
    # gm.addFactors(fid, gridVariableIndices)

    unaries = np.random.rand(10, 10, 2)
    potts = opengm.PottsFunction([2, 2], 0.0, 0.4)
    gm = opengm.grid2d2Order(unaries=unaries, regularizer=potts)

    inf = opengm.inference.GraphCut(gm)
    inf.infer()
    arg = inf.arg()  # NOQA
    """
Example #13
def test_logistic_as_gm():
	
	# Define the number of features:
	n_features = 5000
	
	# Initialize the FeatureFunction object:
	FF = FeatFunctions(n_features=5000)

	# Create a weights vector, assigning a weight to each feature:
	global theta
	theta = 0.5 * randn(n_features)

	# Get a sentence:
	s0 = 'esto esta chido'

	# Define the feature set for this factor:
	# feat_list_names_factor0 = ['url','all_caps','ngrams']
	feat_list_names_factor0 = ['ngrams']

	# Define the functions for the factors:
	# phi0 = funCreator('phi0','I',feat_list_names_factor0,s0,FF)

	phi0 = funCreator('phi0',['I'],feat_list_names_factor0,s0,FF)
		
	# Initialize a graphical model:
	# For this example, one single variable, logistic regression
	numVars = 1
	cardinality = [3] # label space of the single variable
	
	# Create the gm:
	gm=opengm.gm(cardinality*numVars,'multiplier')

	# Transform the function to opengm:
	# The second parameter is a list of the cardinalities of the variables in the scope of this function
	py_func_phi0 = opengm.PythonFunction(phi0,[3])
    
	# Add the function to the pgm
	gm_func_phi0 = gm.addFunction(py_func_phi0)

	# # Add the opengm function to the gm model
	# The second parameter is the [set] of variables in the scope of this factor
	gm.addFactor(gm_func_phi0,0)

	# Run inference to get the variable marginals:
	bp = opengm.inference.BeliefPropagation(gm, 'integrator')
	bp.infer()
	
	# Get a list of all variables in the sentence:
	var_idx = [x for x in xrange(gm.numberOfVariables)]
	marg = bp.marginals(var_idx)
	print 'marg:\n',marg
Example #14
def cut_step(G, nodes, edges, n_annots, n_names, lookup_annot_idx, edge_probs, pass_values, fail_values):
    # Create nodes in the graphical model.  In this case there are <num_vars>
    # nodes and each node can be assigned to one of <num_vars> possible labels
    space = np.full((n_annots,), fill_value=n_names, dtype=opengm.index_type)
    gm = opengm.gm(space, operator='adder')

    # Use one potts function for each edge
    gm = build_factor_graph(G, nodes, edges , n_annots, n_names,
                            lookup_annot_idx, use_unaries=False,
                            edge_probs=edge_probs, operator='adder')

    with ut.Indenter('[CUTS]'):
        ut.cprint('Brute Force Labels: (energy minimization)', 'blue')
        infr = opengm.inference.Bruteforce(gm, accumulator='minimizer')
        infr.infer()
        labels = rectify_labels(G, infr.arg())
        print(pd.DataFrame(labels, columns=['nid'], index=pd.Series(nodes)).T)
        print('value = %r' % (infr.value(),))

        mc_params = opengm.InfParam(maximalNumberOfConstraintsPerRound=1000000,
                                    initializeWith3Cycles=True,
                                    edgeRoundingValue=1e-08, timeOut=36000000.0,
                                    cutUp=1e+75, reductionMode=3, numThreads=0,
                                    # allowCutsWithin=?
                                    # workflow=workflow
                                    verbose=False, verboseCPLEX=False)
        infr = opengm.inference.Multicut(gm, parameter=mc_params,
                                         accumulator='minimizer')

        infr.infer()
        labels = infr.arg()
        labels = rectify_labels(G, infr.arg())

        ut.cprint('Multicut Labels: (energy minimization)', 'blue')
        print(pd.DataFrame(labels, columns=['nid'], index=pd.Series(nodes)).T)
        print('value = %r' % (infr.value(),))

        if pass_values is not None:
            gotany = False
            for pval in pass_values:
                if all(labels == pval):
                    gotany = True
                    break
            if not gotany:
                ut.cprint('INCORRECT DID NOT GET PASS VALUES', 'red')
                print('pass_values = %r' % (pass_values,))

        if fail_values is not None:
            for fail in fail_values:
                if all(labels == fail):
                    ut.cprint('INCORRECT', 'red')
Example #15
    def test_add_multiple_functions_with_map(self):

        gm = opengm.gm([2] * 10)

        def add_a_function(w):
            return gm.addFunction(opengm.differenceFunction(shape=[2, 2], weight=w))

        weights = [0.2, 0.3, 0.4]
        fidList = list(map(add_a_function, weights))

        assert isinstance(fidList, list)
        assert len(fidList) == len(weights)

        gm.addFactors(fidList, [[0, 1], [1, 2], [3, 4]])
Example #16
 def test_add_multiple_functions(self):
     nVar = 10
     nLabels = 2
     for nFunctions in [1, 10]:
         for order in [1, 2, 3, 4]:
             gm = opengm.gm([nLabels] * nVar)
             # add functionS
             fShape = [nFunctions] + [nLabels] * order
             f = numpy.ones(fShape, dtype=opengm.value_type).reshape(-1)
             f[:] = numpy.random.rand(f.size)[:]
             f = f.reshape(fShape)
             fids = gm.addFunctions(f)
             # assertions
             assert len(fids) == nFunctions
Example #17
def intra_encounter_matching():
    import numpy as np
    from scipy.sparse import coo_matrix, csgraph
    qreq_, cm_list = testdata_workflow()
    # qaids = [cm.qaid for cm in cm_list]
    # top_aids = [cm.get_top_aids(5) for cm in cm_list]
    aid_pairs = np.array([(cm.qaid, daid) for cm in cm_list
                          for daid in cm.get_top_aids(5)])
    top_scores = ut.flatten([cm.get_top_scores(5) for cm in cm_list])

    N = aid_pairs.max() + 1
    mat = coo_matrix((top_scores, aid_pairs.T), shape=(N, N))
    csgraph.connected_components(mat)
    tree = csgraph.minimum_spanning_tree(mat)  # NOQA
    import plottool as pt
    dense = mat.todense()
    pt.imshow(dense / dense.max() * 255)
    pt.show_if_requested()

    # baseline jobid
    import opengm
    # https://github.com/opengm/opengm/blob/master/src/interfaces/python/examples/tutorial/OpenGM%20tutorial.ipynb
    numVar = 10
    unaries = np.ones([numVar, 3], dtype=opengm.value_type)
    gm = opengm.gm(np.ones(numVar, dtype=opengm.label_type) * 3)
    unary_fids = gm.addFunctions(unaries)
    gm.addFactors(unary_fids, np.arange(numVar))
    infParam = opengm.InfParam(workflow=ut.ensure_ascii('(IC)(TTC-I,CC-I)'), )
    inf = opengm.inference.Multicut(gm, parameter=infParam)
    visitor = inf.verboseVisitor(printNth=1, multiline=False)
    inf.infer(visitor)
    arg = inf.arg()

    # gridVariableIndices = opengm.secondOrderGridVis(img.shape[0], img.shape[1])
    # fid = gm.addFunction(regularizer)
    # gm.addFactors(fid, gridVariableIndices)
    # regularizer = opengm.pottsFunction([3, 3], 0.0, beta)
    # gridVariableIndices = opengm.secondOrderGridVis(img.shape[0], img.shape[1])
    # fid = gm.addFunction(regularizer)
    # gm.addFactors(fid, gridVariableIndices)

    unaries = np.random.rand(10, 10, 2)
    potts = opengm.PottsFunction([2, 2], 0.0, 0.4)
    gm = opengm.grid2d2Order(unaries=unaries, regularizer=potts)

    inf = opengm.inference.GraphCut(gm)
    inf.infer()
    arg = inf.arg()  # NOQA
    """
Example #18
    def test_constructor_generic(self):
        def mygen():
            yield 2
            yield 3
            yield 4

        nos_list = [
            numpy.arange(2, 5, dtype=numpy.uint64),
            [2, 3, 4],
            (2, 3, 4),
            (x for x in xrange(2, 5)),
            mygen(),
            opengm.IndexVector(x for x in xrange(2, 5))
        ]
        for i, nos in enumerate(nos_list):
            if(type(nos) != type(mygen())):
                pass
                # assert(len(nos)==3)
            gm = opengm.gm(nos, operator='adder')
            assert(gm.numberOfVariables == 3)
            assert(gm.numberOfLabels(0) == 2)
            assert(gm.numberOfLabels(1) == 3)
            assert(gm.numberOfLabels(2) == 4)
            assert(gm.space().numberOfVariables == 3)
            assert(gm.space()[0] == 2)
            assert(gm.space()[1] == 3)
            assert(gm.space()[2] == 4)

        nos_list = [
            numpy.arange(2, 5, dtype=numpy.uint64),
            [2, 3, 4],
            (2, 3, 4),
            (x for x in xrange(2, 5)),
            mygen(),
            opengm.IndexVector(x for x in xrange(2, 5))
        ]
        for i, nos in enumerate(nos_list):
            if(type(nos) != type(mygen())):
                pass  # assert(len(nos)==3)
            gm = opengm.adder.GraphicalModel()
            gm.assign(nos)
            assert(gm.numberOfVariables == 3)
            assert(gm.numberOfLabels(0) == 2)
            assert(gm.numberOfLabels(1) == 3)
            assert(gm.numberOfLabels(2) == 4)
            assert(gm.space().numberOfVariables == 3)
            assert(gm.space()[0] == 2)
            assert(gm.space()[1] == 3)
            assert(gm.space()[2] == 4)
Example #19
    def test_add_multiple_functions_with_map(self):

        gm = opengm.gm([2] * 10)

        def add_a_function(w):
            return gm.addFunction(opengm.differenceFunction(shape=[2, 2],
                                                            weight=w))

        weights = [0.2, 0.3, 0.4]
        fidList = list(map(add_a_function, weights))

        assert isinstance(fidList, list)
        assert len(fidList) == len(weights)

        gm.addFactors(fidList, [[0, 1], [1, 2], [3, 4]])
Example #20
	def __init__(self,cgp):
		super(MulticutClustering, self).__init__(cgp)

		# build a graphical model 
		nVar 	    = cgp.numCells(2)
		nFac 		= cgp.numCells(1)
		cell1Bounds = cgp.cell1BoundsArray()-1

		self.gm = opengm.gm(numpy.ones(nVar,dtype=opengm.label_type)*nVar)

		# init with zero potts functions
		fids = self.gm.addFunctions(opengm.pottsFunctions([nVar,nVar],numpy.zeros(nFac),numpy.zeros(nFac) ))
		# add factors 
		self.gm.addFactors(fids,cell1Bounds)

		self.cgc = opengm.inference.Cgc(gm=self.gm,parameter=opengm.InfParam(planar=True)) 
Example #21
def multicutFromCgp2(cgp,e0,e1,parameter=None):
	boundArray 	= cgp.cell1BoundsArray()-1
	nVar 		= cgp.numCells(2)
	nFac 		= cgp.numCells(1)
	space 		= numpy.ones(nVar,dtype=opengm.label_type)*nVar
	gm 			= opengm.gm(space)
	#w = numpy.require(weights,dtype=opengm.value_type)
	pf=opengm.pottsFunctions([nVar,nVar],e0,e1)

	fids = gm.addFunctions(pf)
	gm.addFactors(fids,boundArray)
	cgc = opengm.inference.Cgc(gm=gm,parameter=parameter)



	return cgc,gm
Example #22
    def _inference(self, node_potentials, inter_potentials,
                  edges, alg, return_energy=False, return_margin=False,
                  init=None, **kwargs):
        if node_potentials.shape[1]!=self.nNodeLabel:
            raise ValueError("Node feature function parameters should match node label numbers.")
        if inter_potentials.shape[0]!=self.nNodeLabel or inter_potentials.shape[1]!=self.nNodeLabel \
        or inter_potentials.shape[2]!=self.nEdgeLabel:
            raise ValueError("Interaction potential function parameters should match combination number of labels.")

        nNodes = node_potentials.shape[0]
        nEdges = edges.shape[0]

        gm = opengm.gm(np.hstack([np.ones(nNodes, dtype=opengm.label_type)*self.nNodeLabel, \
            np.ones(nEdges, dtype=opengm.label_type)*self.nEdgeLabel]))
        gm.reserveFactors(nNodes+2*nEdges)
        gm.reserveFunctions(nNodes+2*nEdges,'explicit')

        unaries = -np.require(node_potentials, dtype=opengm.value_type)
        fidUnaries = gm.addFunctions(unaries)
        visUnaries = np.arange(nNodes, dtype=np.uint64)
        gm.addFactors(fidUnaries, visUnaries)

        inter_potentials=np.repeat(inter_potentials[np.newaxis, :, :, :],
                                        edges.shape[0], axis=0)
        highOrderFunctions = -np.require(inter_potentials, dtype=opengm.value_type)
        fidHighOrder = gm.addFunctions(highOrderFunctions)
        vidHighOrder = np.hstack([edges, np.arange(nNodes, nNodes+nEdges).reshape((nEdges,1))])
        vidHighOrder = np.require(vidHighOrder, dtype=np.uint64)
        gm.addFactors(fidHighOrder, vidHighOrder)

        if alg == 'bp':
            inference = opengm.inference.BeliefPropagation(gm)
        elif alg == 'trw':
            inference = opengm.inference.TreeReweightedBp(gm)
        if init is not None:
            inference.setStartingPoint(init)

        inference.infer()
        res = inference.arg().astype(int)

        if return_margin:
            node_marginals = inference.marginals(np.arange(nNodes,
            dtype=opengm.index_type))
            return node_marginals
        if return_energy:
            return res, gm.evaluate(res)
        return res
Example #23
  def infer(self, pairwise_scores, unary_scores, k, mrf_type,
            filenames=None, target_class=None, bcd_scores=None):
    '''
    Returns:
      - argmin: a list of size k where each element is the label selected for the
        corresponding node.
      - t: running time for inference
    '''
    import opengm
    num_nodes = self.num_nodes
    if mrf_type.endswith('energy'):
      energy = -pairwise_scores.sum()
      argmin = np.zeros((num_nodes,), dtype=np.int32)
      t = 1
      return argmin, t, energy
    num_edges = self.get_num_edges()
    num_labels = int(np.sqrt(pairwise_scores.shape[1]))
    indices = self.get_indices()
    gm = opengm.gm([num_labels]*num_nodes, operator='adder')
    if unary_scores is not None:
      assert unary_scores.shape[0] == k
      assert unary_scores.shape[1] == num_labels
      for i in range(num_nodes):
        gm.addFactor(gm.addFunction(-unary_scores[i]), [i])
    for i,e in enumerate(indices):
      gm.addFactor(gm.addFunction(-pairwise_scores[i].reshape(
                                      (num_labels, num_labels))), [e[0],e[1]])

    if mrf_type.endswith('astar'):
      inf = opengm.inference.AStar(gm, accumulator='minimizer')
    elif mrf_type.endswith('trws'):
      inf = opengm.inference.TrwsExternal(gm, accumulator='minimizer')
    elif mrf_type.endswith('trbp'):
      inf = opengm.inference.TreeReweightedBp(gm, accumulator='minimizer')
    elif mrf_type.endswith('map'):
      inf = opengm.inference.BeliefPropagation(gm, accumulator='minimizer')
    else:
      raise ValueError('Inference for mrf type {} is not implemented'.format(mrf_type))
    #br_inf = opengm.inference.Bruteforce(gm, accumulator='minimizer')
    #br_inf.infer()
    t0 = time.time()
    inf.infer()
    argmin = inf.arg()
    t1 = time.time() - t0
    energy = inf.value()
    return argmin, t1, energy
Example #24
def instantiate_sentence(sentence_data):
    # Get the number of tokens:
    numVar = len(sentence_data)
    # print 'numVar', numVar

    # The domain of the variables is the length of the y_list
    # Use the multiplier, the adder doesn't seem to work
    gm=opengm.gm([len(y_list)]*numVar,'multiplier')
    # gm=opengm.gm([len(y_list)]*numVar,'adder')
    # print gm.numberOfVariables
    # exit()

    # Create the unary factors for the pgm
    gm, feat_idxs = create_unaries(gm, sentence_data)
    # print gm.factors()
    # for fac in gm.factors():
    #     print fac   

    return gm 
Example #25
    def forward(self, unary_pots):
        """ Receive input tensor, return output tensor"""
        self.save_for_backward(unary_pots)

        print("In forward")
        b, r, c, k = unary_pots.size()

        if (False):
            if torch.cuda.is_available():
                unaries = unary_pots.cpu().numpy()
            else:
                unaries = unary_pots.numpy()

            unaries = unaries.reshape([b * r * c, k])
            numVar = r * c

            gm = opengm.gm(np.ones(numVar, dtype=opengm.label_type) * k)
            uf_id = gm.addFunctions(unaries)
            potts = opengm.PottsFunction([k, k], 0.0, 0.4)
            pf_id = gm.addFunction(potts)

            vis = np.arange(0, numVar, dtype=np.uint64)
            # add all unary factors at once
            gm.addFactors(uf_id, vis)
            # add pairwise factors
            ### Row Factors

            for i in range(0, r):
                for j in range(0, c - 1):
                    gm.addFactor(pf_id, [i * c + j, i * c + j + 1])
            ### Column Factors
            for i in range(0, r - 1):
                for j in range(c):
                    gm.addFactor(pf_id, [i * c + j, (i + 1) * c + j])
            print("Graphical Model Constructed")
            inf = opengm.inference.AlphaExpansionFusion(gm)
            inf.infer()
            labels = inf.arg()

            return torch.from_numpy(np.asarray(labels).astype('float'))
        else:
            return torch.zeros(b, r, c)
Example #26
def run_mc_opengm(segmentation, edges, energies):

    n_seg = np.max(segmentation)

    states = np.ones(n_seg) * n_seg
    gm = opengm.gm(states)

    print "AAA"
    "pairwise"
    potts_shape = [n_seg, n_seg]
    potts = opengm.pottsFunctions(potts_shape,
        np.array([0.0]),
        np.array(energies)
        )
    print "AAA"

    fids_p = gm.addFunctions(potts)
    gm.addFactors(fids_p, edges)
    gm_path = "/tmp/gm.h5"
    opengm.saveGm(gm, gm_path)
    print "AAA"

    "parameters"
    # wf = "(TTC)(MTC)(IC)(CC-IFD,TTC-I)" # default workflow
    #wf = "(IC)(TTC-I,CC-I)" # propper workflow
    # wf = "(TTC)(TTC,CC)" # lp relaxation
    param = opengm.InfParam()#workflow=wf)
    print "---inference---"
    print " starting time:", time.strftime("%H:%M:%S"), ";", time.strftime("%d/%m/%Y")
    print "..."
    inf = opengm.inference.Multicut(gm, parameter=param)
    inf.infer()
    print " end time:", time.strftime("%H:%M:%S"), ";", time.strftime("%d/%m/%Y")
    res_node = inf.arg()
    res_edge = inf.getEdgeLabeling()
    res_seg = inf.getSegmentation()
    print res_node.shape, np.unique(res_node)
    print res_edge.shape, np.unique(res_edge)
    print res_seg.shape, np.unique(res_seg)
    quit()
Example #27
def generate_mc_grid(dimx, dimy, operator="adder"):
    labels=dimx*dimy
    nos = numpy.ones(labels, dtype=numpy.uint64) * labels
    gm = opengm.gm(nos, operator, 0)
    for y in range(dimy):
        for x in range(dimx):
            if x + 1 < dimx:
                vis = [x + y * dimx, x + 1 + y * dimx]
                vis.sort()
                l=random.random()*2.0 - 1.0
                fr=opengm.pottsFunction([labels,labels],0.0,l)
                fid2=gm.addFunction(fr)
                gm.addFactor(fid2, vis)
            if y + 1 < dimy:
                vis = [x + y * dimx, x + (y + 1) * dimx]
                vis.sort()
                l=random.random()*2.0 - 1.0
                fr=opengm.pottsFunction([labels,labels],0.0,l)
                fid2=gm.addFunction(fr)
                gm.addFactor(fid2, vis)
    return gm
Example #28
File: test.py Project: vene/opengm
def generate_mc_grid(dimx, dimy, operator="adder"):
    labels=dimx*dimy
    nos = numpy.ones(labels, dtype=numpy.uint64) * labels
    gm = opengm.gm(nos, operator, 0)
    for y in range(dimy):
        for x in range(dimx):
            if x + 1 < dimx:
                vis = [x + y * dimx, x + 1 + y * dimx]
                vis.sort()
                l=random.random()*2.0 - 1.0
                fr=opengm.pottsFunction([labels,labels],0.0,l)
                fid2=gm.addFunction(fr)
                gm.addFactor(fid2, vis)
            if y + 1 < dimy:
                vis = [x + y * dimx, x + (y + 1) * dimx]
                vis.sort()
                l=random.random()*2.0 - 1.0
                fr=opengm.pottsFunction([labels,labels],0.0,l)
                fid2=gm.addFunction(fr)
                gm.addFactor(fid2, vis)
    return gm
Example #29
 def test_add_factors_generic(self):
     def mygen():
         yield 0
         yield 1
     gm = opengm.gm([2, 4])
     f = opengm.PottsFunction([2, 4], 0.0, 1.0)
     fid = gm.addFunction(f)
     vis_list = [
         [0, 1],
         (0, 1),
         (x for x in xrange(2)),
         mygen(),
         opengm.IndexVector(x for x in xrange(0, 2)),
         numpy.arange(0, 2, dtype=numpy.uint64)
     ]
     for i, vis in enumerate(vis_list):
         fIndex = gm.addFactor(fid, vis)
         assert(gm.numberOfFactors == i + 1)
         assert(fIndex == i)
         assert(gm[fIndex].numberOfVariables == 2)
         assert(gm[fIndex].shape[0] == 2)
         assert(gm[fIndex].shape[1] == 4)
         assert(gm[fIndex].variableIndices[0] == 0)
         assert(gm[fIndex].variableIndices[1] == 1)
Example #30
File: bv_viewer.py Project: paragt/vigra
    def onClickedMulticut(self):

        p1 = self.probs.copy()
        p1 = numpy.clip(p1, 0.005, 1.0 - 0.005)
        p0 = 1.0 - p1

        weights = numpy.log(p0 / p1)
        nVar = self.rag.maxNodeId + 1
        nos = numpy.ones(nVar) * nVar
        gm = opengm.gm(nos)

        uv = self.rag.uvIds()
        uv = numpy.sort(uv, axis=1)
        pf = opengm.pottsFunctions([nVar, nVar], numpy.array([0]), weights)
        fid = gm.addFunctions(pf)
        gm.addFactors(fid, uv)

        inf = opengm.inference.Multicut(gm)
        inf.infer(inf.verboseVisitor())
        arg = inf.arg()

        self.eArg = arg[uv[:, 0]] != arg[uv[:, 1]]

        self.ctrlWidget.modeSelectorComboBox.setCurrentIndex(6)
Example #31
File: bv_viewer.py Project: paragt/vigra
    def onClickedMulticut(self):

        p1 = self.probs.copy()
        p1 = numpy.clip(p1, 0.005, 1.0-0.005)
        p0 = 1.0 - p1

        weights = numpy.log(p0/p1)
        nVar = self.rag.maxNodeId + 1
        nos = numpy.ones(nVar)*nVar
        gm = opengm.gm(nos)

        uv = self.rag.uvIds()
        uv = numpy.sort(uv,axis=1)
        pf = opengm.pottsFunctions([nVar,nVar], numpy.array([0]),weights)
        fid = gm.addFunctions(pf)
        gm.addFactors(fid,uv)

        inf = opengm.inference.Multicut(gm)
        inf.infer(inf.verboseVisitor())
        arg = inf.arg()

        self.eArg = arg[uv[:,0]]!=arg[uv[:,1]]

        self.ctrlWidget.modeSelectorComboBox.setCurrentIndex(6)
Example #32
File: denoise.py Project: stopfer/opengm
def denoiseModel(img,
                 norm=2,
                 weight=1.0,
                 truncate=None,
                 numLabels=256,
                 neighbourhood=4,
                 inpaintPixels=None,
                 randInpaitStartingPoint=False):
    """
    this function is used to set up a graphical model similar to 
    **Denoising and inpainting problems:** from `Mrf- Benchmark <http://vision.middlebury.edu/MRF/results/ >`_
    
    Args : 
        img           : a grayscale image in the range [0,256)
        norm          : used norm for unaries and 2-order functions (default : 2)
        weight        : weight of 2-order functions (default : 1.0)
        truncate      : Truncate second order function at a given value (default : None)
        numLabels     : number of labels for each variable in the graphical model,
                        set this to a lower number to speed up inference  (default : 256)
        neighbourhood : neighbourhood for the second order functions, so far only 4 is allowed (default : 4)
        inpaintPixels : a tuple of x and y coordinates where no unaries are added
        randInpaitStartingPoint : use a random starting point for all pixels without unaries (default : False)
    """
    shape = img.shape
    if (img.ndim != 2):
        raise RuntimeError("image must be gray")
    if neighbourhood != 4:
        raise RuntimeError(
            "A neighbourhood other than 4 is not yet implemented")

    # normalize and flatten image
    iMin = numpy.min(img)
    iMax = numpy.max(img)
    imgNorm = ((img[:, :] - iMin) / (iMax - iMin)) * float(numLabels)
    imgFlat = imgNorm.reshape(-1).astype(numpy.uint64)

    # Set up Graphical Model:
    numVar = int(img.size)
    gm = opengm.gm([numLabels] * numVar, operator='adder')
    gm.reserveFunctions(numLabels, 'explicit')
    numberOfPairwiseFactors = shape[0] * (shape[1] -
                                          1) + shape[1] * (shape[0] - 1)
    numInpaint = len(inpaintPixels[0]) if inpaintPixels is not None else 0
    gm.reserveFactors(numVar - numInpaint + numberOfPairwiseFactors)

    # Set up unaries:
    # - create a range of all possible labels
    allPossiblePixelValues = numpy.arange(numLabels)
    pixelValueRep = numpy.repeat(allPossiblePixelValues[:, numpy.newaxis],
                                 numLabels, 1)
    # - repeat [0,1,2,3,...,253,254,255] numVar times
    labelRange = numpy.arange(numLabels, dtype=opengm.value_type)
    labelRange = numpy.repeat(labelRange[numpy.newaxis, :], numLabels, 0)
    unaries = numpy.abs(pixelValueRep - labelRange)**norm
    # - add unaries to the graphical model
    fids = gm.addFunctions(unaries.astype(opengm.value_type))
    # add unary factors to graphical model
    if (inpaintPixels is None):
        for l in xrange(numLabels):
            whereL = numpy.where(imgFlat == l)
            gm.addFactors(fids[l], whereL[0].astype(opengm.index_type))
    else:
        # get vis of inpaint pixels
        ipX = inpaintPixels[0]
        ipY = inpaintPixels[1]
        ipVi = ipX * shape[1] + ipY

        for l in xrange(numLabels):
            whereL = numpy.where(imgFlat == l)
            notInInpaint = numpy.setdiff1d(whereL[0], ipVi)
            gm.addFactors(fids[l], notInInpaint.astype(opengm.index_type))

    # add ONE second order function
    f = opengm.differenceFunction(shape=[numLabels, numLabels],
                                  norm=2,
                                  weight=weight)
    fid = gm.addFunction(f)
    vis2Order = opengm.secondOrderGridVis(shape[0], shape[1], True)
    # add all second order factors
    gm.addFactors(fid, vis2Order)

    # create a starting point
    startingPoint = imgFlat.copy()
    if randInpaitStartingPoint:
        startingPointRandom = numpy.random.randint(
            0, numLabels, size=numVar).astype(opengm.index_type)

        ipVi = inpaintPixels[0] * shape[1] + inpaintPixels[1]
        for x in ipVi:
            startingPoint[x] = startingPointRandom[x]

    startingPoint[startingPoint == numLabels] = numLabels - 1
    return gm, startingPoint.astype(opengm.index_type)
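A usage sketch for denoiseModel (mine, not from the denoise.py example; it assumes a grayscale numpy image and uses only calls that already appear above, here ICM started from the returned labeling):

# sketch: denoise a random grayscale image with the model built above
import numpy
import opengm

img = numpy.random.rand(32, 32) * 255.0          # stand-in grayscale image
gm, start = denoiseModel(img, norm=2, weight=1.0, numLabels=32,
                         inpaintPixels=None)
inf = opengm.inference.Icm(gm)
inf.setStartingPoint(start)
inf.infer()
denoised = inf.arg().reshape(img.shape)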
Example #33
def crftest():
    """
    pip install pyqpbo
    pip install pystruct

    http://taku910.github.io/crfpp/#install

    cd ~/tmp
    #wget https://drive.google.com/folderview?id=0B4y35FiV1wh7fngteFhHQUN2Y1B5eUJBNHZUemJYQV9VWlBUb3JlX0xBdWVZTWtSbVBneU0&usp=drive_web#list
    7z x CRF++-0.58.tar.gz
    7z x CRF++-0.58.tar
    cd CRF++-0.58
    chmod +x configure
    ./configure
    make

    """
    import pystruct
    import pystruct.models

    inference_method_options = ['lp', 'max-product']
    inference_method = inference_method_options[1]

    # graph = pystruct.models.GraphCRF(
    #    n_states=None,
    #    n_features=None,
    #    inference_method=inference_method,
    #    class_weight=None,
    #    directed=False,
    # )

    num_annots = 5
    num_names = num_annots

    aids = np.arange(5)
    rng = np.random.RandomState(0)
    hidden_nids = rng.randint(0, num_names, num_annots)
    unique_nids, groupxs = ut.group_indices(hidden_nids)

    # Indicator vector indicating the name
    node_features = np.zeros((num_annots, num_names))
    node_features[(aids, hidden_nids)] = 1

    toy_params = {True: {'mu': 1.0, 'sigma': 2.2}, False: {'mu': 7.0, 'sigma': 0.9}}
    if False:
        import vtool as vt
        import wbia.plottool as pt

        pt.ensureqt()
        xdata = np.linspace(0, 100, 1000)
        tp_pdf = vt.gauss_func1d(xdata, **toy_params[True])
        fp_pdf = vt.gauss_func1d(xdata, **toy_params[False])
        pt.plot_probabilities([tp_pdf, fp_pdf], ['TP', 'TF'], xdata=xdata)

    def metric(aidx1, aidx2, hidden_nids=hidden_nids, toy_params=toy_params):
        if aidx1 == aidx2:
            return 0
        rng = np.random.RandomState(int(aidx1 + aidx2))
        same = hidden_nids[int(aidx1)] == hidden_nids[int(aidx2)]
        mu, sigma = ut.dict_take(toy_params[same], ['mu', 'sigma'])
        return np.clip(rng.normal(mu, sigma), 0, np.inf)

    pairwise_aidxs = list(ut.iprod(range(num_annots), range(num_annots)))
    pairwise_labels = np.array(  # NOQA
        [hidden_nids[a1] == hidden_nids[a2] for a1, a2 in pairwise_aidxs]
    )
    pairwise_scores = np.array([metric(*zz) for zz in pairwise_aidxs])
    pairwise_scores_mat = pairwise_scores.reshape(num_annots, num_annots)  # NOQA

    graph = pystruct.models.EdgeFeatureGraphCRF(  # NOQA
        n_states=num_annots,
        n_features=num_names,
        n_edge_features=1,
        inference_method=inference_method,
    )

    import opengm

    numVar = 10
    unaries = np.ones([numVar, 3], dtype=opengm.value_type)
    gm = opengm.gm(np.ones(numVar, dtype=opengm.label_type) * 3)
    unary_fids = gm.addFunctions(unaries)
    gm.addFactors(unary_fids, np.arange(numVar))
    infParam = opengm.InfParam(workflow=ut.ensure_ascii('(IC)(TTC-I,CC-I)'))
    inf = opengm.inference.Multicut(gm, parameter=infParam)
    visitor = inf.verboseVisitor(printNth=1, multiline=False)
    inf.infer(visitor)
    arg = inf.arg()

    # gridVariableIndices = opengm.secondOrderGridVis(img.shape[0], img.shape[1])
    # fid = gm.addFunction(regularizer)
    # gm.addFactors(fid, gridVariableIndices)
    # regularizer = opengm.pottsFunction([3, 3], 0.0, beta)
    # gridVariableIndices = opengm.secondOrderGridVis(img.shape[0], img.shape[1])
    # fid = gm.addFunction(regularizer)
    # gm.addFactors(fid, gridVariableIndices)

    unaries = np.random.rand(10, 10, 2)
    potts = opengm.PottsFunction([2, 2], 0.0, 0.4)
    gm = opengm.grid2d2Order(unaries=unaries, regularizer=potts)

    inf = opengm.inference.GraphCut(gm)
    inf.infer()
    arg = inf.arg()  # NOQA
Example #34
def denoiseModel(
    img,
    norm                    = 2,
    weight                  = 1.0,
    truncate                = None,
    numLabels               = 256,
    neighbourhood           = 4,
    inpaintPixels           = None,
    randInpaitStartingPoint = False
):
    """
    this function is used to set up a graphical model similar to 
    **Denoising and inpainting problems:** from `Mrf- Benchmark <http://vision.middlebury.edu/MRF/results/ >`_
    
    Args : 
        img           : a grayscale image in the range [0,256)
        norm          : used norm for unaries and 2-order functions (default : 2)
        weight        : weight of 2-order functions (default : 1.0)
        truncate      : Truncate second order function at a given value (default : None)
        numLabels     : number of labels for each variable in the graphical model,
                        set this to a lower number to speed up inference  (default : 256)
        neighbourhood : neighbourhood for the second order functions, so far only 4 is allowed (default : 4)
        inpaintPixels : a tuple of x and y coordinates where no unaries are added
        randInpaitStartingPoint : use a random starting point for all pixels without unaries (default : False)
    """
    shape = img.shape
    if(img.ndim!=2):
        raise RuntimeError("image must be gray")
    if neighbourhood != 4 :
        raise RuntimeError("A neighbourhood other than 4 is not yet implemented")

    # normalize and flatten image
    iMin    = numpy.min(img)
    iMax    = numpy.max(img)
    imgNorm = ((img[:,:]-iMin)/(iMax-iMin))*float(numLabels)
    imgFlat = imgNorm.reshape(-1).astype(numpy.uint64)

    # Set up Graphical Model:
    numVar = int(img.size)
    gm = opengm.gm([numLabels]*numVar,operator='adder')
    gm.reserveFunctions(numLabels,'explicit')
    numberOfPairwiseFactors=shape[0]*(shape[1]-1) + shape[1]*(shape[0]-1)
    numInpaint = len(inpaintPixels[0]) if inpaintPixels is not None else 0
    gm.reserveFactors(numVar - numInpaint + numberOfPairwiseFactors)

    # Set up unaries:
    # - create a range of all possible labels
    allPossiblePixelValues=numpy.arange(numLabels)
    pixelValueRep    = numpy.repeat(allPossiblePixelValues[:,numpy.newaxis],numLabels,1)
    # - repeat [0,1,2,3,...,253,254,255] numVar times
    labelRange = numpy.arange(numLabels,dtype=opengm.value_type)
    labelRange = numpy.repeat(labelRange[numpy.newaxis,:], numLabels, 0)
    unaries = numpy.abs(pixelValueRep - labelRange)**norm
    # - add unaries to the graphical model
    fids=gm.addFunctions(unaries.astype(opengm.value_type))
    # add unary factors to graphical model
    if(inpaintPixels is None):
        for l in xrange(numLabels):
            whereL=numpy.where(imgFlat==l)
            gm.addFactors(fids[l],whereL[0].astype(opengm.index_type))
    else:
        # get vis of inpaint pixels
        ipX  = inpaintPixels[0]
        ipY  = inpaintPixels[1]
        ipVi = ipX*shape[1] + ipY

        for l in xrange(numLabels):
            whereL=numpy.where(imgFlat==l)
            notInInpaint=numpy.setdiff1d(whereL[0],ipVi)
            gm.addFactors(fids[l],notInInpaint.astype(opengm.index_type))

    # add ONE second order function
    f=opengm.differenceFunction(shape=[numLabels,numLabels],norm=2,weight=weight)
    fid=gm.addFunction(f)
    vis2Order=opengm.secondOrderGridVis(shape[0],shape[1],True)
    # add all second order factors
    gm.addFactors(fid,vis2Order)

    # create a starting point
    startingPoint = imgFlat.copy()
    if randInpaitStartingPoint :
        startingPointRandom = numpy.random.randint(0,numLabels,size=numVar).astype(opengm.index_type)

        ipVi = inpaintPixels[0]*shape[1] + inpaintPixels[1]
        for x in ipVi:
            startingPoint[x]=startingPointRandom[x]

    startingPoint[startingPoint==numLabels]=numLabels-1            
    return gm,startingPoint.astype(opengm.index_type)
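A second sketch (also mine) exercising the inpainting path described in the docstring: pixels listed in inpaintPixels get no unary factors and, with randInpaitStartingPoint=True, a random starting label:

# sketch: inpaint a masked region using the model built above
import numpy
import opengm

img = numpy.random.rand(32, 32) * 255.0
mask = numpy.zeros((32, 32), dtype=bool)
mask[10:20, 10:20] = True                        # region to inpaint
gm, start = denoiseModel(img, numLabels=32,
                         inpaintPixels=numpy.where(mask),
                         randInpaitStartingPoint=True)
inf = opengm.inference.Icm(gm)
inf.setStartingPoint(start)
inf.infer()
restored = inf.arg().reshape(img.shape)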
Example #35
import opengm
import numpy
from opengm import learning
np = numpy

numLabels = 3
numVar = 6

#################################################################
# set up the graphical model and the weights
##################################################################

print opengm.learning.DatasetWithHammingLoss
print opengm.learning.HammingLoss

# make the gm
space = numpy.ones(numVar) * numLabels
gm = opengm.gm(space)

weightVals = numpy.ones(100) * 1.0
weights = opengm.learning.Weights(weightVals)

##################################################################
# add a unary function
##################################################################
features = numpy.ones([numLabels, 2], dtype=opengm.value_type)
weightIds = numpy.ones([numLabels, 2], dtype=opengm.index_type)

# set up weight ids for each label
weightIds[0, :] = [0, 1]
weightIds[1, :] = [2, 3]
weightIds[2, :] = [4, 5]
Example #36
    def __init__(self,
                 tl_line_rec,
                 region_det,
                 line_rec,
                 line_pts,
                 stats,
                 scale=1.0,
                 sign_hypos=None,
                 param_dict=None):
        # create graphical model from fragment
        self.stats = stats
        self.scale = scale
        self.scaled_sign_height = stats.tblSignHeight * scale
        self.min_sign_dist = self.scaled_sign_height / 2.  # distance between sign centers

        self.tl_line_rec = tl_line_rec

        # null hypothesis for signs in tl
        self.sign_hypos = sign_hypos

        # detections contained in rectangular area around respective alignments
        # [ID, cx, cy, score, x1, y1, x2, y2, idx]
        self.region_det = region_det

        # init
        self.num_vars = len(self.tl_line_rec)
        self.num_relevant = 0
        self.max_cost = 1e10  # 1e11  # "infinite" cost

        # only continue, if there is a sign in line to match
        if self.num_vars > 0:

            # compute num_lbls_per_var from detections
            ulbls, counts = np.unique(self.region_det[:, 0],
                                      return_counts=True)
            hypo_det_counts = np.array([
                counts[ulbls == item] if item in ulbls else 0
                for item in self.tl_line_rec.lbl
            ],
                                       dtype=int).squeeze()

            self.tl_line_rec['det_count'] = hypo_det_counts
            # optional: remove vars without detections
            if False:
                # self.tl_line_rec = self.tl_line_rec[hypo_det_counts > 0]
                self.tl_line_rec = self.tl_line_rec.iloc[np.where(
                    hypo_det_counts > 0
                )]  # deal with scalar case of boolean indexing
                # hypo_det_counts = hypo_det_counts[hypo_det_counts > 0]

            # only continue if there is at least a single matching detection
            self.num_relevant = np.sum(counts[np.isin(ulbls,
                                                      self.tl_line_rec.lbl)])
            if self.num_relevant > 0:
                # update num_vars
                self.num_vars = len(self.tl_line_rec)
                # opengm setup
                self.num_lbls_per_var = max(counts[np.isin(
                    ulbls, self.tl_line_rec.lbl)]) + 1  # + 1 outlier detection
                var_space = np.ones(self.num_vars) * self.num_lbls_per_var
                self.gm = opengm.gm(var_space)

                # parameter setup
                if param_dict is not None:
                    self.params = param_dict
                else:
                    self.params = dict()

                    # extra settings
                    self.params['outlier_cost'] = 10
                    self.params['angle_long_range'] = True

                    # unary potentials
                    self.params['lambda_score'] = 0.3
                    self.params['sigma_score'] = 0.4

                    self.params[
                        'lambda_offset'] = 1  # currently offset used linearly without exp function
                    self.params[
                        'sigma_offset'] = 1  # lambda & sigma have no influence!

                    # pairwise binary potentials
                    self.params['lambda_p'] = 3  # 1
                    self.params['sigma_p'] = 3

                    self.params['lambda_angle'] = 2
                    self.params['sigma_angle'] = 0.6

                    self.params['lambda_iou'] = 2
                    self.params['sigma_iou'] = 0.4

                    # OPTIONAL: strong penalties for long range connections
                    if True:
                        self.params['lr_lambda_angle'] = 0.05
                        self.params['lr_sigma_angle'] = 0.1

                        self.params['lr_lambda_iou'] = 0.1
                        self.params['lr_sigma_iou'] = 0.05
                    else:
                        self.params['lr_lambda_angle'] = self.params[
                            'lambda_angle']
                        self.params['lr_sigma_angle'] = self.params[
                            'sigma_angle']

                        self.params['lr_lambda_iou'] = self.params[
                            'lambda_iou']
                        self.params['lr_sigma_iou'] = self.params['sigma_iou']

                # angle of hypothesis line
                self.b = line_pts[-1, :] - line_pts[0, :]
                # print 'hypo angle:', np.arctan2(self.b[1], self.b[0]) * (180 / np.pi), self.b

                # offset
                self.Xb = line_pts[0, :].reshape(1, -1)

                # define variance between line distance and sign distance - for seuclidean and mahalanobis
                self.variance_p = np.array([1, 0.2],
                                           dtype=np.float64)  # [8, 1] [1, 1]

                if False:
                    print('#syms:', len(self.tl_line_rec), 'max#dets_per_sym:',
                          self.num_lbls_per_var - 1, 'relevant#dets:',
                          self.num_relevant, 'total#dets:',
                          self.region_det.shape[0])

                # print(np.vstack([self.fm_hypo_df.lbl, hypo_det_counts])).astype(int)
                # print self.fm_hypo_df

                # assemble potentials
                self.add_unary()
                self.add_pairwise()
Example #37
import opengm
import numpy

#------------------------------------------------------------------------------------
# This example shows how to add multiple unary functions and factors at once
#------------------------------------------------------------------------------------
# add unaries for a 2d grid / image
width=10
height=20
numVar=width*height
numLabels=2
# construct gm
gm=opengm.gm(numpy.ones(numVar,dtype=opengm.index_type)*numLabels)
# construct an array with all unaries (random in this example)
unaries=numpy.random.rand(width,height,numLabels)
# reshape unaries in such a way that the first axis indexes the different functions
unaries2d=unaries.reshape([numVar,numLabels])
# add all unary functions at once (#numVar unaries)
fids=gm.addFunctions(unaries2d)
# numpy array with the variable indices for all factors
vis=numpy.arange(0,numVar,dtype=numpy.uint64)
# add all unary factors at once
gm.addFactors(fids,vis)
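A possible continuation of this example (my sketch, not part of the snippet): after the unary factors, one shared second order Potts function can be attached to all grid edges via opengm.secondOrderGridVis, mirroring the pattern used in the other examples in this collection:

# sketch: add a single shared Potts function over all grid edges
potts = opengm.PottsFunction([numLabels, numLabels], 0.0, 0.5)
pottsFid = gm.addFunction(potts)
gridVis = opengm.secondOrderGridVis(width, height)
gm.addFactors(pottsFid, gridVis)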
Example #38
import opengm
import numpy

#------------------------------------------------------------------------------------
# This example shows how to add multiple unary functions and factors at once
#------------------------------------------------------------------------------------
# add unaries for a 2d grid / image
width = 10
height = 20
numVar = width * height
numLabels = 2
# construct gm
gm = opengm.gm(numpy.ones(numVar, dtype=opengm.index_type) * numLabels)
# construct an array with all unaries (random in this example)
unaries = numpy.random.rand(width, height, numLabels)
# reshape unaries in such a way that the first axis indexes the different functions
unaries2d = unaries.reshape([numVar, numLabels])
# add all unary functions at once (#numVar unaries)
fids = gm.addFunctions(unaries2d)
# numpy array with the variable indices for all factors
vis = numpy.arange(0, numVar, dtype=numpy.uint64)
# add all unary factors at once
gm.addFactors(fids, vis)
Example #39
import numpy
import opengm
import matplotlib.pyplot as plt

f1 = numpy.ones([2])
f2 = numpy.ones([2, 2])
f3 = numpy.ones([2, 2, 2])
"""
Triangle (non-shared) :
    - 3 variables
    - 3 unaries
    - 3 second order factors
    - 1 third order factor
    - functions are *non* - shared
"""
gm = opengm.gm([2, 2, 2])
gm.addFactor(gm.addFunction(f1), [0])
gm.addFactor(gm.addFunction(f1), [1])
gm.addFactor(gm.addFunction(f1), [2])
gm.addFactor(gm.addFunction(f2), [0, 1])
gm.addFactor(gm.addFunction(f2), [1, 2])
gm.addFactor(gm.addFunction(f2), [0, 2])
gm.addFactor(gm.addFunction(f3), [0, 1, 2])

opengm.visualizeGm(gm, show=False, plotFunctions=True, plotNonShared=True)
plt.savefig("triangle.png", bbox_inches='tight', dpi=300)
plt.close()
Example #40
import opengm
import numpy

gm=opengm.gm([2,2,3,3,4,4,4],operator='adder')
functionIds=[]

#---------------------------------------------------------------
# Numpy Ndarray
# (is stored in a different multi array function within opengm)
#---------------------------------------------------------------

f=numpy.random.rand(2,2,3,4)
fid=gm.addFunction(f)
gm.addFactor(fid,[0,1,2,4])
print "\nexplicit function: \n",f

#---------------------------------------------------------------
# Sparse Function
#--------------------------------------------------------------- 

# fill sparse function "by hand"
f=opengm.SparseFunction(shape=[3,4,4],defaultValue=1)
# fill diagonal with zeros
for d in xrange(4):
    f[[d,d,d]]=0
print "\nsparse function: \n",f
fid=gm.addFunction(f)
functionIds.append(fid)
gm.addFactor(fid,[3,4,5])

# fill sparse function from dense function
Example #41
File: grid.py Project: MasazI/GM_opengm2
img += noise
img -= img.min()
img /= img.max()
print "shape", img.shape
vigra.imshow(img)
#vigra.show()

threshold = 0.24
labelsNaive = img > threshold
vigra.imshow(labelsNaive)
#vigra.show()

nVar = img.size
nLabelsPerVar = 2
variableSpace = numpy.ones(nVar) * nLabelsPerVar
gm = opengm.gm(variableSpace)

t0 = time.time()
# add unaries
for y in range(img.shape[1]):
    for x in range(img.shape[0]):

        energy0 = img[x, y] - threshold
        energy1 = threshold - img[x, y]
        unaryFunction = numpy.array([energy0, energy1])

        # add unary function to graphical model
        functionId = gm.addFunction(unaryFunction)

        # add unary factor to graphical model
        variableIndex = y + x * img.shape[1]
Example #42
    def _inference(self,
                   node_potentials,
                   edge_potentials,
                   inter_potentials,
                   edges,
                   alg,
                   return_energy=False,
                   return_margin=False,
                   init=None,
                   **kwargs):
        if node_potentials.shape[1] != self.nNodeLabel:
            raise ValueError(
                "Node feature function parameters should match node label numbers."
            )
        if edge_potentials.shape[0] != edges.shape[0]:
            raise ValueError(
                "Edge feature function numbers should match given edges.")
        if edge_potentials.shape[1] != self.nEdgeLabel:
            raise ValueError(
                "Edge feature function parameters should match edge label numbers."
            )
        if inter_potentials.shape[0] != edges.shape[0]:
            raise ValueError(
                "Interaction potential function number should match edge numbers."
            )
        if inter_potentials.shape[1]!=self.nNodeLabel or inter_potentials.shape[2]!=self.nNodeLabel \
        or inter_potentials.shape[3]!=self.nEdgeLabel:
            raise ValueError(
                "Interaction potential function parameters should match combination number of labels."
            )

        nNodes = node_potentials.shape[0]
        nEdges = edges.shape[0]

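        # Construction note: in addition to the nNodes node variables, the model
        # introduces one auxiliary variable per edge (with nEdgeLabel states); each
        # edge variable is coupled to its two endpoint nodes by a third-order
        # interaction factor added further below.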
        gm = opengm.gm(np.hstack([np.ones(nNodes, dtype=opengm.label_type)*self.nNodeLabel, \
            np.ones(nEdges, dtype=opengm.label_type)*self.nEdgeLabel]))
        gm.reserveFactors(nNodes + 2 * nEdges)
        gm.reserveFunctions(nNodes + 2 * nEdges, 'explicit')

        unaries = -np.require(node_potentials, dtype=opengm.value_type)
        fidUnaries = gm.addFunctions(unaries)
        visUnaries = np.arange(nNodes, dtype=np.uint64)
        gm.addFactors(fidUnaries, visUnaries)

        unaries = -np.require(edge_potentials, dtype=opengm.value_type)
        fidUnaries = gm.addFunctions(unaries)
        visUnaries = np.arange(nNodes, nEdges + nNodes, dtype=np.uint64)
        gm.addFactors(fidUnaries, visUnaries)

        highOrderFunctions = -np.require(inter_potentials,
                                         dtype=opengm.value_type)
        fidHighOrder = gm.addFunctions(highOrderFunctions)
        vidHighOrder = np.hstack(
            [edges,
             np.arange(nNodes, nNodes + nEdges).reshape((nEdges, 1))])
        vidHighOrder = np.require(vidHighOrder, dtype=np.uint64)
        gm.addFactors(fidHighOrder, vidHighOrder)

        if alg == 'bp':
            inference = opengm.inference.BeliefPropagation(gm)
        elif alg == 'dd':
            inference = opengm.inference.DualDecompositionSubgradient(gm)
        elif alg == 'trws':
            inference = opengm.inference.TrwsExternal(gm)
        elif alg == 'trw':
            inference = opengm.inference.TreeReweightedBp(gm)
        elif alg == 'gibbs':
            inference = opengm.inference.Gibbs(gm)
        elif alg == 'lf':
            inference = opengm.inference.LazyFlipper(gm)
        elif alg == 'icm':
            inference = opengm.inference.Icm(gm)
        elif alg == 'dyn':
            inference = opengm.inference.DynamicProgramming(gm)
        elif alg == 'fm':
            inference = opengm.inference.AlphaExpansionFusion(gm)
        elif alg == 'gc':
            inference = opengm.inference.GraphCut(gm)
        elif alg == 'loc':
            inference = opengm.inference.Loc(gm)
        elif alg == 'mqpbo':
            inference = opengm.inference.Mqpbo(gm)
        elif alg == 'alphaexp':
            inference = opengm.inference.AlphaExpansion(gm)
        elif alg == 'lp':
            parameter = opengm.InfParam(integerConstraint=True)
            inference = opengm.inference.LpCplex(gm, parameter=parameter)
        if init is not None:
            inference.setStartingPoint(init)

        inference.infer()
        res = inference.arg().astype(np.int)
        if return_margin:
            node_marginals = inference.marginals(
                np.arange(nNodes, dtype=opengm.index_type))
            edge_marginals = inference.marginals(
                np.arange(nNodes, nNodes + nEdges, dtype=opengm.index_type))
            return (node_marginals, edge_marginals)
        if return_energy:
            return res, gm.evaluate(res)
        return res
Example #43
    def _inference(self,
                   node_potentials,
                   inter_potentials,
                   edges,
                   alg,
                   return_energy=False,
                   return_margin=False,
                   init=None,
                   **kwargs):
        if node_potentials.shape[1] != self.nNodeLabel:
            raise ValueError(
                "Node feature function parameters should match node label numbers."
            )
        if inter_potentials.shape[0]!=self.nNodeLabel or inter_potentials.shape[1]!=self.nNodeLabel \
        or inter_potentials.shape[2]!=self.nEdgeLabel:
            raise ValueError(
                "Interaction potential function parameters should match combination number of labels."
            )

        nNodes = node_potentials.shape[0]
        nEdges = edges.shape[0]

        gm = opengm.gm(np.hstack([np.ones(nNodes, dtype=opengm.label_type)*self.nNodeLabel, \
            np.ones(nEdges, dtype=opengm.label_type)*self.nEdgeLabel]))
        gm.reserveFactors(nNodes + 2 * nEdges)
        gm.reserveFunctions(nNodes + 2 * nEdges, 'explicit')

        unaries = -np.require(node_potentials, dtype=opengm.value_type)
        fidUnaries = gm.addFunctions(unaries)
        visUnaries = np.arange(nNodes, dtype=np.uint64)
        gm.addFactors(fidUnaries, visUnaries)

        inter_potentials = np.repeat(inter_potentials[np.newaxis, :, :, :],
                                     edges.shape[0],
                                     axis=0)
        highOrderFunctions = -np.require(inter_potentials,
                                         dtype=opengm.value_type)
        fidHighOrder = gm.addFunctions(highOrderFunctions)
        vidHighOrder = np.hstack(
            [edges,
             np.arange(nNodes, nNodes + nEdges).reshape((nEdges, 1))])
        vidHighOrder = np.require(vidHighOrder, dtype=np.uint64)
        gm.addFactors(fidHighOrder, vidHighOrder)

        if alg == 'bp':
            inference = opengm.inference.BeliefPropagation(gm)
        elif alg == 'trw':
            inference = opengm.inference.TreeReweightedBp(gm)
        if init is not None:
            inference.setStartingPoint(init)

        inference.infer()
        res = inference.arg().astype(np.int)

        if return_margin:
            node_marginals = inference.marginals(
                np.arange(nNodes, dtype=opengm.index_type))
            return node_marginals
        if return_energy:
            return res, gm.evaluate(res)
        return res
Example #44
File: grid.py Project: thorbenk/opengm
import matplotlib.pyplot as plt


f1=numpy.ones([2])
f2=numpy.ones([2,2])

"""
Grid:
    - 3x3=9 variables
    - second order factors in 4-neighbourhood
      all connected to the same function
    - higher order functions are shared
"""

size=3
gm=opengm.gm([2]*size*size)

fid=gm.addFunction(f2)
for y in range(size):   
    for x in range(size):
        gm.addFactor(gm.addFunction(f1),x*size+y)
        if(x+1<size):
            gm.addFactor(fid,[x*size+y,(x+1)*size+y])
        if(y+1<size):
            gm.addFactor(fid,[x*size+y,x*size+(y+1)])


opengm.visualizeGm( gm,layout='spring',iterations=3000,
                    show=False,plotFunctions=True,
                    plotNonShared=True,relNodeSize=0.4)
plt.savefig("grid.png",bbox_inches='tight',dpi=300) 
Example #45
def generate_pgm(if_data, verbose=False):
    # gather data from the if data object
    query_graph = if_data.current_sg_query
    object_detections = if_data.object_detections
    attribute_detections = if_data.attribute_detections
    relationship_models = if_data.relationship_models
    per_object_attributes = if_data.per_object_attributes
    image_filename = if_data.image_filename
    
    # generate the graphical model (vg_data_build_gm_for_image)
    n_objects = len(query_graph.objects)
    n_vars = []
    object_is_detected = []
    query_to_pgm = []
    pgm_to_query = []
    
    master_box_coords = []
    
    varcount = 0
    for obj_ix in range(0, n_objects):
        query_object_name = query_graph.objects[obj_ix].names
        
        # occasionally, there are multiple object names (is 0 the best?)
        if isinstance(query_object_name, np.ndarray):
            query_object_name = query_object_name[0]

        object_name = "obj:" + query_object_name
        if object_name not in object_detections:
            object_is_detected.append(False)
            query_to_pgm.append(-1)
        else:
            if len(master_box_coords) == 0:
                master_box_coords = np.copy(object_detections[object_name][:,0:4])
            object_is_detected.append(True)
            query_to_pgm.append(varcount)
            varcount += 1
            pgm_to_query.append(obj_ix)
            
            n_labels = len(object_detections[object_name])
            n_vars.append(n_labels)
    
    gm = ogm.gm(n_vars, operator='adder')
    
    functions = []
    
    # generate 1st order functions for objects
    # TODO: test a uniform dist for missing objects
    unary_dets = []
    is_cnn_detected = []
    for obj_ix in range(0, n_objects):
        fid = None
        
        pgm_ix = query_to_pgm[obj_ix]
        object_name = query_graph.objects[obj_ix].names
        if isinstance(object_name, np.ndarray):
            object_name = object_name[0]
        detail = "unary function for object '{0}'".format(object_name)
        
        if object_is_detected[obj_ix]:
            is_cnn_detected.append(True)
            prefix_object_name = "obj:" + object_name
            detections = object_detections[prefix_object_name]
            unary_dets.append(detections[:,4])
            log_scores = -np.log(detections[:,4])
            fid = gm.addFunction(log_scores)
        else:
            continue
        
        func_detail = FuncDetail(fid, [pgm_ix], "explicit", "object unaries", detail)
        functions.append(func_detail)
    
    #generate 1st order functions for attributes
    n_attributes = len(per_object_attributes)
    for attr_ix in range(0, n_attributes):
        obj_ix = int(per_object_attributes[attr_ix][0])
        pgm_ix = query_to_pgm[obj_ix]
        attribute_name = per_object_attributes[attr_ix][1]
        prefix_attribute_name = "atr:" + attribute_name
        
        if prefix_attribute_name not in attribute_detections:
            continue
        
        if not object_is_detected[obj_ix]:
            continue
        
        detections = attribute_detections[prefix_attribute_name]
        log_scores = -np.log(detections[:,4])
        
        detail = "unary function for attribute '{0}' of object '{1}' (qry_ix:{2}, pgm_ix:{3})".format(attribute_name, query_graph.objects[obj_ix].names, obj_ix, pgm_ix)
        
        fid = gm.addFunction(log_scores)
        func_detail = FuncDetail(fid, [pgm_ix], "explicit", "attribute unaries", detail)
        functions.append(func_detail)
    
    # generate a tracker for storing obj/attr/rel likelihoods (pre-inference)
    tracker = DetectionTracker(image_filename)
    for i in range(0, n_objects):
        if object_is_detected[i]:
            if isinstance(query_graph.objects[i].names, np.ndarray):
                tracker.object_names.append(query_graph.objects[i].names[0])
            else:
                tracker.object_names.append(query_graph.objects[i].names)
    tracker.unary_detections = unary_dets
    tracker.box_coords = master_box_coords
    tracker.detected_vars = is_cnn_detected
    
    # generate 2nd order functions for binary relationships
    trip_root = query_graph.binary_triples
    trip_list = []
    if isinstance(trip_root, sio.matlab.mio5_params.mat_struct):
        trip_list.append(query_graph.binary_triples)
    else:
        # if there's only one relationship, we don't have an array :/
        for trip in trip_root:
            trip_list.append(trip)
    
    # generate a single cartesian product of the boxes
    # this will only work when all objects are detected across the same boxes
    # we know this is the case for this implementation
    master_cart_prod = None
    for i in range(0, n_objects):
        if object_is_detected[i]:
            obj_name = query_graph.objects[i].names
            boxes = None
            if isinstance(obj_name, np.ndarray):
                boxes = object_detections["obj:"+obj_name[0]]
            else:
                boxes = object_detections["obj:"+obj_name]
            master_cart_prod = np.array([x for x in itertools.product(boxes, boxes)])
            break
    tracker.box_pairs = master_cart_prod
    
    # process each binary triple in the list
    for trip in trip_list:
        sub_ix = trip.subject
        sub_pgm_ix = query_to_pgm[sub_ix]
        subject_name = query_graph.objects[sub_ix].names
        if isinstance(subject_name, np.ndarray):
            subject_name = subject_name[0]
        
        obj_ix = trip.object
        obj_pgm_ix = query_to_pgm[obj_ix]
        object_name = query_graph.objects[obj_ix].names
        if isinstance(object_name, np.ndarray):
            object_name = object_name[0]
        
        relationship = trip.predicate
        bin_trip_key = subject_name + "_" + relationship.replace(" ", "_")  + "_" + object_name
        
        # check if there is a gmm for the specific triple string
        if bin_trip_key not in relationship_models:
            bin_trip_key = "*_" + relationship.replace(" ", "_") + "_*"
            if bin_trip_key not in relationship_models:
                continue
        
        # verify object detections
        if sub_ix == obj_ix:
            continue
        
        if not object_is_detected[sub_ix]:
            continue
        
        if not object_is_detected[obj_ix]:
            continue
        
        # get model parameters
        prefix_object_name = "obj:" + object_name
        bin_object_box = object_detections[prefix_object_name]
        
        prefix_subject_name = "obj:" + subject_name
        bin_subject_box = object_detections[prefix_subject_name]
        
        rel_params = relationship_models[bin_trip_key]
        
        # generate features from subject and object detection boxes
        cart_prod = master_cart_prod
        sub_dim = 0
        obj_dim = 1
        
        subx_center = cart_prod[:, sub_dim, 0] + 0.5 * cart_prod[:, sub_dim, 2]
        suby_center = cart_prod[:, sub_dim, 1] + 0.5 * cart_prod[:, sub_dim, 3]
        
        objx_center = cart_prod[:, obj_dim, 0] + 0.5 * cart_prod[:, obj_dim, 2]
        objy_center = cart_prod[:, obj_dim, 1] + 0.5 * cart_prod[:, obj_dim, 3]
        
        sub_width = cart_prod[:, sub_dim, 2]
        relx_center = (subx_center - objx_center) / sub_width
        
        sub_height = cart_prod[:, sub_dim, 3]
        rely_center = (suby_center - objy_center) / sub_height
        
        rel_height = cart_prod[:, obj_dim, 2] / cart_prod[:, sub_dim, 2]
        rel_width = cart_prod[:, obj_dim, 3] / cart_prod[:, sub_dim, 3]
        
        features = np.vstack((relx_center, rely_center, rel_height, rel_width)).T
        
        #tracker.box_pairs = np.copy(cart_prod) #TODO: is this copy necessary?
        #tracker.box_pairs = cart_prod
        
        # generate scores => log(epsilon+scores) => platt sigmoid
        scores = gmm_pdf(features, rel_params.gmm_weights, rel_params.gmm_mu, rel_params.gmm_sigma)
        eps = np.finfo(np.float).eps
        scores = np.log(eps + scores)
        sig_scores = 1.0 / (1. + np.exp(rel_params.platt_a * scores + rel_params.platt_b))
        
        log_likelihoods = -np.log(sig_scores)
        
        #tracker.add_group(bin_trip_key, np.copy(log_likelihoods), np.copy(bin_object_box), object_name, np.copy(bin_subject_box), subject_name) # TODO: are these copy calls necessary?
        tracker.add_group(bin_trip_key, log_likelihoods, bin_object_box, object_name, obj_pgm_ix, bin_subject_box, subject_name, sub_pgm_ix)
        
        # generate the matrix of functions
        n_subject_val = len(bin_subject_box)
        n_object_val = len(bin_object_box)
        bin_functions = np.reshape(log_likelihoods, (n_subject_val, n_object_val)) # TODO: determine if any transpose is needed
        if obj_pgm_ix < sub_pgm_ix: bin_functions = bin_functions.T
        
        # add binary functions to the GM
        detail = "binary functions for relationship '%s'" % (bin_trip_key)
        fid = gm.addFunction(bin_functions)
        
        var_indices = [sub_pgm_ix, obj_pgm_ix]
        if obj_pgm_ix < sub_pgm_ix: var_indices = [obj_pgm_ix, sub_pgm_ix]
        func_detail = FuncDetail(fid, var_indices, "explicit", "binary functions", detail)
        functions.append(func_detail)
        
    # add 1st order factors (fid)
    for f in functions:
        n_indices = len(f.var_indices)
        if n_indices == 1:
            gm.addFactor(f.gm_function_id, f.var_indices[0])
        elif n_indices == 2:
            gm.addFactor(f.gm_function_id, f.var_indices)
        else:
            continue
    
    return gm, tracker
Example #46
def dummy_cut_example():
    r"""
    CommandLine:
        python -m ibeis.workflow --exec-dummy_cut_example --show

    Example:
        >>> # DISABLE_DOCTEST
        >>> from ibeis.workflow import *  # NOQA
        >>> result = dummy_cut_example()
        >>> print(result)
        >>> ut.quit_if_noshow()
        >>> import plottool as pt
        >>> ut.show_if_requested()
    """
    import opengm
    import numpy as np
    import plottool as pt
    pt.ensure_pylab_qt4()
    # Matching Graph
    cost_matrix = np.array([
        [0.5, 0.6, 0.2, 0.4],
        [0.0, 0.5, 0.2, 0.9],
        [0.0, 0.0, 0.5, 0.1],
        [0.0, 0.0, 0.0, 0.5],
    ])
    cost_matrix += cost_matrix.T
    number_of_labels = 4
    num_annots = 4
    #cost_matrix = (cost_matrix * 2) - 1

    #gm = opengm.gm(number_of_labels)
    gm = opengm.gm(np.ones(num_annots) * number_of_labels)
    aids = np.arange(num_annots)
    aid_pairs = np.array([(a1, a2) for a1, a2 in ut.iprod(
        aids, aids) if a1 != a2], dtype=np.uint32)
    aid_pairs.sort(axis=1)

    # add a potts function
    # penalizes neighbors for having different labels
    # beta = 0   # 0.1  # strength of potts regularizer
    #beta = 0.1   # 0.1  # strength of potts regularizer

    # Places to look for the definition of this stupid class
    # ~/code/opengm/src/interfaces/python/opengm/opengmcore/pyFunctionTypes.cxx
    # /src/interfaces/python/opengm/opengmcore/function_injector.py

    #shape = [number_of_labels] * 2
    #regularizer = opengm.PottsGFunction(shape, 0.0, beta)
    # __init__( (object)arg1, (object)shape [, (object)values=()]) -> object :

    # values = np.arange(1, ut.num_partitions(num_annots) + 1)
    #regularizer = opengm.PottsGFunction(shape)
    #reg_fid = gm.addFunction(regularizer)

    # A Comparative Study of Modern Inference Techniques for Structured Discrete Energy Minimization Problems
    # http://arxiv.org/pdf/1404.0533.pdf

    # regularizer1 = opengm.pottsFunction([number_of_labels] * 2, valueEqual=0.0, valueNotEqual=beta)

    # gm.addFactors(reg_fid, aid_pairs)

    # 2nd order function
    pair_fid = gm.addFunction(cost_matrix)
    gm.addFactors(pair_fid, aid_pairs)

    if False:
        Inf = opengm.inference.BeliefPropagation
        parameter = opengm.InfParam(steps=10, damping=0.5, convergenceBound=0.001)
    else:
        Inf = opengm.inference.Multicut
        parameter = opengm.InfParam()

    inf = Inf(gm, parameter=parameter)

    class PyCallback(object):

        def __init__(self,):
            self.labels = []

        def begin(self, inference):
            print("begin of inference")

        def end(self, inference):
            self.labels.append(inference.arg())

        def visit(self, inference):
            gm = inference.gm()
            labelVector = inference.arg()
            print("energy  %r" % (gm.evaluate(labelVector),))
            self.labels.append(labelVector)

    callback = PyCallback()
    visitor = inf.pythonVisitor(callback, visitNth=1)
    inf.infer(visitor)
    print(callback.labels)

    print(cost_matrix)
    pt.imshow(cost_matrix, cmap='magma')
    opengm.visualizeGm(gm=gm)
    pass
Example #47
            vigra.filters.gaussianGradientMagnitude(randGt.astype('float32'),
                                                    sigma))

    a = None
    b = None
    if len(feat) > 0:
        a = numpy.rollaxis(numpy.array(feat), axis=0, start=3)
    if len(featB) > 0:
        b = numpy.rollaxis(numpy.array(featB), axis=0, start=3)
    return a, b


for mi in range(nModels):
    #print mi

    gm = opengm.gm(numpy.ones(numVar) * nLables)
    gt = makeGt(shape)
    gtFlat = gt.reshape([-1])

    unaries, binaries = makeFeatures(gt)

    # print unaries, binaries

    for x in range(shape[0]):
        for y in range(shape[1]):
            uFeat = numpy.append(unaries[x, y, :], [1])

            #print uFeat
            #print uWeightIds
            #print(unaries[x,y,:])
            #print(unaries.shape)
Example #48
            0], " infile hdf5-outfile red green blue T lambda"
        sys.exit(0)

    img = vigra.readImage(args[1])

    if img.shape[2] != 3:
        print "Image must be RGB"
        sys.exit(0)

    T = float(args[6])
    beta = float(args[7])

    imgFlat = img.reshape([-1, 3]).view(numpy.ndarray)
    numVar = imgFlat.shape[0]

    gm = opengm.gm(numpy.ones(numVar, dtype=opengm.label_type) * 2)

    protoColor = numpy.array([args[3], args[4], args[5]],
                             dtype=opengm.value_type).reshape([3, -1])
    protoColor = numpy.repeat(protoColor, numVar, axis=1).swapaxes(0, 1)
    diffArray = numpy.sum(numpy.abs(imgFlat - protoColor), axis=1)
    unaries = numpy.ones([numVar, 2], dtype=opengm.value_type)
    unaries[:, 0] = T
    unaries[:, 1] = diffArray

    print diffArray

    gm.addFactors(gm.addFunctions(unaries), numpy.arange(numVar))

    regularizer = opengm.pottsFunction([2, 2], 0.0, beta)
    gridVariableIndices = opengm.secondOrderGridVis(img.shape[0], img.shape[1])
Example #49
def inference_ogm(unary_potentials,
                  pairwise_potentials,
                  edges,
                  return_energy=False,
                  alg='dd',
                  init=None,
                  reserveNumFactorsPerVariable=2,
                  **kwargs):
    """Inference with OpenGM backend.

    Parameters
    ----------
    unary_potentials : nd-array, shape (n_nodes, n_states)
        Unary potentials of energy function.

    pairwise_potentials : nd-array, shape (n_states, n_states) or (n_edges, n_states, n_states)
        Pairwise potentials of energy function.
        In the first case, edge potentials are assumed to be the same for all edges.
        In the second case, the sequence needs to correspond to the edges.

    edges : nd-array, shape (n_edges, 2)
        Graph edges for pairwise potentials, given as pair of node indices. As
        pairwise potentials are not assumed to be symmetric, the direction of
        the edge matters.

    alg : string
        Possible choices currently are:
            * 'bp' for Loopy Belief Propagation.
            * 'dd' for Dual Decomposition via Subgradients.
            * 'trws' for Vladimir's TRW-S implementation.
            * 'trw' for OGM  TRW.
            * 'gibbs' for Gibbs sampling.
            * 'lf' for Lazy Flipper
            * 'fm' for Fusion Moves (alpha-expansion fusion)
            * 'dyn' for Dynamic Programming (message passing in trees)
            * 'gc' for Graph Cut
            * 'alphaexp' for Alpha Expansion using Graph Cuts
            * 'mqpbo' for multi-label qpbo

    init : nd-array
        Initial solution for starting inference (ignored by some algorithms).

    reserveNumFactorsPerVariable :
        reserve a certain number of factors for each variable can speed up
        the building of a graphical model.
        (For a 2d grid with second order factors one should set this to 5:
         four second-order factors and one unary factor for most pixels)

    Returns
    -------
    labels : nd-array
        Approximate (usually) MAP variable assignment.
    """

    import opengm
    n_states, pairwise_potentials = \
        _validate_params(unary_potentials, pairwise_potentials, edges)
    n_nodes = len(unary_potentials)

    gm = opengm.gm(np.ones(n_nodes, dtype=opengm.label_type) * n_states)

    nFactors = int(n_nodes + edges.shape[0])
    gm.reserveFactors(nFactors)
    gm.reserveFunctions(nFactors, 'explicit')

    # all unaries as one numpy array
    # (opengm's value_type == float64 but all types are accepted)
    unaries = np.require(unary_potentials, dtype=opengm.value_type) * -1.0
    # add all unary functions at once
    fidUnaries = gm.addFunctions(unaries)
    visUnaries = np.arange(n_nodes, dtype=opengm.label_type)
    # add all unary factors at once
    gm.addFactors(fidUnaries, visUnaries)

    # add all pairwise functions at once
    # (the first axis of secondOrderFunctions iterates over the functions)

    secondOrderFunctions = -np.require(pairwise_potentials,
                                       dtype=opengm.value_type)
    fidSecondOrder = gm.addFunctions(secondOrderFunctions)
    gm.addFactors(fidSecondOrder, edges.astype(np.uint64))

    if alg == 'bp':
        inference = opengm.inference.BeliefPropagation(gm)
    elif alg == 'dd':
        inference = opengm.inference.DualDecompositionSubgradient(gm)
    elif alg == 'trws':
        inference = opengm.inference.TrwsExternal(gm)
    elif alg == 'trw':
        inference = opengm.inference.TreeReweightedBp(gm)
    elif alg == 'gibbs':
        inference = opengm.inference.Gibbs(gm)
    elif alg == 'lf':
        inference = opengm.inference.LazyFlipper(gm)
    elif alg == 'icm':
        inference = opengm.inference.Icm(gm)
    elif alg == 'dyn':
        inference = opengm.inference.DynamicProgramming(gm)
    elif alg == 'fm':
        inference = opengm.inference.AlphaExpansionFusion(gm)
    elif alg == 'gc':
        inference = opengm.inference.GraphCut(gm)
    elif alg == 'loc':
        inference = opengm.inference.Loc(gm)
    elif alg == 'mqpbo':
        inference = opengm.inference.Mqpbo(gm)
    elif alg == 'alphaexp':
        inference = opengm.inference.AlphaExpansion(gm)
    if init is not None:
        inference.setStartingPoint(init)

    inference.infer()
    # we convert the result to int from unsigned int
    # because otherwise we are sure to shoot ourself in the foot
    res = inference.arg().astype(np.int)
    if return_energy:
        return res, gm.evaluate(res)
    return res
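A minimal usage sketch for the function above (not from the original source): two binary nodes joined by a single edge, solved with loopy belief propagation. It assumes inference_ogm and its _validate_params helper are importable from the same module, and the potential values are made up.

import numpy as np

unary_potentials = np.array([[0.2, 0.8],    # node 0 prefers state 1
                             [0.7, 0.3]])   # node 1 prefers state 0
# one shared pairwise potential rewarding equal labels
pairwise_potentials = np.array([[0.5, 0.0],
                                [0.0, 0.5]])
edges = np.array([[0, 1]])

labels, energy = inference_ogm(unary_potentials, pairwise_potentials, edges,
                               alg='bp', return_energy=True)
print(labels, energy)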
import numpy 
import opengm
import matplotlib.pyplot as plt

f1=numpy.ones([2])
f2=numpy.ones([2,2])

#Chain (non-shared functions):
numVar=5
gm=opengm.gm([2]*numVar)
for vi in xrange(numVar):
    gm.addFactor(gm.addFunction(f1),vi)
    if(vi+1<numVar):
        gm.addFactor(gm.addFunction(f2),[vi,vi+1])

# visualize gm        
opengm.visualizeGm( gm,show=False,layout='spring',plotFunctions=True,
                    plotNonShared=True,relNodeSize=0.4)
plt.savefig("chain_non_shared.png",bbox_inches='tight',dpi=300)  
plt.close()

#Chain (shared high order functions):
numVar=5
gm=opengm.gm([2]*numVar)
fid2=gm.addFunction(f2)
for vi in xrange(numVar):
    gm.addFactor(gm.addFunction(f1),vi)
    if(vi+1<numVar):
        gm.addFactor(fid2,[vi,vi+1])

# visualize gm  
Example #51
import matplotlib.pyplot as plt


f1=numpy.ones([2])
f2=numpy.ones([2,2])

"""
Grid:
    - 3x3=9 variables
    - second order factors in 4-neighbourhood
      all connected to the same function
    - higher order functions are shared
"""

size=3
gm=opengm.gm([2]*size*size)

fid=gm.addFunction(f2)
for y in range(size):   
    for x in range(size):
        gm.addFactor(gm.addFunction(f1),x*size+y)
        if(x+1<size):
            gm.addFactor(fid,[x*size+y,(x+1)*size+y])
        if(y+1<size):
            gm.addFactor(fid,[x*size+y,x*size+(y+1)])


opengm.visualizeGm( gm,layout='spring',iterations=3000,
                    show=True,plotFunctions=True,
                    plotNonShared=True,relNodeSize=0.4)
plt.show()
Example #52
def inference_ogm(unary_potentials, pairwise_potentials, edges,
                  return_energy=False, alg='dd', init=None,
                  reserveNumFactorsPerVariable=2, **kwargs):
    """Inference with OpenGM backend.

    Parameters
    ----------
    unary_potentials : nd-array
        Unary potentials of energy function.

    pairwise_potentials : nd-array
        Pairwise potentials of energy function.

    edges : nd-array
        Edges of energy function.

    alg : string
        Possible choices currently are:
            * 'bp' for Loopy Belief Propagation.
            * 'dd' for Dual Decomposition via Subgradients.
            * 'trws' for Vladimir's TRW-S implementation.
            * 'trw' for OGM  TRW.
            * 'gibbs' for Gibbs sampling.
            * 'lf' for Lazy Flipper
            * 'fm' for Fusion Moves (alpha-expansion fusion)
            * 'dyn' for Dynamic Programming (message passing in trees)
            * 'gc' for Graph Cut
            * 'alphaexp' for Alpha Expansion using Graph Cuts
            * 'mqpbo' for multi-label qpbo

    init : nd-array
        Initial solution for starting inference (ignored by some algorithms).

    reserveNumFactorsPerVariable :
        reserve a certain number of factors for each variable can speed up
        the building of a graphical model.
        (For a 2d grid with second order factors one should set this to 5:
         four second-order factors and one unary factor for most pixels)

    Returns
    -------
    labels : nd-array
        Approximate (usually) MAP variable assignment.
    """

    import opengm
    n_states, pairwise_potentials = \
        _validate_params(unary_potentials, pairwise_potentials, edges)
    n_nodes = len(unary_potentials)

    gm = opengm.gm(np.ones(n_nodes, dtype=opengm.label_type) * n_states)

    nFactors = int(n_nodes + edges.shape[0])
    gm.reserveFactors(nFactors)
    gm.reserveFunctions(nFactors, 'explicit')

    # all unaries as one numpy array
    # (opengm's value_type == float64 but all types are accepted)
    unaries = np.require(unary_potentials, dtype=opengm.value_type) * -1.0
    # add all unary functions at once
    fidUnaries = gm.addFunctions(unaries)
    visUnaries = np.arange(n_nodes, dtype=opengm.label_type)
    # add all unary factors at once
    gm.addFactors(fidUnaries, visUnaries)

    # add all pairwise functions at once
    # (the first axis of secondOrderFunctions iterates over the functions)

    secondOrderFunctions = -np.require(pairwise_potentials,
                                       dtype=opengm.value_type)
    fidSecondOrder = gm.addFunctions(secondOrderFunctions)
    gm.addFactors(fidSecondOrder, edges.astype(np.uint64))

    if alg == 'bp':
        inference = opengm.inference.BeliefPropagation(gm)
    elif alg == 'dd':
        inference = opengm.inference.DualDecompositionSubgradient(gm)
    elif alg == 'trws':
        inference = opengm.inference.TrwsExternal(gm)
    elif alg == 'trw':
        inference = opengm.inference.TreeReweightedBp(gm)
    elif alg == 'gibbs':
        inference = opengm.inference.Gibbs(gm)
    elif alg == 'lf':
        inference = opengm.inference.LazyFlipper(gm)
    elif alg == 'icm':
        inference = opengm.inference.Icm(gm)
    elif alg == 'dyn':
        inference = opengm.inference.DynamicProgramming(gm)
    elif alg == 'fm':
        inference = opengm.inference.AlphaExpansionFusion(gm)
    elif alg == 'gc':
        inference = opengm.inference.GraphCut(gm)
    elif alg == 'loc':
        inference = opengm.inference.Loc(gm)
    elif alg == 'mqpbo':
        inference = opengm.inference.Mqpbo(gm)
    elif alg == 'alphaexp':
        inference = opengm.inference.AlphaExpansion(gm)
    if init is not None:
        inference.setStartingPoint(init)

    inference.infer()
    # we convert the result to int from unsigned int
    # because otherwise we are sure to shoot ourself in the foot
    res = inference.arg().astype(np.int)
    if return_energy:
        return res, gm.evaluate(res)
    return res
Example #53
tgrid 	= cgp2d.TopologicalGrid(seg.astype(numpy.uint64))
cgp  	= cgp2d.Cgp(tgrid)


imgTopo  	= vigra.sampling.resize(imgLab,cgp.shape)
imgRGBTopo  = vigra.colors.transform_Lab2RGB(imgTopo)
gradTopo 	= vigra.filters.gaussianGradientMagnitude(imgTopo,1.0)
labelsTopo  = vigra.sampling.resize(seg.astype(numpy.float32),cgp.shape,0)



nVar 	= cgp.numCells(2)
nFac 	= cgp.numCells(1)
space 	= numpy.ones(nVar,dtype=opengm.label_type)*nVar
gm   	= opengm.gm(space)
wZero  	= numpy.zeros(nFac,dtype=opengm.value_type)
pf 		= opengm.pottsFunctions([nVar,nVar],wZero,wZero)
fids 	= gm.addFunctions(pf)
gm.addFactors(fids,cgp.cell1BoundsArray()-1)
cgc 	= opengm.inference.Cgc(gm=gm,parameter=opengm.InfParam(planar=True))


# visualize segmentation

#cgp2d.visualize(img_rgb=imgRGBTopo,cgp=cgp)#,edge_data_in=bestState.astype(numpy.float32))


argDual  = numpy.zeros(cgp.numCells(1),dtype=numpy.uint64)

Example #54
import opengm
import numpy

chainLength=5
numLabels=100
numberOfStates=numpy.ones(chainLength,dtype=opengm.label_type)*numLabels
gm=opengm.gm(numberOfStates,operator='adder')
#add some random unaries
for vi in range(chainLength):
   unaryFunction=numpy.random.random(numLabels)
   gm.addFactor(gm.addFunction(unaryFunction),[vi])
#add one second-order function


f=opengm.differenceFunction(shape=[numLabels]*2,weight=0.1)
print type(f),f
fid=gm.addFunction(f)
#add factors on a chain
for vi in range(chainLength-1):
   gm.addFactor(fid,[vi,vi+1])    


inf = opengm.inference.BeliefPropagation(gm,parameter=opengm.InfParam(steps=40,convergenceBound=0 ,damping=0.9))
inf.infer(inf.verboseVisitor())


print inf.arg()
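
Because the model above is a chain, belief propagation can also report per-variable (pseudo-)marginals, as the structured-prediction wrappers earlier in this listing do; a minimal sketch under that assumption:

# per-variable marginals for all chain variables
marginals = inf.marginals(numpy.arange(chainLength, dtype=opengm.index_type))
print marginals.shape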

Example #55
	img = vigra.readImage(args[1])

	if img.shape[2]!=3:
		print "Image must be RGB"
		sys.exit(0)

	T 	 = float(args[6])
	beta = float(args[7])

	

	imgFlat = img.reshape([-1,3]).view(numpy.ndarray)
	numVar  = imgFlat.shape[0]


	gm = opengm.gm(numpy.ones(numVar,dtype=opengm.label_type)*2)

	protoColor = numpy.array([args[3],args[4],args[5]],dtype=opengm.value_type).reshape([3,-1])
	protoColor = numpy.repeat(protoColor,numVar,axis=1).swapaxes(0,1)
	diffArray  = numpy.sum(numpy.abs(imgFlat - protoColor),axis=1)
	unaries    = numpy.ones([numVar,2],dtype=opengm.value_type)
	unaries[:,0]=T
	unaries[:,1]=diffArray

	print diffArray

	gm.addFactors(gm.addFunctions(unaries),numpy.arange(numVar))


	regularizer=opengm.pottsFunction([2,2],0.0,beta)
	gridVariableIndices=opengm.secondOrderGridVis(img.shape[0],img.shape[1])