Code Example #1
File: real_example.py  Project: chaubold/opengm
def makeGt(shape):
    # ground truth: the first half of the rows is label 0, the rest label 1
    gt = numpy.ones(shape, dtype='uint8')
    gt[0:shape[0] // 2, :] = 0
    return gt
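As a quick standalone check of what makeGt produces (a minimal sketch using only NumPy; the shape is chosen arbitrarily for illustration):

import numpy

def makeGt(shape):
    gt = numpy.ones(shape, dtype='uint8')
    gt[0:shape[0] // 2, :] = 0
    return gt

print(makeGt((4, 3)))
# expected: the first two rows are 0, the last two rows are 1
# [[0 0 0]
#  [0 0 0]
#  [1 1 1]
#  [1 1 1]]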
Code Example #2
import opengm
import opengm.learning as learning
from opengm import numpy

# weight vector
nWeights = 100
weightVals = numpy.ones(nWeights) * 0.5
weights = opengm.learning.Weights(weightVals)

dataset = learning.createDataset(loss='h')

print "type of dataset", dataset

# for grid search learner
lowerBounds = numpy.zeros(nWeights)
upperBounds = numpy.ones(nWeights)
nTestPoints = numpy.ones(nWeights).astype('uint64') * 10

learner = learning.gridSearchLearner(dataset=dataset,
                                     lowerBounds=lowerBounds,
                                     upperBounds=upperBounds,
                                     nTestPoints=nTestPoints)

learner.learn(infCls=opengm.inference.BeliefPropagation,
              parameter=opengm.InfParam(damping=0.5))

# for struct max margin learner
smm_learnerParam = learning.StructMaxMargin_Bundle_HammingLossParameter(
    1.0, 0.01, 0)
smm_learner = learning.StructMaxMargin_Bundle_HammingLoss(
    dataset, smm_learnerParam)
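The snippet constructs the bundle learner's parameter object and learner but stops before running it. Presumably training proceeds the same way as for the grid-search learner above; a minimal hedged sketch, assuming StructMaxMargin_Bundle_HammingLoss exposes the same learn() interface:

# assumption: the bundle learner is run through the same learn() interface
# as the grid-search learner above; this is a sketch, not taken from the file
smm_learner.learn(infCls=opengm.inference.BeliefPropagation,
                  parameter=opengm.InfParam(damping=0.5))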
Code Example #3
File: real_example.py  Project: chaubold/opengm
        # (excerpt: this snippet starts inside makeFeatures, in the loop that
        # collects Gaussian-smoothed copies of the noisy ground truth as unary features)
        feat.append(vigra.filters.gaussianSmoothing(randGt.astype('float32'), sigma))

    # gradient-magnitude responses at two scales serve as pairwise features
    featB = []
    for sigma in [1.0, 1.5]:
        featB.append(vigra.filters.gaussianGradientMagnitude(randGt.astype('float32'), sigma))

    # stack the per-scale responses into (height, width, nFeatures) arrays
    a = numpy.rollaxis(numpy.array(feat), axis=0, start=3)
    b = numpy.rollaxis(numpy.array(featB), axis=0, start=3)
    return a, b

for mi in range(nModels):

    gm = opengm.gm(numpy.ones(numVar) * nLables)
    gt = makeGt(shape)
    gtFlat = gt.reshape([-1])

    unaries, binaries = makeFeatures(gt)

    # print unaries, binaries

    for x in range(shape[0]):
        for y in range(shape[1]):
            # build a (2, nFeatures) matrix: row 0 carries the pixel's
            # features, row 1 is overwritten with constant 1s
            uFeat = unaries[x, y, :].astype("float64")
            uFeat = numpy.repeat(uFeat[:, numpy.newaxis], 2, axis=1).T
            uFeat[1, :] = 1

            lu = opengm.LUnaryFunction(weights=weights, numberOfLabels=nLables,
                                       features=uFeat, weightIds=uWeightIds)
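The repeat/transpose step above is easy to misread, so here is a NumPy-only sketch of what uFeat looks like for a single pixel (feature values chosen arbitrarily for illustration):

import numpy

uFeat = numpy.array([0.2, 0.7, 1.3])   # hypothetical features of one pixel
uFeat = numpy.repeat(uFeat[:, numpy.newaxis], 2, axis=1).T
uFeat[1, :] = 1
print(uFeat.shape)   # (2, 3): one row of features per label
print(uFeat)
# row 0 keeps the pixel's features: 0.2 0.7 1.3
# row 1 is overwritten with constant 1s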
Code Example #4
def makeGt(shape):
    gt = numpy.ones(shape, dtype='uint8')
    gt[0:shape[0] // 2, :] = 0
    return gt
Code Example #5
sSmooth = [1.0, 1.2, 1.5, 2.0, 3.0, 4.0]
sGrad = [1.0, 1.5, 2.0, 4.0]

nUWeights = len(sSmooth) + 1
nBWeights = len(sGrad) + 1
nWeights = nUWeights + nBWeights


def makeGt(shape):
    gt = numpy.ones(shape, dtype='uint8')
    gt[0:shape[0] // 2, :] = 0
    return gt


weightVals = numpy.ones(nWeights)
weights = opengm.learning.Weights(weightVals)

uWeightIds = numpy.arange(nUWeights, dtype='uint64')
bWeightIds = numpy.arange(start=nUWeights, stop=nWeights, dtype='uint64')

dataset = learning.createDataset(numWeights=nWeights)
weights = dataset.getWeights()  # from here on, use the dataset's own weight object


def makeFeatures(gt):
    # add uniform noise in [-2.5, 2.5) on top of the 0/1 ground truth labels
    random = (numpy.random.rand(*gt.shape) - 0.5) * 5.0
    randGt = random + gt

    # vigra.imshow(randGt)
    # plt.colorbar()
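To make the weight bookkeeping above concrete, the split of the weight vector into unary and pairwise ids can be checked with NumPy alone (no opengm needed):

import numpy

sSmooth = [1.0, 1.2, 1.5, 2.0, 3.0, 4.0]
sGrad = [1.0, 1.5, 2.0, 4.0]

nUWeights = len(sSmooth) + 1        # 7
nBWeights = len(sGrad) + 1          # 5
nWeights = nUWeights + nBWeights    # 12

uWeightIds = numpy.arange(nUWeights, dtype='uint64')
bWeightIds = numpy.arange(nUWeights, nWeights, dtype='uint64')

print(uWeightIds)   # [0 1 2 3 4 5 6]
print(bWeightIds)   # [ 7  8  9 10 11]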
Code Example #6
# create a simple model with exactly one variable with two labels
numWeights = 4
nLabels = 2
nVars = 1

# set weight ids and features for all labels
weightIds = numpy.array([[0, 1], [2, 3]])
features = numpy.array([[0.5, -0.25], [-0.5, -1.25]])

# create a dataset with the 4 weights and get its weight object
dataset = learning.createDataset(numWeights)
weights = dataset.getWeights()

# set up graphical model
gm = opengm.gm(numpy.ones(nVars) * nLabels)
fid = gm.addFunction(learning.lUnaryFunction(weights, 2, features, weightIds))
gm.addFactor(fid, [0])

# add graphical model to dataset with ground truth
ground_truth = numpy.array([0]).astype(opengm.label_type)
dataset.pushBackInstance(gm, ground_truth)

# set up learner and run
#learner = learning.structMaxMarginLearner(dataset, 0.1, 0.001, 0)
#learner =  learning.subgradientSSVM(dataset, learningRate=1.0, C=100, learningMode='batch')
#learner.learn(infCls=opengm.inference.TrwsExternal,  parameter=opengm.InfParam())

learner = learning.maxLikelihoodLearner(
    dataset,
    maximumNumberOfIterations=1500,
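For intuition about the weighted unary defined above: assuming (this is an assumption, not stated on this page) that lUnaryFunction assigns each label the dot product of its feature row with the weights selected by its weight-id row, the resulting values can be reproduced with plain NumPy:

import numpy

weightVals = numpy.array([0.5, 0.5, 0.5, 0.5])   # hypothetical weight values
weightIds = numpy.array([[0, 1], [2, 3]])
features = numpy.array([[0.5, -0.25], [-0.5, -1.25]])

# assumed semantics: value(label) = sum_k weightVals[weightIds[label, k]] * features[label, k]
values = (weightVals[weightIds] * features).sum(axis=1)
print(values)   # [ 0.125 -0.875]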
Code Example #7
File: example1.py  Project: chaubold/opengm
import opengm
import opengm.learning as learning
from opengm import numpy

# weight vector
nWeights = 100
weightVals = numpy.ones(nWeights) * 0.5
weights = opengm.learning.Weights(weightVals)

dataset = learning.createDataset(loss='h')

print "type of dataset", dataset

# for grid search learner
lowerBounds = numpy.zeros(nWeights)
upperBounds = numpy.ones(nWeights)
nTestPoints = numpy.ones(nWeights).astype('uint64') * 10

learner = learning.gridSearchLearner(dataset=dataset,
                                     lowerBounds=lowerBounds,
                                     upperBounds=upperBounds,
                                     nTestPoints=nTestPoints)

learner.learn(infCls=opengm.inference.BeliefPropagation,
              parameter=opengm.InfParam(damping=0.5))

# for struct max margin learner
smm_learnerParam = learning.StructMaxMargin_Bundle_HammingLossParameter(1.0, 0.01, 0)