Example #1
import opengm
import opengm.learning as learning
from opengm import numpy  # opengm re-exports numpy

# weight vector
nWeights = 100
weightVals = numpy.ones(nWeights) * 0.5
weights = opengm.learning.Weights(weightVals)

dataset = learning.createDataset(loss='h')  # 'h' selects Hamming loss

print "type of dataset", dataset

# for grid search learner
lowerBounds = numpy.zeros(nWeights)
upperBounds = numpy.ones(nWeights)
nTestPoints = numpy.ones(nWeights).astype('uint64') * 10

learner = learning.gridSearchLearner(dataset=dataset,
                                     lowerBounds=lowerBounds,
                                     upperBounds=upperBounds,
                                     nTestPoints=nTestPoints)

learner.learn(infCls=opengm.inference.BeliefPropagation,
              parameter=opengm.InfParam(damping=0.5))

# for struct max margin learner
smm_learnerParam = learning.StructMaxMargin_Bundle_HammingLossParameter(
    1.0, 0.01, 0)
smm_learner = learning.StructMaxMargin_Bundle_HammingLoss(
    dataset, smm_learnerParam)
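
# the bundle learner is trained like the grid-search learner above
# (sketch; assumes the same learn(infCls=..., parameter=...) interface)
smm_learner.learn(infCls=opengm.inference.BeliefPropagation,
                  parameter=opengm.InfParam(damping=0.5))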
Example #2
    # fragment from a training script: ds, X, num_samples, num_labels,
    # label_weights, np (numpy), opengm and learning are defined earlier
    # the ground truth labels
    Y = ds['Y'][:num_samples]

    # superpixels (for reference)
    #superpixels_train = ds['superpixels'][:num_samples]

    # filenames (for reference)
    #fns_train = ds['file_names'][:num_samples]

    num_edge_feats = X[0][2].shape[1]
    num_unary_feats = num_labels * X[0][0].shape[1]
    num_weights = num_unary_feats + num_edge_feats
    # create and initialize weights
    print('num_weights =', num_weights)
    print('num_instances =', len(X))
    ogm_ds = learning.createDataset(num_weights, numInstances=len(X), loss="generalized-hamming")
    weights = ogm_ds.getWeights()

    for idx, (x, y) in enumerate(zip(X, Y)):
        y[y == -1] = 0  # FIXME: introduce a void label; for now, map void to background
        unary_feats, edges, edge_feats = x
        num_vars = unary_feats.shape[0]

        states = np.ones(num_vars, dtype=opengm.index_type) * num_labels
        
        gm = opengm.graphicalModel(states, operator='adder')

        lossParam = learning.GeneralizedHammingLossParameter()
        lossParam.setLabelLossMultiplier(np.array(label_weights))

        # add unary factors
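        # truncated in the source; a minimal sketch of what typically follows
        # (assumption: OpenGM's learnable-function API; uWeightIds is a
        # hypothetical per-label weight-id layout matching the unary block)
        for var in range(num_vars):
            f = opengm.LUnaryFunction(weights=weights,
                                      numberOfLabels=num_labels,
                                      features=unary_feats[var, :],
                                      weightIds=uWeightIds)
            gm.addFactor(gm.addFunction(f), var)
        # register the instance with its ground truth and loss settings
        # (assumption: setInstanceWithLossParam as in OpenGM's pascal_voc example)
        ogm_ds.setInstanceWithLossParam(idx, gm, y.astype(opengm.label_type),
                                        lossParam)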
Example #3
# fragment: imgs, sps, gts, fUnary and the superpixelDataset helper are
# defined earlier in the original script
from functools import partial

import vigra
import opengm
from opengm import learning

fBinary = [
    partial(vigra.filters.gaussianGradientMagnitude, sigma=1.5),
    partial(vigra.filters.gaussianGradientMagnitude, sigma=2.0),
    partial(vigra.filters.gaussianGradientMagnitude, sigma=3.0),
]

dataset, test_set = superpixelDataset(imgs=imgs,
                                      sps=sps,
                                      gts=gts,
                                      numberOfLabels=3,
                                      fUnary=fUnary,
                                      fBinary=fBinary,
                                      addConstFeature=True)
if True:
    dataset.save("simple_dataset", 'simple_')
if True:
    dataset = learning.createDataset(0, numInstances=0)
    dataset.load("simple_dataset", 'simple_')
if True:

    learner = learning.subgradientSSVM(dataset,
                                       learningRate=0.1,
                                       C=100,
                                       learningMode='batch',
                                       maxIterations=1000,
                                       averaging=-1)
    learner.learn(infCls=opengm.inference.TrwsExternal,
                  parameter=opengm.InfParam())

else:
    learner = learning.maxLikelihoodLearner(dataset, temp=0.0000001)
    learner.learn()
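
# after learning, the weights stored in the dataset hold the result and can
# be read back (sketch; mirrors the weight read-back in Example #6)
weights = dataset.getWeights()
for w in range(dataset.getNumberOfWeights()):
    print(weights[w])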
Example #4
import numpy
import vigra
from opengm import learning

nLabels = 2
shape = [6, 6]
numVar = shape[0] * shape[1]
nWeights = 4

def makeGt(shape):
    gt = numpy.ones(shape, dtype='uint8')
    gt[0:shape[0] // 2, :] = 0
    return gt

uWeightIds = numpy.arange(4,dtype='uint64').reshape(2,2)
print(uWeightIds)

bWeightIds = numpy.array([4,5,6],dtype='uint64')

dataset = learning.createDataset(numWeights=nWeights, loss='h')
weights = dataset.getWeights()

def makeFeatures(gt):
    random = numpy.random.rand(*gt.shape) - 0.5
    randGt = random + gt
    feat = []
    for sigma in [1.0, 1.5]:
        feat.append(vigra.filters.gaussianSmoothing(randGt.astype('float32'), sigma))

    featB = []
    for sigma in [1.0, 1.5]:
        featB.append(vigra.filters.gaussianGradientMagnitude(randGt.astype('float32'), sigma))
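    # truncated in the source; a plausible completion (assumption, following
    # OpenGM's learning tests) stacks the per-sigma responses into
    # (height, width, nChannels) feature arrays
    return (numpy.rollaxis(numpy.array(feat), 0, 3),
            numpy.rollaxis(numpy.array(featB), 0, 3))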


Example #5
    # fragment from a training script: ds, X, num_samples, num_labels,
    # label_weights, ogm_dss, ww, ii, np (numpy), opengm and learning are
    # defined earlier
    Y = ds['Y'][:num_samples]

    # superpixels (for reference)
    #superpixels_train = ds['superpixels'][:num_samples]

    # filenames (for reference)
    #fns_train = ds['file_names'][:num_samples]

    num_edge_feats = X[0][2].shape[1]
    num_unary_feats = num_labels * X[0][0].shape[1]
    num_weights = num_unary_feats + num_edge_feats
    # create and initialize weights
    print('num_weights =', num_weights)
    print('num_instances =', len(X))

    ogm_dss[ii] = learning.createDataset(num_weights, numInstances=len(X))
    #ogm_ds = ogm_dss[ii]
    ww[ii] = ogm_dss[ii].getWeights()

    for idx, (x, y) in enumerate(zip(X, Y)):
        print(idx)
        y[y == -1] = 0  # FIXME: introduce a void label; for now, map void to background
        unary_feats, edges, edge_feats = x
        num_vars = unary_feats.shape[0]

        states = np.ones(num_vars, dtype=opengm.label_type) * num_labels
        
        gm = opengm.gm(states, operator='adder')

        lossParam = learning.LossParameter(lossType='hamming', labelMult=label_weights)
        lossParam.setLabelLossMultiplier(label_weights)
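        # truncated in the source; after adding the learnable factors
        # (as sketched in Example #2), the instance would be registered
        # (assumption: setInstanceWithLossParam as in OpenGM's pascal_voc
        # example)
        ogm_dss[ii].setInstanceWithLossParam(idx, gm,
                                             y.astype(opengm.label_type),
                                             lossParam)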
Example #6
import opengm
from opengm import learning
import numpy as np

out_dir = './'
out_prefix = 'pascal_voc_train_'

dataset = learning.createDataset(0, loss='gh')
#dataset = learning.DatasetWithGeneralizedHammingLoss(0)
dataset.load(out_dir, out_prefix)

nWeights = dataset.getNumberOfWeights()
print('nWeights', nWeights)
print('nModels', dataset.getNumberOfModels())

# for grid search learner
lowerBounds = np.ones(nWeights) * -1.0
upperBounds = np.ones(nWeights) * 1.0
nTestPoints = np.ones(nWeights).astype('uint64') * 3

#learner = learning.gridSearchLearner(dataset=dataset,lowerBounds=lowerBounds, upperBounds=upperBounds,nTestPoints=nTestPoints)
learner = learning.structMaxMarginLearner(dataset, 1.0, 0.001, 0)

learner.learn(infCls=opengm.inference.Icm, parameter=opengm.InfParam())

weights = dataset.getWeights()

for w in range(nWeights):
    print(weights[w])

for i in range(dataset.getNumberOfModels()):
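    # loop body truncated in the source; a plausible continuation
    # (assumption: the Dataset exposes getModel) runs inference on each
    # model with the learned weights
    gm = dataset.getModel(i)
    inf = opengm.inference.Icm(gm)
    inf.infer()
    print('argmin of model', i, ':', inf.arg())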
Example #7
# fragment: nUWeights and nBWeights (unary / pairwise weight counts) are
# defined earlier in the original script
import numpy
import vigra
import opengm
from opengm import learning

nWeights = nUWeights + nBWeights


def makeGt(shape):
    gt = numpy.ones(shape, dtype='uint8')
    gt[0:shape[0] // 2, :] = 0
    return gt


weightVals = numpy.ones(nWeights)
weights = opengm.learning.Weights(weightVals)

uWeightIds = numpy.arange(nUWeights, dtype='uint64')
bWeightIds = numpy.arange(start=nUWeights, stop=nWeights, dtype='uint64')

dataset = learning.createDataset(numWeights=nWeights)
weights = dataset.getWeights()  # supersedes the standalone Weights object above


def makeFeatures(gt):
    random = (numpy.random.rand(*gt.shape) - 0.5) * 5.0
    randGt = random + gt

    # vigra.imshow(randGt)
    # plt.colorbar()
    # vigra.show()

    #f = pylab.figure()
    #for n, a in enumerate([gt, randGt]):
    #    f.add_subplot(2, 1, n)  # this line outputs images on top of each other
    #    # f.add_subplot(1, 2, n)  # this line outputs images side-by-side
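    # truncated in the source; a plausible completion (assumption, mirroring
    # makeFeatures in Example #4) computes smoothed and gradient-magnitude
    # features from randGt and returns them stacked per pixel
    feat = [vigra.filters.gaussianSmoothing(randGt.astype('float32'), s)
            for s in [1.0, 1.5]]
    featB = [vigra.filters.gaussianGradientMagnitude(randGt.astype('float32'), s)
             for s in [1.0, 1.5]]
    return (numpy.rollaxis(numpy.array(feat), 0, 3),
            numpy.rollaxis(numpy.array(featB), 0, 3))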