def classProportions(labs):
    res = np.histogram(labs,
                       bins=range(pomio.getNumClasses() + 1))[0].astype(float)
    s = res.sum()
    if s > 0:
        prop = res / s
    else:
        prop = np.zeros((pomio.getNumClasses(), ), dtype=float)
    return prop, res
def reportAccuracy(exptName, labs, predlabs):
    print exptName, ' accuracy (frac correct) = ', np.mean(predlabs == labs)
    apc = accuracyPerClass(labs, predlabs)
    print '   - average accuracy per class = ', apc.mean()
    for i in range(pomio.getNumClasses()):
        print '      %s: %f' % (pomio.getClasses()[i], apc[i])
    clp, clnum = classProportions(labs)
    print '   - class proportions in %s:' % exptName
    for i in range(pomio.getNumClasses()):
        print '      %15s: %.6f (%6d examples)' % (pomio.getClasses()[i],
                                                   clp[i], clnum[i])
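
# A minimal toy sketch of what the helpers above compute, using plain numpy so
# it runs without pomio (the real reportAccuracy() additionally pulls class
# names and the class count from pomio). The label values below are made up.
import numpy as np

gtLabs   = np.array([0, 0, 1, 1, 1, 2])
predLabs = np.array([0, 1, 1, 1, 2, 2])

overall  = np.mean(predLabs == gtLabs)                    # fraction correct
perClass = [np.mean(predLabs[gtLabs == c] == c) for c in range(3)]
counts   = np.histogram(gtLabs, bins=range(3 + 1))[0]     # examples per class
props    = counts.astype(float) / counts.sum()            # class proportions

assert abs(overall - 4.0 / 6) < 1e-9
assert perClass[0] == 0.5 and perClass[2] == 1.0
assert abs(props.sum() - 1.0) < 1e-9
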
def classifyFeatures(features, classifier, requireAllClasses=True):
    if requireAllClasses:
        assert np.all(classifier.classes_ == np.arange(pomio.getNumClasses())), \
            'Error: given classifier only has %d classes - %s' % \
            (len(classifier.classes_), str(classifier.classes_))
    c = classifier.predict(features)
    return c
def accuracyPerClass(labsGT, labsPred):
    n = pomio.getNumClasses()
    correct = (labsGT == labsPred)
    res = np.zeros((n, ), dtype=float)
    for i in range(n):
        msk = labsGT == i
        if np.any(msk):
            res[i] = np.mean(correct[labsGT == i])
    return res
def classProbsOfFeatures(features, classifier, requireAllClasses=True):
    if requireAllClasses:
        assert np.all(classifier.classes_ == np.arange(pomio.getNumClasses())), \
            'Error: given classifier only has %d classes - %s' % \
            (len(classifier.classes_), str(classifier.classes_))
    probs = classifier.predict_proba(features)
    if len(classifier.classes_) != pomio.getNumClasses():
        # Transform class probs to the correct sized matrix.
        nbClasses = pomio.getNumClasses()
        n = probs.shape[0]
        cpnew = np.zeros((n, nbClasses))
        for i in range(probs.shape[1]):
            # stuff this set of probs to new label
            cpnew[:, classifier.classes_[i] - 1] = probs[:, i]
        probs = cpnew
        del cpnew

    assert probs.shape[1] == pomio.getNumClasses()
    return probs
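
# A standalone sketch of the remapping step above: when a classifier was only
# trained on a subset of the classes, its probability columns are scattered
# into a full matrix with one column per class. expandClassProbs() is a
# hypothetical helper name, and this sketch uses 0-based class indices (i.e.
# without the "-1" offset applied in the function above).
import numpy as np

def expandClassProbs(probs, seenClasses, nbClasses):
    # probs has shape (n, len(seenClasses)); the result has shape (n, nbClasses)
    # with zero probability for any class the classifier never saw.
    full = np.zeros((probs.shape[0], nbClasses))
    for i, c in enumerate(seenClasses):
        full[:, c] = probs[:, i]
    return full

probsToy = np.array([[0.7, 0.3], [0.2, 0.8]])
fullToy = expandClassProbs(probsToy, seenClasses=[1, 3], nbClasses=4)
assert fullToy.shape == (2, 4)
assert np.all(fullToy[:, 0] == 0) and np.all(fullToy[:, 2] == 0)
assert np.all(fullToy[:, 1] == probsToy[:, 0])
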
def classProbsOfFeatures( features, classifier ):
    assert np.all( classifier.classes_ == np.arange( pomio.getNumClasses() ) ), \
        'Error: given classifier has %d classes, expecting %d - %s' % \
        ( len(classifier.classes_), pomio.getNumClasses(), str(classifier.classes_) )
    probs = classifier.predict_proba( features )
    # No remapping is needed here: the assertion above guarantees the
    # classifier covers every class, so predict_proba already returns one
    # column per class.

    assert probs.shape[1] == pomio.getNumClasses()
    assert probs.shape[0] == features.shape[0]
    return probs
def generateImagePredictionClassDist(rgbImage,
                                     classifier,
                                     requireAllClasses=True):
    """Takes an RGB image as an (i,j,3) numpy array and a scikit-learn
    classifier, and produces a probability distribution over the classes for
    each pixel.  Returns an (i,j,N) numpy array, where N is the total number
    of classes, for use in subsequent modelling."""

    # TODO Broaden to cope with more classifiers :)
    #assert (str(type(classifier)) == "<class 'sklearn.linear_model.logistic.LogisticRegression'>") , "Check classifier type value:: " + str(type(classifier))
    testClassifier = None

    imageDimensions = rgbImage[:, :, 0].shape
    nbCols = imageDimensions[1]
    nbRows = imageDimensions[0]
    #params = classifier.get_params(deep=True)

    #print "Classifier paras::" , params

    # Take image, generate features, use classifier to predict labels, ensure normalised dist and shape to (i,j,N) np.array

    # generate predictions for the image
    # todo: replace with features.computePixelFeatures JRS
    imagePixelFeatures = FeatureGenerator.generatePixelFeaturesForImage(
        rgbImage)
    #print imagePixelFeatures
    predictedPixelLabels = classifier.predict(imagePixelFeatures)
    predictionProbs = classifier.predict_proba(imagePixelFeatures)
    print "\nShape of predicted labels::", np.shape(predictedPixelLabels)
    print "\nShape of prediction probs::", np.shape(predictionProbs)
    numClasses = pomio.getNumClasses()

    assert not requireAllClasses or \
        (np.shape(predictionProbs)[1] == numClasses or \
             np.shape(predictionProbs)[1] == numClasses+1) , \
             "Classifier prediction does not match all classes (23 or 24):: " + \
             str(np.shape(predictionProbs)[1])
    print predictionProbs

    #!!predictionProbs = np.reshape(predictionProbs, (nbCols, nbRows, numClasses ))
    print 'reshaping to ', (nbRows, nbCols, predictionProbs.shape[1])
    predictionProbs = np.reshape(predictionProbs,
                                 (nbRows, nbCols, predictionProbs.shape[1]))

    return predictionProbs
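
# A quick self-contained check of the reshape convention used above:
# predict_proba returns one row per pixel in row-major (C) order, so reshaping
# to (nbRows, nbCols, N) keeps probs[r, c, :] aligned with pixel (r, c). The
# numbers below are arbitrary stand-ins for classifier output.
import numpy as np

toyRows, toyCols, toyN = 2, 3, 4
flat = np.arange(toyRows * toyCols * toyN, dtype=float).reshape((toyRows * toyCols, toyN))
cube = np.reshape(flat, (toyRows, toyCols, toyN))
assert np.all(cube[1, 2, :] == flat[1 * toyCols + 2, :])
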
def evaluateConfusionMatrix(predictedImg, gtImg):

    assert np.shape(predictedImg) == np.shape(
        gtImg), "Predict image and ground truth image are not the same size..."

    numClasses = pomio.getNumClasses()

    confusionMatrix = np.zeros([numClasses, numClasses], int)
    # rows are actual, cols are predicted
    for cl in range(numClasses):
        clMask = (gtImg == cl)
        # It's easy, just histogram those values
        vals = predictedImg[clMask]
        assert np.all(np.logical_and(0 <= vals, vals < numClasses)), vals.max()
        confusionMatrix[cl, :] = np.histogram(vals, range(numClasses + 1))[0]

    assert confusionMatrix.sum() == np.count_nonzero(
        gtImg != pomio.getVoidIdx())

    return confusionMatrix
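
# A toy, pomio-free illustration of the per-row histogram used above: rows
# index the ground-truth class, columns the predicted class. The 2x3 "images"
# below are made up.
import numpy as np

gtToy = np.array([[0, 0, 1], [1, 2, 2]])
predToy = np.array([[0, 1, 1], [1, 2, 0]])
nClassesToy = 3
cmToy = np.zeros((nClassesToy, nClassesToy), int)
for cl in range(nClassesToy):
    cmToy[cl, :] = np.histogram(predToy[gtToy == cl], range(nClassesToy + 1))[0]
assert np.all(cmToy == np.array([[1, 1, 0], [0, 2, 0], [1, 0, 1]]))
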
def trainLogisticRegressionModel(featureData,
                                 labels,
                                 Cvalue,
                                 outputClassifierFile,
                                 scaleData=True,
                                 requireAllClasses=True):
    # See [http://scikit-learn.org/dev/modules/generated/sklearn.linear_model.LogisticRegression.html]
    # Features are numPixel x numFeature np arrays, labels are numPixel np array
    numTrainDataPoints = np.shape(featureData)[0]
    numDataLabels = np.size(labels)

    assert (np.size(np.shape(labels)) == 1), (
        "Labels should be a 1d array.  Shape of labels = " +
        str(np.shape(labels)))
    assert (numTrainDataPoints == numDataLabels), (
        "The length of the feature and label data arrays must be equal.  Num data points="
        + str(numTrainDataPoints) + ", labels=" + str(numDataLabels))
    classLabels = np.unique(labels)
    assert not requireAllClasses or \
        ( np.size(classLabels) == pomio.getNumClasses() or np.size(classLabels) == pomio.getNumLabels() ), \
        "Training data does not contain all classes::\n\t" + str(classLabels)

    if scaleData:
        featureData = preprocessing.scale(featureData)

    # sklearn.linear_model.LogisticRegression(penalty='l2', dual=False, tol=0.0001, C=1.0, fit_intercept=True, intercept_scaling=1, class_weight=None, random_state=None)
    lrc = LogisticRegression(penalty='l1',
                             dual=False,
                             tol=0.0001,
                             C=Cvalue,
                             fit_intercept=True,
                             intercept_scaling=1)
    lrc.fit(featureData, labels)

    pickleObject(lrc, outputClassifierFile)
    print "LogisticRegression classifier saved to " + str(outputClassifierFile)

    return lrc
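
# A self-contained sketch of the core of the function above (feature scaling
# followed by L1-penalised logistic regression), on synthetic data so it runs
# without pomio or the pickling helper; note that newer scikit-learn versions
# need solver='liblinear' for an L1 penalty. C plays the role of Cvalue.
import numpy as np
from sklearn import preprocessing
from sklearn.linear_model import LogisticRegression

rng = np.random.RandomState(0)
toyFeatures = rng.randn(200, 10)          # (numPixels, numFeatures)
toyLabels = np.arange(200) % 3            # 1-d integer labels, 3 classes

toyX = preprocessing.scale(toyFeatures)
toyLrc = LogisticRegression(penalty='l1', C=0.5, fit_intercept=True,
                            solver='liblinear')
toyLrc.fit(toyX, toyLabels)
assert toyLrc.classes_.tolist() == [0, 1, 2]
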
 if 0:
     # logistic regression
     # Use fixed C
     C = 0.5
     classifier = trainLogisticRegressionModel(
         trainingData[0][subset,:], trainingData[1][subset], C, classifierBaseFilename, \
             scaleData=True, \
             requireAllClasses=False
         )
 elif 0:
     # Neural Network
     # Construct nn dataset
     datmat = trainingData[0][subset, :]
     labvec = trainingData[1][subset]
     nbFeatures = datmat.shape[1]
     nbClasses = pomio.getNumClasses()
     nbHidden = 100
     maxIter = 200
     classifier = NeuralNet.NNet(nbFeatures, nbClasses, nbHidden)
     nnds = classifier.createTrainingSetFromMatrix(datmat, labvec)
     classifier.trainNetworkBackprop(nnds, maxIter)
 else:
     #classifier = None
     # Random forest
     datmat = trainingData[0][subset, :]
     labvec = trainingData[1][subset]
     print '**Training a random forest on %d examples...' % len(labvec)
     print 'Labels represented: ', np.unique(labvec)
     classifier = sklearn.ensemble.RandomForestClassifier(\
         n_estimators=100)
     classifier = classifier.fit(datmat, labvec)
def getSuperPixelData(msrcImages, numberSuperPixels, superPixelCompactness):

    # Should probably make this a call to pomio in case the ordering changes in the future...
    voidClassLabel = pomio.getVoidIdx()

    numberImages = len(msrcImages)

    # for each image:
    #   determine superpixel label (discard if void)
    #   compute superpixel features of valid superpixels
    #   append features to cumulative array of all super pixel features
    #   append label to array of all labels

    superPixelFeatures = None
    superPixelLabels = np.array([], int)  # used for superpixel labels
    numberVoidSuperPixels = 0  # keep track of void superpixels

    nbClasses = pomio.getNumClasses()
    classAdjCounts = np.zeros((nbClasses, nbClasses))
    adjCountsTotal = 0
    adjVoidCountsTotal = 0

    for imgIdx in range(0, numberImages):

        superPixelIgnoreList = np.array(
            [], int
        )  # this is used to skip over the superpixel in feature processing

        print "\n**Processing Image#", (imgIdx + 1), " of", numberImages

        # get raw image and ground truth labels
        img = msrcImages[imgIdx].m_img
        imgPixelLabels = msrcImages[imgIdx].m_gt

        # create superpixel map and graph for image
        spgraph = SuperPixels.computeSuperPixelGraph(
            img, 'slic', [numberSuperPixels, superPixelCompactness])
        imgSuperPixelMask = spgraph.m_labels
        imgSuperPixels = spgraph.m_nodes
        numberImgSuperPixels = spgraph.getNumSuperPixels()

        # create superpixel exclude list & superpixel label array
        allSPClassLabels = []
        for spIdx in range(0, numberImgSuperPixels):

            superPixelValue = imgSuperPixels[spIdx]
            #print "\tINFO: Processing superpixel =", superPixelValue , " of" , numberImgSuperPixels, " in image"

            # Assume superpixel labels are sequence of integers
            superPixelValueMask = (
                imgSuperPixelMask == superPixelValue
            )  # Boolean array for indexing superpixel-pixels
            superPixelLabel = assignClassLabelToSuperPixel(
                superPixelValueMask, imgPixelLabels)
            allSPClassLabels.append(superPixelLabel)

            if (superPixelLabel == voidClassLabel):

                # add to ignore list, increment void count & do not add to superpixel label array
                superPixelIgnoreList = np.append(superPixelIgnoreList,
                                                 superPixelValue)
                numberVoidSuperPixels = numberVoidSuperPixels + 1

            else:
                superPixelLabels = np.append(superPixelLabels, superPixelLabel)

        assert len(allSPClassLabels) == numberImgSuperPixels
        (theseClassAdjCounts, adjVoidCount,
         adjCount) = spgraph.countClassAdjacencies(nbClasses, allSPClassLabels)
        classAdjCounts += theseClassAdjCounts
        adjCountsTotal += adjCount
        adjVoidCountsTotal += adjVoidCount

        # Now we have the superpixel labels, and an ignore list of void superpixels - time to get the features!
        imgSuperPixelFeatures = FeatureGenerator.generateSuperPixelFeatures(
            img, imgSuperPixelMask, excludeSuperPixelList=superPixelIgnoreList)

        if superPixelFeatures is None:
            superPixelFeatures = imgSuperPixelFeatures
        else:
            # stack the superpixel features into a single list
            superPixelFeatures = np.vstack(
                [superPixelFeatures, imgSuperPixelFeatures])

    assert np.shape(superPixelFeatures)[0] == np.shape(
        superPixelLabels)[0], "Number of samples != number labels"
    print "\n**Processed total of", numberImages, "images"
    print "  %d out of %d adjacencies were ignored due to void (%.2f %%)" % \
        (adjVoidCountsTotal, adjCountsTotal, \
             100.0*adjVoidCountsTotal/adjCountsTotal)

    # Now return the results
    return [superPixelFeatures, superPixelLabels, classAdjCounts]
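
# Design note on the accumulation above: growing superPixelFeatures with
# repeated np.vstack (and superPixelLabels with np.append) copies the arrays
# once per image. An equivalent pattern is to collect per-image blocks in a
# Python list and stack once at the end, as in this tiny sketch:
import numpy as np

featureBlocks = [np.ones((3, 5)), np.zeros((2, 5))]   # stand-ins for per-image features
allFeatures = np.vstack(featureBlocks)                # one stack at the end
assert allFeatures.shape == (5, 5)
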
"""
Neural Network Classifier
"""
import numpy as np

#from amb.seg import pomio, FeatureGenerator
import pomio
import FeatureGenerator

from pybrain.tools.shortcuts import buildNetwork

from pybrain.supervised import BackpropTrainer
from pybrain.datasets.classification import ClassificationDataSet

numFeatures = 86
numClasses = pomio.getNumClasses()  # no void class
voidClass = 13


class NNet:
    def __init__(self, numFeatures, numClasses, nbHidden=None):
        if nbHidden is None:
            nbHidden = np.round((numFeatures + numClasses) / 2,
                                0).astype('int')
        self.net = buildNetwork(numFeatures, nbHidden, numClasses, bias=True)
        self.nbFeatures = numFeatures
        self.nbClasses = numClasses
        print "\tNetwork creation complete:"
        print "\t\tinputLayer=", self.net['in']
        print "\t\thiddenLayer=", self.net['hidden0']
        print "\t\toutputLayer=", self.net['out']
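
# The default hidden-layer size above is the rounded mean of the input and
# output layer sizes. For example, with the module-level numFeatures = 86 and
# a 23-class problem (the real class count comes from pomio at runtime):
import numpy as np

defaultHidden = np.round((86 + 23) / 2.0, 0).astype('int')
assert defaultHidden == 54   # np.round(54.5) rounds half to even
# An explicit size can be passed instead, e.g. NNet(86, 23, nbHidden=100).
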
N = ftrs.shape[0]
D = ftrs.shape[1]
print '%d feature vectors of dimensionality = %d' % (N, D)

if args.labs is None:
    labs = None
else:
    if args.labs.endswith('.pkl'):
        labs = pomio.unpickleObject(args.labs)
    else:
        labs = pomio.readMatFromCSV(args.labs).astype(np.int32)

# show labels
if labs is not None:
    plt.figure()
    plt.hist(labs, pomio.getNumClasses())
    plt.title('Class counts')
    plt.xticks(range(pomio.getNumClasses()),
               pomio.getClasses()[:pomio.getNumClasses()],
               size='small')

# show at most 9 features at once
fstarts = range(args.nstart, D, args.nshow)
plt.figure()

for fs in fstarts:
    fend = min(D - 1, fs + args.nshow - 1)
    print '  Displaying features %d-%d:' % (fs, fend)
    rng = range(fs, fend + 1)
    fnames = ['F%d' % x for x in rng]
    amntools.gplotmatrix(ftrs[:, rng], labs, featureNames=fnames)