Example #1
def evaluateCategory(scoredDetections, ranking, groundTruthFile, output=None):
    performance = []
    detections = []
    for img in scoredDetections.keys():
        data = scoredDetections[img]
        idx = range(len(data['boxes']))
        boxes = [
            data['boxes'][i] for i in idx if data[ranking][i] > float('-inf')
        ]
        scores = [
            data[ranking][i] for i in idx if data[ranking][i] > float('-inf')
        ]
        if len(boxes) > 0:
            fBoxes, fScores = det.nonMaximumSuppression(boxes, scores, 0.3)
            for i in range(len(fBoxes)):
                detections.append([img, fScores[i]] + fBoxes[i])
    detections.sort(key=lambda x: x[1], reverse=True)
    gtBoxes = [x.split() for x in open(groundTruthFile)]
    numPositives = len(gtBoxes)
    groundTruth = eval.loadGroundTruthAnnotations(gtBoxes)
    results = eval.evaluateDetections(groundTruth, detections, 0.5)
    if output is not None:
        output = output + '.' + ranking
    prec, recall = eval.computePrecisionRecall(numPositives, results['tp'],
                                               results['fp'], output)
    return prec, recall
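
Every example on this page funnels candidate boxes through det.nonMaximumSuppression(boxes, scores, maxOverlap) from libDetection. That implementation is not reproduced here; as a rough sketch only, assuming each box is an [x1, y1, x2, y2] list and that the call returns the surviving boxes together with their scores (as the fBoxes, fScores unpacking above suggests), a greedy IoU-based version could look like this:

def nonMaximumSuppressionSketch(boxes, scores, maxOverlap):
    # Hypothetical stand-in for det.nonMaximumSuppression: keep the highest-scoring
    # box first and drop any later box whose IoU with an already kept box exceeds
    # maxOverlap.
    def iou(a, b):
        ix1, iy1 = max(a[0], b[0]), max(a[1], b[1])
        ix2, iy2 = min(a[2], b[2]), min(a[3], b[3])
        inter = max(0.0, ix2 - ix1) * max(0.0, iy2 - iy1)
        union = (a[2] - a[0]) * (a[3] - a[1]) + (b[2] - b[0]) * (b[3] - b[1]) - inter
        return inter / union if union > 0 else 0.0

    order = sorted(range(len(boxes)), key=lambda i: scores[i], reverse=True)
    keptBoxes, keptScores = [], []
    for i in order:
        if all(iou(boxes[i], kept) <= maxOverlap for kept in keptBoxes):
            keptBoxes.append(boxes[i])
            keptScores.append(scores[i])
    return keptBoxes, keptScores
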
Example #2
def evaluateCategory(scoredDetections, categoryIdx, maxTime, groundTruthFile, output):
  performance = []
  ## Do a time analysis evaluation
  for t in range(maxTime):
    print " ****************** TIME: {:3} ********************** ".format(t) 
    detections = []
    for img in scoredDetections.keys():
      data = scoredDetections[img]
      idx = [i for i in range(len(data['time'])) if data['time'][i] <= t]
      boxes = [data['boxes'][i] for i in idx]
      scores = [data['scores'][i][categoryIdx] for i in idx]
      if len(boxes) > 0:
        fBoxes, fScores = det.nonMaximumSuppression(boxes, scores, 0.3)
        for i in range(len(fBoxes)):
          detections.append( [img, fScores[i]] + fBoxes[i] )
    detections.sort(key=lambda x:x[1], reverse=True)
    gtBoxes = [x.split() for x in open(groundTruthFile)]
    numPositives = len(gtBoxes)
    groundTruth = eval.loadGroundTruthAnnotations(gtBoxes)
    results = eval.evaluateDetections(groundTruth, detections, 0.5)
    if t == maxTime - 1:
      prec, recall = eval.computePrecisionRecall(numPositives, results['tp'], results['fp'], output)
    else:
      prec, recall = eval.computePrecisionRecall(numPositives, results['tp'], results['fp'])
    performance.append( [prec, recall] )
  return performance
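
A hypothetical call to the time-analysis variant above; the category index and file names are made up, and the layout of scoredDetections is inferred from the indexing inside the loop:

scoredDetections = {
    'img0001': {'boxes':  [[10, 10, 100, 100], [12, 14, 98, 96]],
                'scores': [[0.10, 0.85], [0.30, 0.40]],
                'time':   [0, 3]},
}
performance = evaluateCategory(scoredDetections, categoryIdx=1, maxTime=5,
                               groundTruthFile='category_test_boxes.txt',
                               output='category_pr')
# performance[t] is the [precision, recall] pair computed using only the boxes
# whose time is <= t; the final step also forwards output to eval.computePrecisionRecall.
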
Example #3
def evaluateCategory(scoredDetections, categoryIdx, maxTime, step,
                     groundTruthFile, output):
    performance = []
    ## Do a time analysis evaluation
    T = 0
    for t in range(0, maxTime, step):
        print " ****************** TIME: {:3} ********************** ".format(
            T)
        detections = []
        for img in scoredDetections.keys():
            data = scoredDetections[img]
            idx = [i for i in range(len(data['time'])) if data['time'][i] <= t]
            boxes = [data['boxes'][i] for i in idx]
            scores = [data['scores'][i][categoryIdx] for i in idx]
            if len(boxes) > 0:
                fBoxes, fScores = det.nonMaximumSuppression(boxes, scores, 0.3)
                for i in range(len(fBoxes)):
                    detections.append([img, fScores[i]] + fBoxes[i])
        detections.sort(key=lambda x: x[1], reverse=True)
        gtBoxes = [x.split() for x in open(groundTruthFile)]
        numPositives = len(gtBoxes)
        groundTruth = eval.loadGroundTruthAnnotations(gtBoxes)
        results = eval.evaluateDetections(groundTruth, detections, 0.5)
        if t == maxTime - 1:
            prec, recall = eval.computePrecisionRecall(numPositives,
                                                       results['tp'],
                                                       results['fp'], output)
        else:
            prec, recall = eval.computePrecisionRecall(numPositives,
                                                       results['tp'],
                                                       results['fp'])
        performance.append([prec, recall])
        T += 1
    return performance
Example #4
 def run(self, image, features, boxes):
   s = cu.tic()
   result = {}
   boxSet = [ map(float, b[1:]) for b in boxes ]
   for i in self.catIndex:
     scores = features[:,i]
     fb,fs = det.nonMaximumSuppression(boxSet, scores, self.maxOverlap)
     result[i] = (image, fb, fs)
   s = cu.toc(image, s)
   return result
Example #5
 def run(self, image, features, boxes):
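     # features appears to be a per-box score matrix with one column per category,
     # and each entry of boxes a row whose first field is an identifier, with the
     # remaining fields parsed as box coordinates; for every category index in
     # self.catIndex the boxes are pruned with NMS at self.maxOverlap.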
     s = cu.tic()
     result = {}
     boxSet = [map(float, b[1:]) for b in boxes]
     for i in self.catIndex:
         scores = features[:, i]
         fb, fs = det.nonMaximumSuppression(boxSet, scores, self.maxOverlap)
         result[i] = (image, fb, fs)
     s = cu.toc(image, s)
     return result
Example #6
 def run(self,img,features,bboxes):
   scores,labels = self.model.predictAll(features,bboxes)
   candIdx = scores>=self.threshold
   numCandidates = candIdx[candIdx==True].shape[0]
   if numCandidates > 0:
     candidateBoxes = [bboxes[t]+[labels[t]] for t in range(candIdx.shape[0]) if candIdx[t]]
     candidateScores = scores[candIdx]
     filteredBoxes,filteredScores = det.nonMaximumSuppression(candidateBoxes,candidateScores,self.maxOverlap)
     return (img,filteredBoxes,filteredScores)
   else:
     return None
Example #7
 def project(self, img, regions, imgDet):
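     # Warp detections, whose coordinates are expressed in a 226x226 reference
     # window per region, back into image coordinates, suppress overlapping areas,
     # score the cached proposals against them, and return the NMS-filtered
     # proposals with their scores.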
     imgProp = selectProposalsByArea(self.proposals[img], self.minArea,
                                     self.maxArea)
     results = []
     transformedAreas, areasScores = [], []
     convUnitToRegion = wp.Warping()
     print 'Image:', img, 'proposals:', len(imgProp), 'detections:', len(
         imgDet)
     for r in range(len(regions)):
         regionDet = imgDet[imgDet[:, 1] == r]
         convUnitToRegion.prepare([0, 0, 226, 226],
                                  map(float, regions[r][1:]))
         for d in range(len(regionDet)):
             x, y = regionDet[d, 2:4]
             sa = convUnitToRegion.transform(
                 self.projections[int(x)][int(y)])
             transformedAreas.append([0] + sa)
             areasScores.append(regionDet[d, 4])
     transformedAreas, areasScores = det.nonMaximumSuppression(
         transformedAreas, areasScores, 0.5)
     #self.visualizeScoreMaps(img,transformedAreas, areasScores)
     transformedAreas = [
         transformedAreas[i] + [areasScores[i]]
         for i in range(len(areasScores))
     ]
     propScores = scoreProposals(transformedAreas, imgProp)
     imgProp = [
         imgProp[i] for i in range(len(imgProp)) if propScores[i] >= 0.0
     ]
     propScores = [
         propScores[i] for i in range(len(propScores))
         if propScores[i] >= 0.0
     ]
     finalBoxes, finalScores = det.nonMaximumSuppression(
         imgProp, propScores, self.nmsThreshold)
     for i in range(len(finalBoxes)):
         results.append([img, finalScores[i]] + map(int, finalBoxes[i]))
     return results
Example #8
 def project(self,img,regions,imgDet):
     imgProp = selectProposalsByArea(self.proposals[img],self.minArea,self.maxArea)
     results = []
     transformedAreas,areasScores = [],[]
     convUnitToRegion = wp.Warping()
     print 'Image:',img,'proposals:',len(imgProp),'detections:',len(imgDet)
     for r in range(len(regions)):
         regionDet = imgDet[imgDet[:,1]==r]
         convUnitToRegion.prepare([0,0,226,226],map(float,regions[r][1:]))
         for d in range(len(regionDet)):
             x,y = regionDet[d,2:4]
             sa = convUnitToRegion.transform(self.projections[int(x)][int(y)])
             transformedAreas.append( [0] + sa )
             areasScores.append(regionDet[d,4])
     transformedAreas,areasScores = det.nonMaximumSuppression(transformedAreas, areasScores, 0.5)
     #self.visualizeScoreMaps(img,transformedAreas, areasScores)
     transformedAreas = [transformedAreas[i]+[areasScores[i]] for i in range(len(areasScores))]
     propScores = scoreProposals(transformedAreas,imgProp)
     imgProp = [imgProp[i] for i in range(len(imgProp)) if propScores[i] >= 0.0]
     propScores = [propScores[i] for i in range(len(propScores)) if propScores[i] >= 0.0]
     finalBoxes,finalScores = det.nonMaximumSuppression(imgProp, propScores, self.nmsThreshold)
     for i in range(len(finalBoxes)):
         results.append( [img,finalScores[i]] + map(int,finalBoxes[i]) )
     return results
Example #9
 def run(self, img, features, bboxes):
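     # Score every candidate box with the model, keep the boxes whose score is at
     # least self.threshold (appending the predicted label to each box), and
     # return the NMS-filtered boxes and scores for this image.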
     scores, labels = self.model.predictAll(features, bboxes)
     candIdx = scores >= self.threshold
     numCandidates = candIdx[candIdx == True].shape[0]
     if numCandidates > 0:
         candidateBoxes = [
             bboxes[t] + [labels[t]] for t in range(candIdx.shape[0])
             if candIdx[t]
         ]
         candidateScores = scores[candIdx]
         filteredBoxes, filteredScores = det.nonMaximumSuppression(
             candidateBoxes, candidateScores, self.maxOverlap)
         return (img, filteredBoxes, filteredScores)
     else:
         return None
Example #10
 def run(self, img, features, bboxes):
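     # Run each per-category model over the shared feature matrix, threshold its
     # scores, and keep the NMS-filtered boxes and scores for that category.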
     results = {}
     for category in self.models.keys():
         scores = self.models[category].predict(features)
         candIdx = scores >= self.threshold
         numCandidates = candIdx[candIdx == True].shape[0]
         if numCandidates > 0:
             candidateBoxes = [
                 bboxes[t] for t in range(candIdx.shape[0]) if candIdx[t]
             ]
             candidateScores = scores[candIdx]
             filteredBoxes, filteredScores = det.nonMaximumSuppression(
                 candidateBoxes, candidateScores, self.maxOverlap)
             results[category] = (img, filteredBoxes, filteredScores)
     return results
Example #11
def evaluateCategory(scoredDetections, ranking, groundTruthFile, output=None):
  performance = []
  detections = []
  for img in scoredDetections.keys():
    data = scoredDetections[img]
    idx = range(len(data['boxes']))
    boxes = [data['boxes'][i] for i in idx if data[ranking][i] > float('-inf')]
    scores = [data[ranking][i] for i in idx if data[ranking][i] > float('-inf')]
    if len(boxes) > 0:
      fBoxes, fScores = det.nonMaximumSuppression(boxes, scores, 0.3)
      for i in range(len(fBoxes)):
        detections.append( [img, fScores[i]] + fBoxes[i] )
  detections.sort(key=lambda x:x[1], reverse=True)
  gtBoxes = [x.split() for x in open(groundTruthFile)]
  numPositives = len(gtBoxes)
  groundTruth = eval.loadGroundTruthAnnotations(gtBoxes)
  results = eval.evaluateDetections(groundTruth, detections, 0.5)
  if output is not None:
    output = output + '.' + ranking
  prec, recall = eval.computePrecisionRecall(numPositives, results['tp'], results['fp'], output)
  return prec, recall
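
eval.loadGroundTruthAnnotations, eval.evaluateDetections and eval.computePrecisionRecall come from the project's evaluation module, which is not shown on this page. As an assumption-laden sketch only: if results['tp'] and results['fp'] are 0/1 flags over the ranked detections (highest score first), a PASCAL-style precision/recall computation with a compatible call shape would be:

import numpy as np

def computePrecisionRecallSketch(numPositives, tp, fp, output=None):
    # Hypothetical stand-in for eval.computePrecisionRecall; output (a file prefix
    # in the examples above) is ignored here.
    tp = np.cumsum(np.asarray(tp, dtype=float))
    fp = np.cumsum(np.asarray(fp, dtype=float))
    recall = tp / max(float(numPositives), 1.0)
    precision = tp / np.maximum(tp + fp, np.finfo(float).eps)
    return precision, recall
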
Example #12
import os
import h5py
import numpy as np
import scipy.io
import libDetection as det
# cu refers to the project's utility module (it provides loadParams); its import is not part of this excerpt.

params = cu.loadParams('rcnnImdbFile MatlabScoresDir outputDir doNMS')

imdb = h5py.File(params['rcnnImdbFile'])
print 'Images:',imdb['imdb']['image_ids']
images = [u''.join(unichr(c) for c in imdb[o]) for o in imdb['imdb']['image_ids'][0]]
doNMS = params['doNMS'] != 'noNMS'

for f in os.listdir(params['MatlabScoresDir']):
  if f.endswith('__all.mat') and f.find('_boxes_') != -1:
    nameParts = f.split('_')
    out = open(params['outputDir'] + '/' + nameParts[0] + '_' + nameParts[1] + '.out', 'w')
    data = scipy.io.loadmat(params['MatlabScoresDir'] + '/' + f)
    print nameParts[0:2]
    for i in range(data['boxes'].shape[0]):
      detections = data['boxes'][i][0]
      img = str(images[i])
      boxes = [ box[0:4].tolist() for box in detections ]
      scores = [ box[-1] for box in detections ]
      if len(boxes) == 0: 
        continue
      if doNMS:
        boxes, scores = det.nonMaximumSuppression(boxes, scores, 0.3)
      for j in range(len(boxes)):
        box = boxes[j]
        out.write(img + ' {:10.8f} {:.0f} {:.0f} {:.0f} {:.0f} 0\n'.format(scores[j], box[0], box[1], box[2], box[3]) )
     
Example #13
for k in gt:
    try:
        groundTruth[k[0]] += [k[1:]]
    except:
        groundTruth[k[0]] = [k[1:]]

## Make Detections
features, bboxes = cu.loadMatrixAndIndex(featuresDir + '/' + testImage + '.' +
                                         featuresExt)
scores = model.predict(features)
candIdx = scores >= threshold
numCandidates = candIdx[candIdx == True].shape[0]
print 'Candidate Boxes:', numCandidates
if numCandidates > 0:
    candidateBoxes = [bboxes[t] for t in range(candIdx.shape[0]) if candIdx[t]]
    candidateScores = scores[candIdx]
    filteredBoxes, filteredScores = det.nonMaximumSuppression(
        candidateBoxes, candidateScores, maxOverlap)
    print testImage, len(filteredBoxes)
    #for i in range(len(filteredBoxes)):
    #  b = filteredBoxes[i]
    #  print b[0] + ' {:.8f} {:} {:} {:} {:}\n'.format(filteredScores[i],b[1],b[2],b[3],b[4])
    det.showDetections('/home/caicedo/data/allimgs/' + testImage + '.jpg',
                       filteredBoxes, filteredScores, True)
    det.showDetections('/home/caicedo/data/allimgs/' + testImage + '.jpg',
                       candidateBoxes, candidateScores, False)
    det.showBestMatches('/home/caicedo/data/allimgs/' + testImage + '.jpg',
                        candidateBoxes, candidateScores,
                        groundTruth[testImage])

sys.exit()
import matplotlib.pyplot as plt
features = np.asmatrix(features)
  
  pickle.dump(imgs, open(params['scoresFile']+'.p','wb'))

sys.exit()

categories = 'aeroplane bicycle bird boat bottle bus car cat chair cow diningtable dog horse motorbike person pottedplant sheep sofa train tvmonitor'.split()
categories = categories + [c + '_big' for c in categories] + [c + '_inside' for c in categories]

if params['category'] == 'all':
  selectedCategories = range(len(categories))
  out = dict([ (c,open(params['outputDir']+'/'+c+'.out','w')) for c in categories])
else:
  catIdx = int(params['category'])
  selectedCategories = [catIdx]
  out = {categories[catIdx]:open(params['outputDir']+'/'+categories[catIdx]+'.out','w')}

counter = 0
for i in imgs.keys():
  counter += 1
  print counter,i,
  for j in selectedCategories:
    print categories[j],
    fb, fs = det.nonMaximumSuppression(imgs[i]['boxes'], imgs[i]['scores'][:,j], 0.3)
    for k in range(len(fb)):
      out[categories[j]].write(i + ' {:.8f} {:.0f} {:.0f} {:.0f} {:.0f} {:.0f}\n'.format(fs[k],fb[k][0],fb[k][1],fb[k][2],fb[k][3],0))
  print ''
  imgs[i] = []

for o in out.keys():
  out[o].close()
Example #15
if params['category'] == 'all':
    selectedCategories = range(len(categories))
    out = dict([(c, open(params['outputDir'] + '/' + c + '.out', 'w'))
                for c in categories])
else:
    catIdx = int(params['category'])
    selectedCategories = [catIdx]
    out = {
        categories[catIdx]:
        open(params['outputDir'] + '/' + categories[catIdx] + '.out', 'w')
    }

counter = 0
for i in imgs.keys():
    counter += 1
    print counter, i,
    for j in selectedCategories:
        print categories[j],
        fb, fs = det.nonMaximumSuppression(imgs[i]['boxes'],
                                           imgs[i]['scores'][:, j], 0.3)
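        # One output line per kept box: imageId score x1 y1 x2 y2 0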
        for k in range(len(fb)):
            out[categories[j]].write(
                i + ' {:.8f} {:.0f} {:.0f} {:.0f} {:.0f} {:.0f}\n'.format(
                    fs[k], fb[k][0], fb[k][1], fb[k][2], fb[k][3], 0))
    print ''
    imgs[i] = []

for o in out.keys():
    out[o].close()
Example #16
imdb = h5py.File(params['rcnnImdbFile'])
print 'Images:', imdb['imdb']['image_ids']
images = [
    u''.join(unichr(c) for c in imdb[o]) for o in imdb['imdb']['image_ids'][0]
]
doNMS = params['doNMS'] != 'noNMS'

for f in os.listdir(params['MatlabScoresDir']):
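    # Each *_boxes_*__all.mat file appears to hold one cell of detections per
    # image; a detection row carries the box in its first four columns and the
    # score in its last, and is written out as: imageId score x1 y1 x2 y2 0.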
    if f.endswith('__all.mat') and f.find('_boxes_') != -1:
        nameParts = f.split('_')
        out = open(
            params['outputDir'] + '/' + nameParts[0] + '_' + nameParts[1] +
            '.out', 'w')
        data = scipy.io.loadmat(params['MatlabScoresDir'] + '/' + f)
        print nameParts[0:2]
        for i in range(data['boxes'].shape[0]):
            detections = data['boxes'][i][0]
            img = str(images[i])
            boxes = [box[0:4].tolist() for box in detections]
            scores = [box[-1] for box in detections]
            if len(boxes) == 0:
                continue
            if doNMS:
                boxes, scores = det.nonMaximumSuppression(boxes, scores, 0.3)
            for j in range(len(boxes)):
                box = boxes[j]
                out.write(img +
                          ' {:10.8f} {:.0f} {:.0f} {:.0f} {:.0f} 0\n'.format(
                              scores[j], box[0], box[1], box[2], box[3]))
Example #17
for k in gt:
  try:
    groundTruth[k[0]] += [ k[1:] ]
  except:
    groundTruth[k[0]] = [ k[1:] ]

## Make Detections
features,bboxes = cu.loadMatrixAndIndex( featuresDir+'/'+testImage+'.'+featuresExt )
scores = model.predict( features )
candIdx = scores>=threshold
numCandidates = candIdx[candIdx==True].shape[0]
print 'Candidate Boxes:',numCandidates
if numCandidates > 0:
  candidateBoxes = [bboxes[t] for t in range(candIdx.shape[0]) if candIdx[t]]
  candidateScores = scores[candIdx]
  filteredBoxes,filteredScores = det.nonMaximumSuppression(candidateBoxes,candidateScores,maxOverlap)
  print testImage,len(filteredBoxes)
  #for i in range(len(filteredBoxes)):
  #  b = filteredBoxes[i]
  #  print b[0] + ' {:.8f} {:} {:} {:} {:}\n'.format(filteredScores[i],b[1],b[2],b[3],b[4])
  det.showDetections('/home/caicedo/data/allimgs/'+testImage+'.jpg', filteredBoxes, filteredScores, True)
  det.showDetections('/home/caicedo/data/allimgs/'+testImage+'.jpg', candidateBoxes, candidateScores, False)
  det.showBestMatches('/home/caicedo/data/allimgs/'+testImage+'.jpg', candidateBoxes, candidateScores, groundTruth[testImage])

sys.exit()
import matplotlib.pyplot as plt
features = np.asmatrix(features)
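# The next three lines derive the matrix of pairwise squared Euclidean distances
# between the rows of features from the Gram matrix K: D[i, j] = N[i] + N[j] - 2 * K[i, j].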
K = features*features.T
N = np.diag(K)
D = np.tile(np.mat(N).T,(1,K.shape[0])) + np.tile(np.mat(N),(K.shape[0],1)) - 2*K
plt.imshow(G)