Code example #1
def evaluateCategory(scoredDetections, categoryIdx, maxTime, step,
                     groundTruthFile, output):
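    # Assumes the project's helper modules are imported at module level:
    # det (non-maximum suppression) and eval (detection evaluation utilities).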
    performance = []
    ## Do a time analysis evaluation
    T = 0
    for t in range(0, maxTime, step):
        print " ****************** TIME: {:3} ********************** ".format(
            T)
        detections = []
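        # Keep only detections produced within the current time budget t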
        for img in scoredDetections.keys():
            data = scoredDetections[img]
            idx = [i for i in range(len(data['time'])) if data['time'][i] <= t]
            boxes = [data['boxes'][i] for i in idx]
            scores = [data['scores'][i][categoryIdx] for i in idx]
            if len(boxes) > 0:
                fBoxes, fScores = det.nonMaximumSuppression(boxes, scores, 0.3)
                for i in range(len(fBoxes)):
                    detections.append([img, fScores[i]] + fBoxes[i])
        detections.sort(key=lambda x: x[1], reverse=True)
        gtBoxes = [x.split() for x in open(groundTruthFile)]
        numPositives = len(gtBoxes)
        groundTruth = eval.loadGroundTruthAnnotations(gtBoxes)
        results = eval.evaluateDetections(groundTruth, detections, 0.5)
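        # Write the precision/recall output file only at the final time step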
        if t == maxTime - 1:
            prec, recall = eval.computePrecisionRecall(numPositives,
                                                       results['tp'],
                                                       results['fp'], output)
        else:
            prec, recall = eval.computePrecisionRecall(numPositives,
                                                       results['tp'],
                                                       results['fp'])
        performance.append([prec, recall])
        T += 1
    return performance
Code example #2
def evaluateCategory(scoredDetections, categoryIdx, maxTime, groundTruthFile, output):
  performance = []
  ## Do a time analysis evaluation
  for t in range(maxTime):
    print " ****************** TIME: {:3} ********************** ".format(t) 
    detections = []
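    # Gather detections available by time step t, then apply per-image NMS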
    for img in scoredDetections.keys():
      data = scoredDetections[img]
      idx = [i for i in range(len(data['time'])) if data['time'][i] <= t]
      boxes = [data['boxes'][i] for i in idx]
      scores = [data['scores'][i][categoryIdx] for i in idx]
      if len(boxes) > 0:
        fBoxes, fScores = det.nonMaximumSuppression(boxes, scores, 0.3)
        for i in range(len(fBoxes)):
          detections.append( [img, fScores[i]] + fBoxes[i] )
    detections.sort(key=lambda x:x[1], reverse=True)
    gtBoxes = [x.split() for x in open(groundTruthFile)]
    numPositives = len(gtBoxes)
    groundTruth = eval.loadGroundTruthAnnotations(gtBoxes)
    results = eval.evaluateDetections(groundTruth, detections, 0.5)
    if t == maxTime - 1:
      prec, recall = eval.computePrecisionRecall(numPositives, results['tp'], results['fp'], output)
    else:
      prec, recall = eval.computePrecisionRecall(numPositives, results['tp'], results['fp'])
    performance.append( [prec, recall] )
  return performance
Code example #3
def mainLoop(modelType,modelArgs,positives,trueObjectBoxes,trainingList,featuresDir,featuresExt,modelOut,maxNegOverlap,iter):
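  # One round of hard-negative mining: sample (or mine) negatives, rank them
  # against the ground truth, train the detector, and keep the hardest
  # negatives for the next iteration.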
  pos,posIdx,ari,osi = positives
  startTime = cu.tic()
  if iter == 0:
    ## Random Negatives
    print ' >>> RANDOM NEGATIVES'
    neg,negIdx = learn.getRandomNegs(featuresDir,trainingList,featuresExt,pos.shape[1],maxVectorsCache,maxNegativeImages)
    detectionsList = [ [x[0],'0.0']+x[1:]+['1'] for x in negIdx]
    hards = {'features':np.zeros((0,neg.shape[1])),'index':[]}
    lap = cu.toc('Random negatives matrix ('+str(neg.shape[0])+' instances)',startTime)
  else:
    ## Mine hard negatives
    print ' >>> MINING HARD NEGATIVES'
    model = det.createDetector(modelType,modelArgs)
    model.load(modelOut+'.'+ str( iter-1 ))
    detectionsList = detector.detectObjects(model,trainingList,featuresDir,featuresExt,0.3,-10.0)
    hards = cu.loadMatrixNoCompression(modelOut+'.hards').item()
    lap = cu.toc('Hard negatives matrix ('+str(hards['features'].shape[0])+' instances)',startTime)

  ## Rank and clean negative detections
  detectionsData = evaluation.loadDetections(detectionsList)
  groundTruth = evaluation.loadGroundTruthAnnotations(trueObjectBoxes)
  detectionsLog = evaluation.evaluateDetections(groundTruth,detectionsData,0.5,allowDuplicates=True) # overlapMeasure=validRegion,
  evaluation.computePrecisionRecall(len(posIdx),detectionsLog['tp'],detectionsLog['fp'],'tmp.txt')
  evaluation.computePrecAt(detectionsLog['tp'],[20,50,100,200,300,400,500])
  logData = learn.parseRankedDetectionsFile(detectionsLog['log'],maxNegOverlap,maxNegativeVectors)
  print ' >>> LOADING HARD NEGATIVES'
  neg,negIdx = learn.loadHardNegativesFromList(featuresDir,logData['negExamples'],featuresExt,pos.shape[1],logData['negTaken'])
  del(detectionsList,detectionsData,detectionsLog,logData)
  lap = cu.toc('Ranked negatives matrix ('+str(neg.shape[0])+' instances)',lap)
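  # Merge the newly ranked negatives with hard negatives kept from previous iterations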
  neg = np.concatenate( (neg,hards['features']) )
  negIdx = negIdx + hards['index']

  ## Learn Detector
  clf = det.createDetector(modelType,modelArgs)
  clf.learn(pos,neg,posIdx,negIdx)
  clf.save(modelOut+'.'+str(iter))
  lap = cu.toc('Classifier learned:',lap)

  ## Keep hard negatives for next iterations
  scores = clf.predict(neg,negIdx)
  hardNegsIdx = np.argsort(scores)
  hardNeg = np.concatenate( (hards['features'], neg[hardNegsIdx[-cu.topHards:]]) )
  negIdx = hards['index'] + [negIdx[j] for j in hardNegsIdx[-cu.topHards:]]
  print 'Hard negatives:',hardNeg.shape[0]
  hards = {'features':hardNeg, 'index':negIdx}
  cu.saveMatrixNoCompression({'features':hardNeg,'index':negIdx},modelOut+'.hards')

  print ' ** Iteration',iter,'done'
  return {'detector':clf,'pos':pos,'posIdx':posIdx,'neg':neg,'negIdx':negIdx}
Code example #4
def evaluateCategory(scoredDetections, ranking, groundTruthFile, output=None):
    performance = []
    detections = []
    for img in scoredDetections.keys():
        data = scoredDetections[img]
        idx = range(len(data['boxes']))
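        # Keep only boxes with a finite score under the selected ranking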
        boxes = [
            data['boxes'][i] for i in idx if data[ranking][i] > float('-inf')
        ]
        scores = [
            data[ranking][i] for i in idx if data[ranking][i] > float('-inf')
        ]
        if len(boxes) > 0:
            fBoxes, fScores = det.nonMaximumSuppression(boxes, scores, 0.3)
            for i in range(len(fBoxes)):
                detections.append([img, fScores[i]] + fBoxes[i])
    detections.sort(key=lambda x: x[1], reverse=True)
    gtBoxes = [x.split() for x in open(groundTruthFile)]
    numPositives = len(gtBoxes)
    groundTruth = eval.loadGroundTruthAnnotations(gtBoxes)
    results = eval.evaluateDetections(groundTruth, detections, 0.5)
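    # Write results to a per-ranking output file when one is requested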
    if output is not None:
        output = output + '.' + ranking
    prec, recall = eval.computePrecisionRecall(numPositives, results['tp'],
                                               results['fp'], output)
    return prec, recall
Code example #5
def detectObjects(imageList, featuresDir, indexType, groundTruthDir,
                  outputDir):
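    # Run the softmax detector over precomputed features, then evaluate each
    # category against its ground-truth boxes at IoU 0.5.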
    maxOverlap = 0.3
    categories, catIndex = bse.categoryIndex(indexType)
    task = SoftmaxDetector(maxOverlap, catIndex)
    result = processData(imageList, featuresDir, 'prob', task)
    # Collect detection results after NMS
    detections = dict([(c, []) for c in catIndex])
    for res in result:
        for idx in catIndex:
            img, filteredBoxes, filteredScores = res[idx]
            for j in range(len(filteredBoxes)):
                detections[idx].append([img, filteredScores[j]] +
                                       filteredBoxes[j])
    # Evaluate results for each category independently
    for idx in catIndex:
        groundTruthFile = groundTruthDir + '/' + categories[
            idx] + '_test_bboxes.txt'
        output = outputDir + '/' + categories[idx] + '.out'
        detections[idx].sort(key=lambda x: x[1], reverse=True)
        gtBoxes = [x.split() for x in open(groundTruthFile)]
        numPositives = len(gtBoxes)
        groundTruth = eval.loadGroundTruthAnnotations(gtBoxes)
        results = eval.evaluateDetections(groundTruth, detections[idx], 0.5)
        prec, recall = eval.computePrecisionRecall(numPositives, results['tp'],
                                                   results['fp'], output)
Code example #6
def evaluateCategory(scoredDetections, ranking, groundTruthFile, output=None):
  performance = []
  detections = []
  for img in scoredDetections.keys():
    data = scoredDetections[img]
    idx = range(len(data['boxes']))
    boxes = [data['boxes'][i] for i in idx if data[ranking][i] > float('-inf')]
    scores = [data[ranking][i] for i in idx if data[ranking][i] > float('-inf')]
    if len(boxes) > 0:
      fBoxes, fScores = det.nonMaximumSuppression(boxes, scores, 0.3)
      for i in range(len(fBoxes)):
        detections.append( [img, fScores[i]] + fBoxes[i] )
  detections.sort(key=lambda x:x[1], reverse=True)
  gtBoxes = [x.split() for x in open(groundTruthFile)]
  numPositives = len(gtBoxes)
  groundTruth = eval.loadGroundTruthAnnotations(gtBoxes)
  results = eval.evaluateDetections(groundTruth, detections, 0.5)
  if output is not None:
    output = output + '.' + ranking
  prec, recall = eval.computePrecisionRecall(numPositives, results['tp'], results['fp'], output)
  return prec, recall
Code example #7
def detectObjects(imageList, featuresDir, indexType, groundTruthDir, outputDir):
  maxOverlap = 0.3
  categories, catIndex = bse.categoryIndex(indexType)
  task = SoftmaxDetector(maxOverlap, catIndex)
  result = processData(imageList, featuresDir, 'prob', task)
  # Collect detection results after NMS
  detections = dict([ (c,[]) for c in catIndex])
  for res in result:
    for idx in catIndex:
      img, filteredBoxes, filteredScores = res[idx]
      for j in range(len(filteredBoxes)):
        detections[idx].append( [img, filteredScores[j]] + filteredBoxes[j] )
  # Evaluate results for each category independently
  for idx in catIndex:
    groundTruthFile = groundTruthDir + '/' + categories[idx] + '_test_bboxes.txt'
    output = outputDir + '/' + categories[idx] + '.out'
    detections[idx].sort(key=lambda x:x[1], reverse=True)
    gtBoxes = [x.split() for x in open(groundTruthFile)]
    numPositives = len(gtBoxes)
    groundTruth = eval.loadGroundTruthAnnotations(gtBoxes)
    results = eval.evaluateDetections(groundTruth, detections[idx], 0.5)
    prec, recall = eval.computePrecisionRecall(numPositives, results['tp'], results['fp'], output)
Code example #8
def mainLoop(modelType, modelArgs, positives, trainingList, featuresDir,
             featuresExt, modelOut, maxNegOverlap, iter):
    pos, posIdx, ari, osi = positives
    startTime = cu.tic()
    if iter == 0:
        ## Random Negatives
        print ' >>> RANDOM NEGATIVES'
        neg, negIdx = learn.getRandomNegs(featuresDir, trainingList,
                                          featuresExt, pos.shape[1],
                                          maxVectorsCache, maxNegativeImages)
        detectionsList = [[x[0], '0.0'] + x[1:] + ['1'] for x in negIdx]
        hards = {'features': np.zeros((0, neg.shape[1])), 'index': []}
        lap = cu.toc(
            'Random negatives matrix (' + str(neg.shape[0]) + ' instances)',
            startTime)
    else:
        ## Mine hard negatives
        print ' >>> MINING HARD NEGATIVES'
        model = det.createDetector(modelType, modelArgs)
        model.load(modelOut + '.' + str(iter - 1))
        detectionsList = detector.detectObjects(
            model, trainingList, featuresDir, featuresExt, 1.0, -10.0
        )  # For RCNN the overlap parameter is 0.3, not 1.0 (1.0 disables suppression)
        hards = cu.loadMatrixNoCompression(modelOut + '.hards').item()
        lap = cu.toc(
            'Hard negatives matrix (' + str(hards['features'].shape[0]) +
            ' instances)', startTime)

    ## Rank and clean negative detections
    detectionsData = evaluation.loadDetections(detectionsList)
    groundTruth = evaluation.loadGroundTruthAnnotations(posIdx)
    detectionsLog = evaluation.evaluateDetections(
        groundTruth, detectionsData, 0.5,
        allowDuplicates=False)  #,overlapMeasure=det.overlap
    evaluation.computePrecisionRecall(len(posIdx), detectionsLog['tp'],
                                      detectionsLog['fp'], 'tmp.txt')
    evaluation.computePrecAt(detectionsLog['tp'],
                             [20, 50, 100, 200, 300, 400, 500])
    logData = learn.parseRankedDetectionsFile(detectionsLog['log'],
                                              maxNegOverlap,
                                              maxNegativeVectors)
    print ' >>> LOADING HARD NEGATIVES'
    neg, negIdx = learn.loadHardNegativesFromList(featuresDir,
                                                  logData['negExamples'],
                                                  featuresExt, pos.shape[1],
                                                  logData['negTaken'])
    del (detectionsList, detectionsData, detectionsLog, logData)
    lap = cu.toc(
        'Ranked negatives matrix (' + str(neg.shape[0]) + ' instances)', lap)
    neg = np.concatenate((neg, hards['features']))
    negIdx = negIdx + hards['index']

    ## Learn Detector
    clf = det.createDetector(modelType, modelArgs)
    clf.learn(pos, neg, posIdx, negIdx)
    clf.save(modelOut + '.' + str(iter))
    lap = cu.toc('Classifier learned:', lap)

    ## Keep hard negatives for next iterations
    scores = clf.predict(neg, negIdx)
    hardNegsIdx = np.argsort(scores)
    hardNeg = np.concatenate(
        (hards['features'], neg[hardNegsIdx[-cu.topHards:]]))
    negIdx = hards['index'] + [negIdx[j] for j in hardNegsIdx[-cu.topHards:]]
    print 'Hard negatives:', hardNeg.shape[0]
    hards = {'features': hardNeg, 'index': negIdx}
    cu.saveMatrixNoCompression({
        'features': hardNeg,
        'index': negIdx
    }, modelOut + '.hards')

    print ' ** Iteration', iter, 'done'
    return {
        'detector': clf,
        'pos': pos,
        'posIdx': posIdx,
        'neg': neg,
        'negIdx': negIdx
    }