Example #1

# NOTE: this excerpt relies on names defined elsewhere in the source module
# (submissionFilesTable, metricNames, metricBinaryList, numMetricBinaries,
# numObjects, metricOrder, averageWeights, debugAddRandom, tempFileSet,
# getSubjectKey, getMetricValues, UnsupervisedLearningRankAggregator).
import multiprocessing
import os
import random
import subprocess
import tempfile

import numpy as np


def getFinalRank(gt):
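    """Compute the aggregated rank of every submission for one ground-truth file."""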

    print "Examining ground truth:\n", gt

    pname = multiprocessing.current_process().name

    metricTable = []
    for submissionFiles in submissionFilesTable:

      # Search for match to this ground truth in submissions list
      subm = None
      for f in submissionFiles:
        q = getSubjectKey(f)

        if os.path.basename(gt).find(q) >= 0:
          subm = f

      # Handle missing submissions
      if subm is None:
        print "WARNING: Cannot find any submissions for", gt, \
          "in path", os.path.dirname(submissionFiles[0])
        metricTable.append([np.nan] * len(metricNames))
        continue

      # Per-process temporary file that receives this case's metric output
      textout = os.path.join(tempfile.gettempdir(),
        pname + "_" + os.path.basename(gt) + ".out")

      tempFileSet.add(textout)

      # Evaluate each metric on gt and perturbed submission
      metricValues = []
      for j in range(numMetricBinaries):

        # Run the validation app; it writes its metric values to the textout
        # file (stdout/stderr are captured below only to keep the console quiet)
        command = [metricBinaryList[j], gt, subm, textout]

        #print "Running", command

        p = subprocess.Popen(args=command, stdout=subprocess.PIPE,
          stderr=subprocess.PIPE)
        stdout, stderr = p.communicate()

        metricValues += getMetricValues(textout, numObjects)

        # Optionally append a random value as an extra debug metric
        if debugAddRandom:
          metricValues += [random.uniform(0, 100)]

      metricTable.append(metricValues)

    # One row per submission, one column per metric value
    metricTable = np.array(metricTable)

    # Aggregate the per-metric scores into a single rank for each submission
    rankagg = UnsupervisedLearningRankAggregator()
    rankagg.set_weights(averageWeights)

    return rankagg.get_aggregated_rank(metricTable, metricOrder)
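
The function reads multiprocessing.current_process().name to build a per-process
temp-file name, which suggests it is meant to be mapped over the ground-truth
files by worker processes. A minimal driver sketch follows; the glob pattern,
the pool size, and the groundTruthFiles / finalRanks names are illustrative
assumptions, not part of the original source.

# Hypothetical driver (assumed names and paths, for illustration only)
import glob
import multiprocessing

if __name__ == "__main__":
    # Assumed location of the ground-truth files
    groundTruthFiles = sorted(glob.glob("/path/to/ground_truth/*"))

    # Each worker call returns the aggregated rank vector for one case
    pool = multiprocessing.Pool(processes=4)
    finalRanks = pool.map(getFinalRank, groundTruthFiles)
    pool.close()
    pool.join()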