def getFinalRank(gt):
    print "Examining ground truth:\n", gt

    pname = multiprocessing.current_process().name

    metricTable = []

    for submissionFiles in submissionFilesTable:
        # Search for match to this ground truth in submissions list
        subm = None
        for f in submissionFiles:
            q = getSubjectKey(f)
            if os.path.basename(gt).find(q) >= 0:
                subm = f

        # Handle missing submissions
        if subm is None:
            print "WARNING: Cannot find any submissions for", gt, \
                "in path", os.path.dirname(submissionFiles[0])
            metricTable.append([np.nan] * len(metricNames))
            continue

        textout = os.path.join(tempfile.gettempdir(),
            pname + "_" + os.path.basename(gt) + ".out")
        tempFileSet.add(textout)

        # Evaluate each metric on gt and perturbed submission
        metricValues = []
        for j in range(numMetricBinaries):
            # Run validation app and obtain list of metrics from stdout
            command = [metricBinaryList[j], gt, subm, textout]
            #print "Running", command
            p = subprocess.Popen(args=command, stdout=subprocess.PIPE,
                stderr=subprocess.PIPE)
            stdout, stderr = p.communicate()

            metricValues += getMetricValues(textout, numObjects)

        if debugAddRandom:
            metricValues += [random.uniform(0, 100)]

        metricTable.append(metricValues)

    metricTable = np.array(metricTable)

    rankagg = UnsupervisedLearningRankAggregator()
    rankagg.set_weights(averageWeights)

    return rankagg.get_aggregated_rank(metricTable, metricOrder)
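# The helpers used above (getSubjectKey, getMetricValues) are defined
# elsewhere in this script. For orientation, here is a minimal sketch of
# what they might look like, assuming each metric binary writes one
# whitespace-separated row of metric values per object to the text
# output file, and that submission filenames embed a "subjectNNN" key.
# Both the file format and the key convention are assumptions for
# illustration, not the actual implementation; the sketches reuse the
# script's existing os/np imports.
def getSubjectKeySketch(path):
    # Hypothetical: derive a subject key from a submission filename,
    # e.g. "teamA_subject013_seg.nii.gz" -> "subject013".
    base = os.path.basename(path)
    for token in base.split("_"):
        if token.startswith("subject"):
            return token
    return os.path.splitext(base)[0]

def getMetricValuesSketch(textout, numObjects):
    # Hypothetical parser: read up to numObjects rows of metric values
    # and average them column-wise, returning one value per metric.
    rows = []
    with open(textout) as f:
        for line in f:
            fields = line.split()
            if fields:
                rows.append([float(x) for x in fields])
    rows = rows[:numObjects]
    return list(np.mean(np.array(rows), axis=0))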
metricTableList = pool.map(getMetricTable,
    [groundTruthFiles[i_gt] for i_gt in gtIndices])
pool.close()
pool.join()

print "Number of metric tables", len(metricTableList), \
    "with shape", metricTableList[0].shape
for metricTable in metricTableList:
    print metricTable

print "Aggregating rankings from metric tables"
rankagg = UnsupervisedLearningRankAggregator()
rankagg.aggregate(metricTableList, metricOrder)

# Accumulate the learned weights for this perturbation t
#averageWeights += rankagg.get_weights()
w = rankagg.get_weights()
#if t % 20 == 0:
#    print "Weights", w
print "Weights\n", w
averageWeights += w
print "Average weights after", t+1, "perturbations\n", averageWeights/(t+1)

# After the last perturbation, convert the accumulated sum of weights
# into an average over all perturbations
averageWeights /= numPerturbations
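# UnsupervisedLearningRankAggregator is defined elsewhere in this
# project; its internals are not shown here. As a rough illustration of
# the kind of computation get_aggregated_rank could perform with a fixed
# weight vector, here is a hedged sketch of a weighted Borda-style
# aggregation: rank submissions under each metric, then combine the rank
# columns by a weighted mean. The metricOrder convention (+1 if larger
# is better, -1 if smaller is better) and the normalized weights are
# assumptions; NaN handling for missing submissions is omitted.
import scipy.stats

def weightedRankAggregationSketch(metricTable, metricOrder, weights):
    # metricTable: (numSubmissions, numMetrics) array of metric values
    # metricOrder: per-metric direction flags (assumed +/-1 convention)
    # weights: per-metric weights, assumed to sum to 1
    numSubmissions, numMetrics = metricTable.shape
    ranks = np.zeros((numSubmissions, numMetrics))
    for j in range(numMetrics):
        col = metricTable[:, j] * metricOrder[j]
        # rankdata assigns rank 1 to the smallest value; negate so that
        # better metric values receive better (smaller) ranks
        ranks[:, j] = scipy.stats.rankdata(-col)
    # Weighted mean rank per submission; argsort orders best to worst
    meanRank = ranks.dot(np.asarray(weights))
    return np.argsort(meanRank)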