Example #1
    def testAveragePrecisionFromLists(self):
        predList = [4, 2, 10]
        testList = [4, 2, 15, 16]

        self.assertEqual(Evaluator.averagePrecisionFromLists(testList, predList), 0.5)

        predList = [0, 1, 2, 3, 4, 5]
        testList = [0, 3, 4, 5]
        self.assertAlmostEqual(Evaluator.averagePrecisionFromLists(testList, predList), 0.691666666666)
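The expected values follow the usual average-precision definition: sum the precision at each rank where a relevant item appears, then divide by the number of relevant (test) items. A minimal sketch of that computation, assuming this is how Evaluator.averagePrecisionFromLists behaves (the function below is illustrative, not the project's source):

def averagePrecision(testList, predList):
    # Illustrative reimplementation only; the real Evaluator.averagePrecisionFromLists
    # may differ in details such as tie handling or an optional cutoff argument.
    relevant = set(testList)
    hits = 0
    precisionSum = 0.0
    for rank, item in enumerate(predList, start=1):
        if item in relevant:
            hits += 1
            precisionSum += hits / rank   # precision at this relevant rank
    return precisionSum / len(testList)

# [4, 2, 10] against [4, 2, 15, 16]: (1/1 + 2/2) / 4 = 0.5
# [0, 1, 2, 3, 4, 5] against [0, 3, 4, 5]: (1/1 + 2/4 + 3/5 + 4/6) / 4 ≈ 0.6917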
Example #2
    def greedyMC2(lists, itemList, trainList, n):
        """
        Greedily select a subset of lists such that the average precision
        of the MC2 rank aggregation is maximised.
        """
        currentListsInds = list(range(len(lists)))
        newListsInds = []
        currentAvPrecision = 0
        lastAvPrecision = -0.1
        
        while currentAvPrecision - lastAvPrecision > 0:
            lastAvPrecision = currentAvPrecision
            averagePrecisions = numpy.zeros(len(currentListsInds))

            # Try adding each remaining list in turn and score the resulting aggregate
            for i, j in enumerate(currentListsInds):
                newListsInds.append(j)

                newLists = []
                for k in newListsInds:
                    newLists.append(lists[k])

                rankAggregate, scores = RankAggregator.MC2(newLists, itemList)
                averagePrecisions[i] = Evaluator.averagePrecisionFromLists(trainList, rankAggregate[0:n], n)
                newListsInds.remove(j)

            # Keep the candidate list that improves average precision the most
            j = numpy.argmax(averagePrecisions)
            currentAvPrecision = averagePrecisions[j]

            if currentAvPrecision > lastAvPrecision:
                newListsInds.append(currentListsInds.pop(j))

        return newListsInds
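greedyMC2 is a standard greedy forward selection: on each pass it tries adding every remaining list, keeps the one that most improves training average precision, and stops once no addition helps. A self-contained sketch of the same pattern, with a toy scoreSubset function standing in for the MC2 aggregation plus average-precision step (all names and data here are illustrative, not the project's API):

import numpy

def greedySelect(lists, scoreSubset):
    # Greedy forward selection: repeatedly add the list that raises the score most.
    remaining = list(range(len(lists)))
    chosen = []
    bestScore = -numpy.inf

    while remaining:
        scores = numpy.array([scoreSubset(chosen + [j]) for j in remaining])
        i = int(numpy.argmax(scores))
        if scores[i] <= bestScore:
            break
        bestScore = scores[i]
        chosen.append(remaining.pop(i))

    return chosen

lists = [[4, 2, 10], [2, 15, 16], [4, 2, 15]]

def coverage(inds):
    # Toy score: number of distinct items covered by the selected lists.
    return len(set().union(*[lists[k] for k in inds]))

print(greedySelect(lists, coverage))   # [0, 1] under this toy score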
Example #3
        outputLists = graphRanker.vertexRankings(graph, relevantAuthorsInds)

        itemList = RankAggregator.generateItemList(outputLists)
        methodNames = graphRanker.getNames()

        # Name the output file according to the topic model in use
        if runLSI:
            outputFilename = dataset.getOutputFieldDir(field) + "outputListsLSI.npz"
        else:
            outputFilename = dataset.getOutputFieldDir(field) + "outputListsLDA.npz"

        Util.savePickle([outputLists, trainExpertMatchesInds, testExpertMatchesInds], outputFilename, debug=True)

        numMethods = len(outputLists)
        precisions = numpy.zeros((len(ns), numMethods))
        averagePrecisions = numpy.zeros(numMethods)

        # Precision of each method's top-n ranking at every cutoff n
        for i, n in enumerate(ns):
            for j in range(len(outputLists)):
                precisions[i, j] = Evaluator.precisionFromIndLists(testExpertMatchesInds, outputLists[j][0:n])

        # Average precision of each method at the fixed cutoff averagePrecisionN
        for j in range(len(outputLists)):
            averagePrecisions[j] = Evaluator.averagePrecisionFromLists(testExpertMatchesInds, outputLists[j][0:averagePrecisionN], averagePrecisionN)

        # Prepend the cutoffs as the first column and log the results as LaTeX rows
        precisions2 = numpy.c_[numpy.array(ns), precisions]

        logging.debug(Latex.listToRow(methodNames))
        logging.debug(Latex.array2DToRows(precisions2))
        logging.debug(Latex.array1DToRow(averagePrecisions))

logging.debug("All done!")