Example #1
0
    def testPrecisionFromIndLists(self):
        """Check Evaluator.precisionFromIndLists on several test/prediction pairs.

        Precision is |testList ∩ predList| / |predList|, and the assertions
        below also confirm it is independent of the ordering of predList.
        """
        predList = [4, 2, 10]
        testList = [4, 2]

        # Two of the three predicted indices are relevant.
        self.assertEqual(Evaluator.precisionFromIndLists(testList, predList), 2.0/3)

        # Every predicted index is relevant.
        testList = [4, 2, 10]
        self.assertEqual(Evaluator.precisionFromIndLists(testList, predList), 1)

        # Reordering the predictions must not change the precision.
        predList = [10, 2, 4]
        self.assertEqual(Evaluator.precisionFromIndLists(testList, predList), 1)

        # Disjoint lists yield zero precision.
        testList = [1, 9, 11]
        self.assertEqual(Evaluator.precisionFromIndLists(testList, predList), 0)

        # One relevant hit out of five predictions.
        predList = [1, 2, 3, 4, 5]
        testList = [1, 9, 11]

        self.assertEqual(Evaluator.precisionFromIndLists(testList, predList), 1.0/5)
Example #2
0
    def testPrecisionFromIndLists(self):
        """Check Evaluator.precisionFromIndLists on several test/prediction pairs.

        Precision is |testList ∩ predList| / |predList|, and the assertions
        below also confirm it is independent of the ordering of predList.
        """
        predList = [4, 2, 10]
        testList = [4, 2]

        # Two of the three predicted indices are relevant.
        self.assertEqual(Evaluator.precisionFromIndLists(testList, predList),
                         2.0 / 3)

        # Every predicted index is relevant.
        testList = [4, 2, 10]
        self.assertEqual(Evaluator.precisionFromIndLists(testList, predList),
                         1)

        # Reordering the predictions must not change the precision.
        predList = [10, 2, 4]
        self.assertEqual(Evaluator.precisionFromIndLists(testList, predList),
                         1)

        # Disjoint lists yield zero precision.
        testList = [1, 9, 11]
        self.assertEqual(Evaluator.precisionFromIndLists(testList, predList),
                         0)

        # One relevant hit out of five predictions.
        predList = [1, 2, 3, 4, 5]
        testList = [1, 9, 11]

        self.assertEqual(Evaluator.precisionFromIndLists(testList, predList),
                         1.0 / 5)
Example #3
0
        # NOTE(review): this fragment starts mid-function; the enclosing def and the
        # names graphRanker, graph, relevantAuthorsInds, runLSI, dataset, field, ns,
        # averagePrecisionN and the expert-match index lists are defined outside this view.
        outputLists = graphRanker.vertexRankings(graph, relevantAuthorsInds)

        # NOTE(review): itemList is not used within this visible fragment — presumably
        # consumed further down; verify before removing.
        itemList = RankAggregator.generateItemList(outputLists)
        methodNames = graphRanker.getNames()

        # Output filename records which topic model (LSI vs LDA) produced the rankings.
        if runLSI:
            outputFilename = dataset.getOutputFieldDir(field) + "outputListsLSI.npz"
        else:
            outputFilename = dataset.getOutputFieldDir(field) + "outputListsLDA.npz"

        # Persist the rankings together with the train/test expert matches for later reuse.
        Util.savePickle([outputLists, trainExpertMatchesInds, testExpertMatchesInds], outputFilename, debug=True)

        numMethods = len(outputLists)
        precisions = numpy.zeros((len(ns), numMethods))
        averagePrecisions = numpy.zeros(numMethods)

        # precision@n for every ranking method at each cutoff n in ns.
        for i, n in enumerate(ns):
            for j in range(len(outputLists)):
                precisions[i, j] = Evaluator.precisionFromIndLists(testExpertMatchesInds, outputLists[j][0:n])

        # Average precision over the top averagePrecisionN items for each method.
        for j in range(len(outputLists)):
            averagePrecisions[j] = Evaluator.averagePrecisionFromLists(testExpertMatchesInds, outputLists[j][0:averagePrecisionN], averagePrecisionN)

        # Prepend the cutoff values as the first column, then log results as LaTeX rows.
        precisions2 = numpy.c_[numpy.array(ns), precisions]

        logging.debug(Latex.listToRow(methodNames))
        logging.debug(Latex.array2DToRows(precisions2))
        logging.debug(Latex.array1DToRow(averagePrecisions))

# Completion marker emitted once the whole script has finished.
logging.debug("All done!")