# Example #1
# 0
# Construct the graph-based ranker and collect the names of the ranking
# methods it provides, adding the extra "MC2" aggregation method.
computeInfluence = True
graphRanker = GraphRanker(
    k=100,
    numRuns=100,
    computeInfluence=computeInfluence,
    p=0.05,
    inputRanking=[1, 2],
)
methodNames = graphRanker.getNames()
methodNames.append("MC2")

# One precision cell per (dataset field, cutoff n, ranking method).
numMethods = len(methodNames)
precisionShape = (len(dataset.fields), len(ns), numMethods)
averageTrainPrecisions = numpy.zeros(precisionShape)
averageTestPrecisions = numpy.zeros(precisionShape)

# Load and display the precomputed coverage values for this dataset.
coverages = numpy.load(dataset.coverageFilename)
print("==== Coverages ====")
print(coverages)

# For each dataset field, load the precomputed ranking output lists
# (LSI or LDA topic-model variant) plus the co-author graph, then set up
# per-field precision accumulators. NOTE: the try: body continues beyond
# this chunk of the file.
for s, field in enumerate(dataset.fields): 
    # NOTE(review): the flag is spelled `ranLSI` here but `runLSI` in
    # similar code elsewhere in this file — confirm which name is defined.
    if ranLSI: 
        outputFilename = dataset.getOutputFieldDir(field) + "outputListsLSI.npz"
        documentFilename = dataset.getOutputFieldDir(field) + "relevantDocsLSI.npy"
    else: 
        outputFilename = dataset.getOutputFieldDir(field) + "outputListsLDA.npz"
        documentFilename = dataset.getOutputFieldDir(field) + "relevantDocsLDA.npy"
        
    try: 
        print(field)  
        print("-----------")
        # Pickled triple: per-method ranked lists plus the train/test
        # expert-match index arrays for this field.
        outputLists, trainExpertMatchesInds, testExpertMatchesInds = Util.loadPickle(outputFilename)
        
        graph, authorIndexer = Util.loadPickle(dataset.getCoauthorsFilename(field))

        # Per-field accumulators: one row per cutoff n, one column per method.
        trainPrecisions = numpy.zeros((len(ns), numMethods))
        testPrecisions = numpy.zeros((len(ns), numMethods))
        
# Example #2
# 0
         # NOTE(review): this line is indented one space deeper than the
         # surrounding 8-space block — likely an IndentationError; confirm
         # against the enclosing (unseen) scope before running.
         fich.write(str(ListeAuthorsFinale)+"\n")
        #Save relevant authors 
        #numpy.save(dataset.dataDir  + "relevantAuthorsReputation" + field +  ".txt", outputLists)
        fich.write("-----------------------------------------------")  
          
        #for line in outputLists:  
         #fich.write(line[i]) 
        #Adding the expertise score
        #outputLists.append(expertAuthorsInds)

         
        # Build the aggregated item list across all per-method rankings.
        itemList = RankAggregator.generateItemList(outputLists)
        # NOTE(review): re-reads the method names here — shadows any earlier
        # `methodNames` that had extra entries appended; confirm intended.
        methodNames = graphRanker.getNames()
        
        # Pick the output file for the topic-model variant in use (LSI vs LDA).
        if runLSI: 
            outputFilename = dataset.getOutputFieldDir(field) + "outputListsLSI.npz"
        else: 
            outputFilename = dataset.getOutputFieldDir(field) + "outputListsLDA.npz"
            
        Util.savePickle([outputLists, trainExpertMatchesInds, testExpertMatchesInds], outputFilename, debug=True)
        
        # Precision at each cutoff n for every output list; one column per method.
        numMethods = len(outputLists)
        precisions = numpy.zeros((len(ns), numMethods))
        averagePrecisions = numpy.zeros(numMethods)
        
        for i, n in enumerate(ns):     
            for j in range(len(outputLists)): 
                precisions[i, j] = Evaluator.precisionFromIndLists(testExpertMatchesInds, outputLists[j][0:n]) 
            
        # Average precision over the top averagePrecisionN items of each list.
        for j in range(len(outputLists)):                 
            averagePrecisions[j] = Evaluator.averagePrecisionFromLists(testExpertMatchesInds, outputLists[j][0:averagePrecisionN], averagePrecisionN)