Example #1
 def saveCSV(self, filename, fold=None):
     # Append this evaluation's rows to a CSV file. The path hack makes
     # the shared Utils package importable from the script's directory;
     # g_evaluatorFieldnames is a module-level list of CSV column names.
     import sys
     sys.path.append("..")
     import Utils.TableUtils as TableUtils
     dicts = self.toDict()
     if fold is not None:
         # Tag each row with the cross-validation fold it belongs to.
         for d in dicts:
             d["fold"] = fold
     TableUtils.addToCSV(dicts, filename, g_evaluatorFieldnames)
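
A minimal usage sketch (not from the original module, Python 3): a stand-in for what TableUtils.addToCSV presumably does, appending dict rows to a CSV file and writing the header only once. The name add_to_csv and the example fieldnames are hypothetical.

import csv
import os

def add_to_csv(dicts, filename, fieldnames):
    # Append rows; write the header only when the file does not exist yet.
    write_header = not os.path.exists(filename)
    with open(filename, "a", newline="") as f:
        writer = csv.DictWriter(f, fieldnames=fieldnames,
                                extrasaction="ignore", restval="")
        if write_header:
            writer.writeheader()
        writer.writerows(dicts)

rows = [{"class": "pos", "prediction": "neg", "fold": 1}]
add_to_csv(rows, "results.csv", ["class", "prediction", "fold"])
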
Example #2
def analyzeLinearDistance(corpusElements):
    # Histograms of token-position distance between entity head tokens;
    # sys, options, TableUtils and printPathDistribution come from the
    # enclosing module.
    interactionEdges = 0
    interactionLinearDistanceCounts = {}
    allEntitiesLinearDistanceCounts = {}
    for sentence in corpusElements.sentences:
        sentenceGraph = sentence.sentenceGraph
        interactionEdges += len(sentence.interactions)

        # Linear distance between end tokens of interaction edges
        for interaction in sentence.interactions:
            e1 = sentence.entitiesById[interaction.get("e1")]
            e2 = sentence.entitiesById[interaction.get("e2")]
            t1 = sentenceGraph.entityHeadTokenByEntity[e1]
            t2 = sentenceGraph.entityHeadTokenByEntity[e2]
            # The numeric suffix of a token id is its position in the
            # sentence, so the difference is the linear distance.
            linDistance = abs(
                int(t1.get("id").split("_")[-1]) -
                int(t2.get("id").split("_")[-1]))
            interactionLinearDistanceCounts[linDistance] = \
                interactionLinearDistanceCounts.get(linDistance, 0) + 1

        # Linear distance between all entities
        for i in range(len(sentence.entities) - 1):
            for j in range(i + 1, len(sentence.entities)):
                tI = sentenceGraph.entityHeadTokenByEntity[
                    sentence.entities[i]]
                tJ = sentenceGraph.entityHeadTokenByEntity[
                    sentence.entities[j]]
                linDistance = abs(
                    int(tI.get("id").split("_")[-1]) -
                    int(tJ.get("id").split("_")[-1]))
                allEntitiesLinearDistanceCounts[linDistance] = \
                    allEntitiesLinearDistanceCounts.get(linDistance, 0) + 1

    print >> sys.stderr, "=== Linear Distance ==="
    print >> sys.stderr, "Interaction edges:", interactionEdges
    print >> sys.stderr, "Entity head token linear distance for interaction edges:"
    printPathDistribution(interactionLinearDistanceCounts)
    if options.output is not None:
        interactionLinearDistanceCounts["corpus"] = options.input
        interactionLinearDistanceCounts["parse"] = options.parse
        TableUtils.addToCSV(
            interactionLinearDistanceCounts,
            options.output + "/interactionEdgeLinearDistance.csv")
    print >> sys.stderr, "Linear distance between head tokens of all entities:"
    printPathDistribution(allEntitiesLinearDistanceCounts)
    if options.output is not None:
        allEntitiesLinearDistanceCounts["corpus"] = options.input
        allEntitiesLinearDistanceCounts["parse"] = options.parse
        TableUtils.addToCSV(allEntitiesLinearDistanceCounts,
                            options.output + "/allEntitiesLinearDistance.csv")
Example #3
def analyzeLinearDistance(corpusElements):
    interactionEdges = 0
    interactionLinearDistanceCounts = {}
    allEntitiesLinearDistanceCounts = {}
    for sentence in corpusElements.sentences:
        sentenceGraph = sentence.sentenceGraph
        interactionEdges += len(sentence.interactions)
        
        # Linear distance between end tokens of interaction edges
        for interaction in sentence.interactions:
            e1 = sentence.entitiesById[interaction.get("e1")]
            e2 = sentence.entitiesById[interaction.get("e2")]
            t1 = sentenceGraph.entityHeadTokenByEntity[e1]
            t2 = sentenceGraph.entityHeadTokenByEntity[e2]
            linDistance = abs(int(t1.get("id").split("_")[-1]) - int(t2.get("id").split("_")[-1]))
            interactionLinearDistanceCounts[linDistance] = interactionLinearDistanceCounts.get(linDistance, 0) + 1

        # Linear distance between all entities
        for i in range(len(sentence.entities) - 1):
            for j in range(i + 1, len(sentence.entities)):
                tI = sentenceGraph.entityHeadTokenByEntity[sentence.entities[i]]
                tJ = sentenceGraph.entityHeadTokenByEntity[sentence.entities[j]]
                linDistance = abs(int(tI.get("id").split("_")[-1]) - int(tJ.get("id").split("_")[-1]))
                allEntitiesLinearDistanceCounts[linDistance] = allEntitiesLinearDistanceCounts.get(linDistance, 0) + 1
    
    print >> sys.stderr, "=== Linear Distance ==="
    print >> sys.stderr, "Interaction edges:", interactionEdges
    print >> sys.stderr, "Entity head token linear distance for interaction edges:"
    printPathDistribution(interactionLinearDistanceCounts)
    if options.output is not None:
        interactionLinearDistanceCounts["corpus"] = options.input
        interactionLinearDistanceCounts["parse"] = options.parse
        TableUtils.addToCSV(interactionLinearDistanceCounts, options.output+"/interactionEdgeLinearDistance.csv")
    print >> sys.stderr, "Linear distance between head tokens of all entities:"
    printPathDistribution(allEntitiesLinearDistanceCounts)
    if options.output is not None:
        allEntitiesLinearDistanceCounts["corpus"] = options.input
        allEntitiesLinearDistanceCounts["parse"] = options.parse
        TableUtils.addToCSV(allEntitiesLinearDistanceCounts, options.output+"/allEntitiesLinearDistance.csv")
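
The same counting idiom, sketched with collections.Counter (illustrative, not TEES code; token_positions and the "bt_*" ids are made-up stand-ins for int(token.get("id").split("_")[-1]) above):

from collections import Counter

def linear_distance_counts(pairs, token_positions):
    # Histogram of absolute position differences between token pairs.
    counts = Counter()
    for t1, t2 in pairs:
        counts[abs(token_positions[t1] - token_positions[t2])] += 1
    return counts

positions = {"bt_3": 3, "bt_7": 7, "bt_12": 12}
print(linear_distance_counts([("bt_3", "bt_12"), ("bt_7", "bt_3")], positions))
# Counter({9: 1, 4: 1})
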
Example #4
def crossValidate(exampleBuilder, corpusElements, examples, options, timer):
    # N-fold cross-validation: optionally set aside a constant parameter
    # optimization set, then per fold optimize, train and evaluate the
    # classifier, finally averaging and pooling the fold results.
    parameterOptimizationSet = None
    constantParameterOptimizationSet = None
    if options.paramOptData is not None:
        print >> sys.stderr, "Separating parameter optimization set"
        parameterOptimizationDivision = Example.makeCorpusDivision(corpusElements, float(options.paramOptData))
        exampleSets = Example.divideExamples(examples, parameterOptimizationDivision)
        constantParameterOptimizationSet = exampleSets[0]
        parameterOptimizationSet = constantParameterOptimizationSet
        optDocs = 0
        for k,v in parameterOptimizationDivision.iteritems():
            if v == 0:
                del corpusElements.documentsById[k]
                optDocs += 1
        print >> sys.stderr, "  Documents for parameter optimization:", optDocs
    discardedParameterCombinations = []

    print >> sys.stderr, "Dividing data into folds"
    corpusFolds = Example.makeCorpusFolds(corpusElements, options.folds[0])
    exampleSets = Example.divideExamples(examples, corpusFolds)
    
    keys = sorted(exampleSets.keys())
    evaluations = []
    for key in keys:
        testSet = exampleSets[key]
        for example in testSet:
            example[3]["visualizationSet"] = key + 1
        trainSet = []
        for key2 in keys:
            if key != key2:
                trainSet.extend(exampleSets[key2])
        print >> sys.stderr, "Fold", str(key + 1)
        # Create classifier object
        if options.output != None:
            if not os.path.exists(options.output+"/fold"+str(key+1)):
                os.mkdir(options.output+"/fold"+str(key+1))
#                if not os.path.exists(options.output+"/fold"+str(key+1)+"/classifier"):
#                    os.mkdir(options.output+"/fold"+str(key+1)+"/classifier")
            classifier = Classifier(workDir = options.output + "/fold"+str(key + 1))
        else:
            classifier = Classifier()
        classifier.featureSet = exampleBuilder.featureSet
        # Optimize ####################
        # Check whether there is need for included param opt set
        if parameterOptimizationSet is None and options.folds[1] == 0: # 8-1-1 folds
            assert(len(keys) > 1)
            if keys.index(key) == 0:
                parameterOptimizationSetKey = keys[-1]
            else:
                parameterOptimizationSetKey = keys[keys.index(key)-1]
            parameterOptimizationSet = exampleSets[parameterOptimizationSetKey]
            trainSet = []
            for key2 in keys:
                if key2 != key and key2 != parameterOptimizationSetKey:
                    trainSet.extend(exampleSets[key2])

        if parameterOptimizationSet is not None: # constant external parameter optimization set
            evaluationArgs = {"classSet":exampleBuilder.classSet}
            if options.parameters is not None:
                paramDict = splitParameters(options.parameters)
                bestResults = classifier.optimize([trainSet], [parameterOptimizationSet], paramDict, Evaluation, evaluationArgs, combinationsThatTimedOut=discardedParameterCombinations)
            else:
                bestResults = classifier.optimize([trainSet], [parameterOptimizationSet], evaluationClass=Evaluation, evaluationArgs=evaluationArgs, combinationsThatTimedOut=discardedParameterCombinations)
        else: # nested x-fold parameter optimization
            assert (options.folds[1] >= 2)
            optimizationFolds = Example.makeExampleFolds(trainSet, options.folds[1])
            optimizationSets = Example.divideExamples(trainSet, optimizationFolds)
            optimizationSetList = []
            optSetKeys = sorted(optimizationSets.keys())
            for optSetKey in optSetKeys:
                optimizationSetList.append(optimizationSets[optSetKey])
            evaluationArgs = {"classSet":exampleBuilder.classSet}
            if options.parameters is not None:
                paramDict = splitParameters(options.parameters)
                bestResults = classifier.optimize(optimizationSetList, optimizationSetList, paramDict, Evaluation, evaluationArgs, combinationsThatTimedOut=discardedParameterCombinations)
            else:
                bestResults = classifier.optimize(optimizationSetList, optimizationSetList, evaluationClass=Evaluation, evaluationArgs=evaluationArgs, combinationsThatTimedOut=discardedParameterCombinations)
        
        # Classify
        print >> sys.stderr, "Classifying test data"
        bestParams = bestResults[2]
        if "timeout" in bestParams:
            del bestParams["timeout"]
        print >> sys.stderr, "Parameters:", bestParams
        print >> sys.stderr, "Training",
        startTime = time.time()
        classifier.train(trainSet, bestParams)
        print >> sys.stderr, "(Time spent:", time.time() - startTime, "s)"
        print >> sys.stderr, "Testing",
        startTime = time.time()
        predictions = classifier.classify(testSet)
        if options.output is not None:
            pdict = []
            fieldnames = ["class","prediction","id","fold"]
            for p in predictions:
                if "typed" in exampleBuilder.styles:
                    pdict.append( {"class":exampleBuilder.classSet.getName(p[0][1]), "prediction":exampleBuilder.classSet.getName(p[1]), "id":p[0][0], "fold":key} )
                else:
                    pdict.append( {"class":p[0][1], "prediction":p[1], "id":p[0][0], "fold":key} )
            TableUtils.addToCSV(pdict, options.output +"/predictions.csv", fieldnames)
        print >> sys.stderr, "(Time spent:", time.time() - startTime, "s)"
        
        # Calculate statistics
        evaluation = Evaluation(predictions, classSet=exampleBuilder.classSet)
        print >> sys.stderr, evaluation.toStringConcise()
        print >> sys.stderr, timer.toString()
        evaluations.append(evaluation)
        
        # Save example sets
        if options.output is not None:
            print >> sys.stderr, "Saving example sets to", options.output
            # Write the current fold's test and training examples; the
            # original passed exampleSets[0] and exampleSets[1], i.e.
            # always folds 0 and 1, which looks like a copy-paste slip.
            Example.writeExamples(testSet, options.output +"/fold"+str(key+1) + "/examplesTest.txt")
            Example.writeExamples(trainSet, options.output +"/fold"+str(key+1) + "/examplesTrain.txt")
            if parameterOptimizationSet is None:
                for k,v in optimizationSets.iteritems():
                    Example.writeExamples(v, options.output +"/fold"+str(key+1) + "/examplesOptimizationSet" + str(k) + ".txt")
            else:
                Example.writeExamples(parameterOptimizationSet, options.output +"/fold"+str(key+1) + "/examplesOptimizationSetPredefined.txt")
            TableUtils.writeCSV(bestResults[2], options.output +"/fold"+str(key+1) + "/parameters.csv")
            evaluation.saveCSV(options.output +"/fold"+str(key+1) + "/results.csv")
            print >> sys.stderr, "Compressing folder"
            zipTree(options.output, "fold"+str(key+1))
        
        parameterOptimizationSet = constantParameterOptimizationSet
    
    print >> sys.stderr, "Cross-validation Results"
    for i in range(len(evaluations)):
        print >> sys.stderr, evaluations[i].toStringConcise("  Fold "+str(i)+": ")
    averageResult = Evaluation.average(evaluations)
    print >> sys.stderr, averageResult.toStringConcise("  Avg: ")
    pooledResult = Evaluation.pool(evaluations)
    print >> sys.stderr, pooledResult.toStringConcise("  Pool: ")
    if options.output is not None:
        for i in range(len(evaluations)):
            evaluations[i].saveCSV(options.output+"/results.csv", i)
        averageResult.saveCSV(options.output+"/results.csv", "Avg")
        pooledResult.saveCSV(options.output+"/results.csv", "Pool")
        averageResult.saveCSV(options.output+"/resultsAverage.csv")
        pooledResult.saveCSV(options.output+"/resultsPooled.csv")
    # Visualize
    if options.visualization is not None:
        # "sentences" must come from the enclosing module; it is not
        # defined in this function.
        visualize(sentences, pooledResult.classifications, options, exampleBuilder)
    
    # Save interactionXML
    if options.resultsToXML is not None:
        classSet = None
        if "typed" in exampleBuilder.styles:
            classSet = exampleBuilder.classSet
        Example.writeToInteractionXML(pooledResult.classifications, corpusElements, options.resultsToXML, classSet)
Example #5
def crossValidate(exampleBuilder, corpusElements, examples, options, timer):
    parameterOptimizationSet = None
    constantParameterOptimizationSet = None
    if options.paramOptData is not None:
        print >> sys.stderr, "Separating parameter optimization set"
        parameterOptimizationDivision = Example.makeCorpusDivision(
            corpusElements, float(options.paramOptData))
        exampleSets = Example.divideExamples(examples,
                                             parameterOptimizationDivision)
        constantParameterOptimizationSet = exampleSets[0]
        parameterOptimizationSet = constantParameterOptimizationSet
        optDocs = 0
        for k, v in parameterOptimizationDivision.iteritems():
            if v == 0:
                del corpusElements.documentsById[k]
                optDocs += 1
        print >> sys.stderr, "  Documents for parameter optimization:", optDocs
    discardedParameterCombinations = []

    print >> sys.stderr, "Dividing data into folds"
    corpusFolds = Example.makeCorpusFolds(corpusElements, options.folds[0])
    exampleSets = Example.divideExamples(examples, corpusFolds)

    keys = sorted(exampleSets.keys())
    evaluations = []
    for key in keys:
        testSet = exampleSets[key]
        for example in testSet:
            example[3]["visualizationSet"] = key + 1
        trainSet = []
        for key2 in keys:
            if key != key2:
                trainSet.extend(exampleSets[key2])
        print >> sys.stderr, "Fold", str(key + 1)
        # Create classifier object
        if options.output is not None:
            if not os.path.exists(options.output + "/fold" + str(key + 1)):
                os.mkdir(options.output + "/fold" + str(key + 1))
#                if not os.path.exists(options.output+"/fold"+str(key+1)+"/classifier"):
#                    os.mkdir(options.output+"/fold"+str(key+1)+"/classifier")
            classifier = Classifier(workDir=options.output + "/fold" +
                                    str(key + 1))
        else:
            classifier = Classifier()
        classifier.featureSet = exampleBuilder.featureSet
        # Optimize ####################
        # Check whether there is need for included param opt set
        if parameterOptimizationSet is None and options.folds[1] == 0:  # 8-1-1 folds
            assert (len(keys) > 1)
            if keys.index(key) == 0:
                parameterOptimizationSetKey = keys[-1]
            else:
                parameterOptimizationSetKey = keys[keys.index(key) - 1]
            parameterOptimizationSet = exampleSets[parameterOptimizationSetKey]
            trainSet = []
            for key2 in keys:
                if key2 != key and key2 != parameterOptimizationSetKey:
                    trainSet.extend(exampleSets[key2])

        if parameterOptimizationSet is not None:  # constant external parameter optimization set
            evaluationArgs = {"classSet": exampleBuilder.classSet}
            if options.parameters is not None:
                paramDict = splitParameters(options.parameters)
                bestResults = classifier.optimize(
                    [trainSet], [parameterOptimizationSet],
                    paramDict,
                    Evaluation,
                    evaluationArgs,
                    combinationsThatTimedOut=discardedParameterCombinations)
            else:
                bestResults = classifier.optimize(
                    [trainSet], [parameterOptimizationSet],
                    evaluationClass=Evaluation,
                    evaluationArgs=evaluationArgs,
                    combinationsThatTimedOut=discardedParameterCombinations)
        else:  # nested x-fold parameter optimization
            assert (options.folds[1] >= 2)
            optimizationFolds = Example.makeExampleFolds(
                trainSet, options.folds[1])
            optimizationSets = Example.divideExamples(trainSet,
                                                      optimizationFolds)
            optimizationSetList = []
            optSetKeys = sorted(optimizationSets.keys())
            for optSetKey in optSetKeys:
                optimizationSetList.append(optimizationSets[optSetKey])
            evaluationArgs = {"classSet": exampleBuilder.classSet}
            if options.parameters is not None:
                paramDict = splitParameters(options.parameters)
                bestResults = classifier.optimize(
                    optimizationSetList,
                    optimizationSetList,
                    paramDict,
                    Evaluation,
                    evaluationArgs,
                    combinationsThatTimedOut=discardedParameterCombinations)
            else:
                bestResults = classifier.optimize(
                    optimizationSetList,
                    optimizationSetList,
                    evaluationClass=Evaluation,
                    evaluationArgs=evaluationArgs,
                    combinationsThatTimedOut=discardedParameterCombinations)

        # Classify
        print >> sys.stderr, "Classifying test data"
        bestParams = bestResults[2]
        if "timeout" in bestParams:
            del bestParams["timeout"]
        print >> sys.stderr, "Parameters:", bestParams
        print >> sys.stderr, "Training",
        startTime = time.time()
        classifier.train(trainSet, bestParams)
        print >> sys.stderr, "(Time spent:", time.time() - startTime, "s)"
        print >> sys.stderr, "Testing",
        startTime = time.time()
        predictions = classifier.classify(testSet)
        if options.output is not None:
            pdict = []
            fieldnames = ["class", "prediction", "id", "fold"]
            for p in predictions:
                if "typed" in exampleBuilder.styles:
                    pdict.append({
                        "class": exampleBuilder.classSet.getName(p[0][1]),
                        "prediction": exampleBuilder.classSet.getName(p[1]),
                        "id": p[0][0],
                        "fold": key
                    })
                else:
                    pdict.append({
                        "class": p[0][1],
                        "prediction": p[1],
                        "id": p[0][0],
                        "fold": key
                    })
            TableUtils.addToCSV(pdict, options.output + "/predictions.csv",
                                fieldnames)
        print >> sys.stderr, "(Time spent:", time.time() - startTime, "s)"

        # Calculate statistics
        evaluation = Evaluation(predictions, classSet=exampleBuilder.classSet)
        print >> sys.stderr, evaluation.toStringConcise()
        print >> sys.stderr, timer.toString()
        evaluations.append(evaluation)

        # Save example sets
        if options.output is not None:
            print >> sys.stderr, "Saving example sets to", options.output
            # Write the current fold's test and training examples; the
            # original passed exampleSets[0] and exampleSets[1], i.e.
            # always folds 0 and 1, which looks like a copy-paste slip.
            Example.writeExamples(
                testSet,
                options.output + "/fold" + str(key + 1) + "/examplesTest.txt")
            Example.writeExamples(
                trainSet,
                options.output + "/fold" + str(key + 1) + "/examplesTrain.txt")
            if parameterOptimizationSet is None:
                for k, v in optimizationSets.iteritems():
                    Example.writeExamples(
                        v, options.output + "/fold" + str(key + 1) +
                        "/examplesOptimizationSet" + str(k) + ".txt")
            else:
                Example.writeExamples(
                    parameterOptimizationSet, options.output + "/fold" +
                    str(key + 1) + "/examplesOptimizationSetPredefined.txt")
            TableUtils.writeCSV(
                bestResults[2],
                options.output + "/fold" + str(key + 1) + "/parameters.csv")
            evaluation.saveCSV(options.output + "/fold" + str(key + 1) +
                               "/results.csv")
            print >> sys.stderr, "Compressing folder"
            zipTree(options.output, "fold" + str(key + 1))

        parameterOptimizationSet = constantParameterOptimizationSet

    print >> sys.stderr, "Cross-validation Results"
    for i in range(len(evaluations)):
        print >> sys.stderr, evaluations[i].toStringConcise("  Fold " +
                                                            str(i) + ": ")
    averageResult = Evaluation.average(evaluations)
    print >> sys.stderr, averageResult.toStringConcise("  Avg: ")
    pooledResult = Evaluation.pool(evaluations)
    print >> sys.stderr, pooledResult.toStringConcise("  Pool: ")
    if options.output is not None:
        for i in range(len(evaluations)):
            evaluations[i].saveCSV(options.output + "/results.csv", i)
        averageResult.saveCSV(options.output + "/results.csv", "Avg")
        pooledResult.saveCSV(options.output + "/results.csv", "Pool")
        averageResult.saveCSV(options.output + "/resultsAverage.csv")
        pooledResult.saveCSV(options.output + "/resultsPooled.csv")
    # Visualize
    if options.visualization is not None:
        # "sentences" must come from the enclosing module; it is not
        # defined in this function.
        visualize(sentences, pooledResult.classifications, options,
                  exampleBuilder)

    # Save interactionXML
    if options.resultsToXML is not None:
        classSet = None
        if "typed" in exampleBuilder.styles:
            classSet = exampleBuilder.classSet
        Example.writeToInteractionXML(pooledResult.classifications,
                                      corpusElements, options.resultsToXML,
                                      classSet)
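
The core train/test split of the loop above, reduced to a self-contained sketch (illustrative, not TEES code): each fold serves once as the test set while the remaining folds are pooled into the training set.

def leave_one_fold_out(example_sets):
    # example_sets: dict mapping a fold key to its list of examples.
    for test_key in sorted(example_sets):
        train_set = []
        for key in sorted(example_sets):
            if key != test_key:
                train_set.extend(example_sets[key])
        yield test_key, train_set, example_sets[test_key]

folds = {0: ["a", "b"], 1: ["c"], 2: ["d", "e"]}
for k, train, test in leave_one_fold_out(folds):
    print(k, train, test)
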
Example #6
def analyzeLengths(corpusElements):
    # Counts shortest dependency-path lengths between entity head tokens,
    # both for annotated interaction pairs and for all entity pairs.
    interactionEdges = 0
    dependencyEdges = 0
    pathsByLength = {}
    pathsBetweenAllEntitiesByLength = {}
    for sentence in corpusElements.sentences:
        sentenceGraph = sentence.sentenceGraph
        #interactionEdges += len(sentenceGraph.interactionGraph.edges())
        interactionEdges += len(sentence.interactions)
        dependencyEdges += len(sentenceGraph.dependencyGraph.edges())

        undirected = sentenceGraph.dependencyGraph.to_undirected()
        paths = NX10.all_pairs_shortest_path(undirected, cutoff=999)
        # Shortest path for interaction edge
        for interaction in sentence.interactions:
            e1 = sentence.entitiesById[interaction.attrib["e1"]]
            e2 = sentence.entitiesById[interaction.attrib["e2"]]
            t1 = sentenceGraph.entityHeadTokenByEntity[e1]
            t2 = sentenceGraph.entityHeadTokenByEntity[e2]
            if t1 in paths and t2 in paths[t1]:
                path = paths[t1][t2]
                # Path length in dependency edges = number of nodes - 1.
                pathsByLength[len(path) - 1] = \
                    pathsByLength.get(len(path) - 1, 0) + 1
            else:
                pathsByLength["none"] = pathsByLength.get("none", 0) + 1

#        for intEdge in sentenceGraph.interactionGraph.edges():
#            if paths.has_key(intEdge[0]) and paths[intEdge[0]].has_key(intEdge[1]):
#                path = paths[intEdge[0]][intEdge[1]]
#                if not pathsByLength.has_key(len(path)-1):
#                    pathsByLength[len(path)-1] = 0
#                pathsByLength[len(path)-1] += 1
#            else:
#                if not pathsByLength.has_key("none"):
#                    pathsByLength["none"] = 0
#                pathsByLength["none"] += 1

        # Shortest paths between all entities
        for i in range(len(sentence.entities) - 1):
            for j in range(i + 1, len(sentence.entities)):
                tI = sentenceGraph.entityHeadTokenByEntity[
                    sentence.entities[i]]
                tJ = sentenceGraph.entityHeadTokenByEntity[
                    sentence.entities[j]]
                if tI in paths and tJ in paths[tI]:
                    path = paths[tI][tJ]
                    pathsBetweenAllEntitiesByLength[len(path) - 1] = \
                        pathsBetweenAllEntitiesByLength.get(len(path) - 1, 0) + 1
                elif tI == tJ:
                    # Entities sharing a head token count as distance zero.
                    pathsBetweenAllEntitiesByLength[0] = \
                        pathsBetweenAllEntitiesByLength.get(0, 0) + 1
                else:
                    pathsBetweenAllEntitiesByLength["none"] = \
                        pathsBetweenAllEntitiesByLength.get("none", 0) + 1


#        for i in range(len(sentenceGraph.tokens)-1):
#            for j in range(i+1,len(sentenceGraph.tokens)):
#                tI = sentenceGraph.tokens[i]
#                tJ = sentenceGraph.tokens[j]
#                if sentenceGraph.tokenIsEntityHead[tI] == None or sentenceGraph.tokenIsEntityHead[tJ] == None:
#                    continue
#                if paths.has_key(tI) and paths[tI].has_key(tJ):
#                    path = paths[tI][tJ]
#                    if not pathsBetweenAllEntitiesByLength.has_key(len(path)-1):
#                        pathsBetweenAllEntitiesByLength[len(path)-1] = 0
#                    pathsBetweenAllEntitiesByLength[len(path)-1] += 1
#                else:
#                    if not pathsBetweenAllEntitiesByLength.has_key("none"):
#                        pathsBetweenAllEntitiesByLength["none"] = 0
#                    pathsBetweenAllEntitiesByLength["none"] += 1

    print >> sys.stderr, "Interaction edges:", interactionEdges
    print >> sys.stderr, "Dependency edges:", dependencyEdges
    print >> sys.stderr, "Shortest path of dependencies for interaction edge:"
    printPathDistribution(pathsByLength)
    if options.output is not None:
        pathsByLength["corpus"] = options.input
        pathsByLength["parse"] = options.parse
        TableUtils.addToCSV(pathsByLength,
                            options.output + "/pathsByLength.csv")
    print >> sys.stderr, "Shortest path of dependencies between all entities:"
    printPathDistribution(pathsBetweenAllEntitiesByLength)
    if options.output is not None:
        # The original set these keys on pathsByLength, apparently a
        # copy-paste slip; the all-entities distribution is written below.
        pathsBetweenAllEntitiesByLength["corpus"] = options.input
        pathsBetweenAllEntitiesByLength["parse"] = options.parse
        TableUtils.addToCSV(
            pathsBetweenAllEntitiesByLength,
            options.output + "/pathsBetweenAllEntitiesByLength.csv")
Example #7
def analyzeLengths(corpusElements):
    interactionEdges = 0
    dependencyEdges = 0
    pathsByLength = {}
    pathsBetweenAllEntitiesByLength = {}
    for sentence in corpusElements.sentences:
        sentenceGraph = sentence.sentenceGraph
        #interactionEdges += len(sentenceGraph.interactionGraph.edges())
        interactionEdges += len(sentence.interactions)
        dependencyEdges += len(sentenceGraph.dependencyGraph.edges())
        
        undirected = sentenceGraph.dependencyGraph.to_undirected()
        paths = NX10.all_pairs_shortest_path(undirected, cutoff=999)
        # Shortest path for interaction edge
        for interaction in sentence.interactions:
            e1 = sentence.entitiesById[interaction.attrib["e1"]]
            e2 = sentence.entitiesById[interaction.attrib["e2"]]
            t1 = sentenceGraph.entityHeadTokenByEntity[e1]
            t2 = sentenceGraph.entityHeadTokenByEntity[e2]
            if t1 in paths and t2 in paths[t1]:
                path = paths[t1][t2]
                pathsByLength[len(path)-1] = pathsByLength.get(len(path)-1, 0) + 1
            else:
                pathsByLength["none"] = pathsByLength.get("none", 0) + 1

#        for intEdge in sentenceGraph.interactionGraph.edges():
#            if paths.has_key(intEdge[0]) and paths[intEdge[0]].has_key(intEdge[1]):
#                path = paths[intEdge[0]][intEdge[1]]
#                if not pathsByLength.has_key(len(path)-1):
#                    pathsByLength[len(path)-1] = 0
#                pathsByLength[len(path)-1] += 1
#            else:
#                if not pathsByLength.has_key("none"):
#                    pathsByLength["none"] = 0
#                pathsByLength["none"] += 1
        # Shortest paths between all entities
        for i in range(len(sentence.entities) - 1):
            for j in range(i + 1, len(sentence.entities)):
                tI = sentenceGraph.entityHeadTokenByEntity[sentence.entities[i]]
                tJ = sentenceGraph.entityHeadTokenByEntity[sentence.entities[j]]
                if tI in paths and tJ in paths[tI]:
                    path = paths[tI][tJ]
                    pathsBetweenAllEntitiesByLength[len(path)-1] = pathsBetweenAllEntitiesByLength.get(len(path)-1, 0) + 1
                elif tI == tJ:
                    # Entities sharing a head token count as distance zero.
                    pathsBetweenAllEntitiesByLength[0] = pathsBetweenAllEntitiesByLength.get(0, 0) + 1
                else:
                    pathsBetweenAllEntitiesByLength["none"] = pathsBetweenAllEntitiesByLength.get("none", 0) + 1

#        for i in range(len(sentenceGraph.tokens)-1):
#            for j in range(i+1,len(sentenceGraph.tokens)):
#                tI = sentenceGraph.tokens[i]
#                tJ = sentenceGraph.tokens[j]
#                if sentenceGraph.tokenIsEntityHead[tI] == None or sentenceGraph.tokenIsEntityHead[tJ] == None:
#                    continue
#                if paths.has_key(tI) and paths[tI].has_key(tJ):
#                    path = paths[tI][tJ]
#                    if not pathsBetweenAllEntitiesByLength.has_key(len(path)-1):
#                        pathsBetweenAllEntitiesByLength[len(path)-1] = 0
#                    pathsBetweenAllEntitiesByLength[len(path)-1] += 1
#                else:
#                    if not pathsBetweenAllEntitiesByLength.has_key("none"):
#                        pathsBetweenAllEntitiesByLength["none"] = 0
#                    pathsBetweenAllEntitiesByLength["none"] += 1
    
    print >> sys.stderr, "Interaction edges:", interactionEdges
    print >> sys.stderr, "Dependency edges:", dependencyEdges
    print >> sys.stderr, "Shortest path of dependencies for interaction edge:"
    printPathDistribution(pathsByLength)
    if options.output is not None:
        pathsByLength["corpus"] = options.input
        pathsByLength["parse"] = options.parse
        TableUtils.addToCSV(pathsByLength, options.output+"/pathsByLength.csv")
    print >> sys.stderr, "Shortest path of dependencies between all entities:"
    printPathDistribution(pathsBetweenAllEntitiesByLength)
    if options.output is not None:
        # The original set these keys on pathsByLength, apparently a
        # copy-paste slip; the all-entities distribution is written below.
        pathsBetweenAllEntitiesByLength["corpus"] = options.input
        pathsBetweenAllEntitiesByLength["parse"] = options.parse
        TableUtils.addToCSV(pathsBetweenAllEntitiesByLength, options.output+"/pathsBetweenAllEntitiesByLength.csv")
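
A sketch of the same path-length histogram with a current networkx release (the NX10 module above appears to be a bundled NetworkX 1.0 API; the function and variable names here are otherwise made up):

import networkx as nx
from collections import Counter

def path_length_counts(graph, token_pairs):
    # All-pairs shortest paths on the undirected dependency graph.
    paths = dict(nx.all_pairs_shortest_path(graph.to_undirected()))
    counts = Counter()
    for t1, t2 in token_pairs:
        if t1 in paths and t2 in paths[t1]:
            counts[len(paths[t1][t2]) - 1] += 1  # edges = nodes - 1
        else:
            counts["none"] += 1
    return counts

g = nx.DiGraph([("t1", "t2"), ("t2", "t3")])
print(path_length_counts(g, [("t1", "t3"), ("t1", "t4")]))
# Counter({2: 1, 'none': 1})
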