Code Example #1
def postAnalyzeFile(outputFile, bngLocation, database):
    """
    Performs a postcreation file analysis based on context information
    """
    #print('Transforming generated BNG file to BNG-XML representation for analysis')

    postAnalysisHelper(outputFile, bngLocation, database)

    # recreate file using information from the post analysis
    returnArray = analyzeHelper(database.document, database.reactionDefinitions, database.useID,
                                outputFile, database.speciesEquivalence, database.atomize, database.translator, database)
    with open(outputFile, 'w') as f:
        f.write(returnArray.finalString)
    # recompute bng-xml file
    consoleCommands.bngl2xml(outputFile)
    bngxmlFile = '.'.join(outputFile.split('.')[:-1]) + '.xml'
    # recompute context information
    contextAnalysis = postAnalysis.ModelLearning(bngxmlFile)

    # get those species patterns that follow uncommon motifs
    motifSpecies, motifDefinitions = contextAnalysis.processContextMotifInformation(database.assumptions['lexicalVsstoch'], database)
    #motifSpecies, motifDefinitions = contextAnalysis.processAllContextInformation()
    if len(motifDefinitions) > 0:
        logMess('INFO:CTX003', 'Species with suspect context information were found. Information is being dumped to {0}_context.log'.format(outputFile))
        with open('{0}_context.log'.format(outputFile), 'w') as f:
            pprint.pprint(dict(motifSpecies), stream=f)
            pprint.pprint(motifDefinitions, stream=f)
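A minimal sketch of how this entry point might be invoked; the paths are placeholders, and the database object is assumed to be the translation context built earlier in the SBMLparser pipeline (the postAnalysisHelper it calls is shown in Example #4):

# hypothetical call; both paths are placeholders
postAnalyzeFile('output/model.bngl', '/opt/BioNetGen-2.2', database)
# 'output/model.bngl' now holds the rewritten model; if suspect context was
# found, the motifs are dumped to 'output/model.bngl_context.log'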
Code Example #2
File: contextAnalyzer.py Project: jjtapia/SBMLparser
def main():
    fileName = 'complex/output19.bngl'
    console.bngl2xml(fileName)
    species, rules, par = readBNGXML.parseXML('output19.xml')
    #print rules

    transformationCenter = []
    transformationContext = []

    #extract the context of such reactions
    for idx, rule in enumerate(rules):
        tatomicArray, ttransformationCenter, ttransformationContext, \
                tproductElements, tactionNames, tlabelArray = extractAtomic.extractTransformations([rule])
        #atomicArray.append(tatomicArray)
        transformationCenter.append(ttransformationCenter)
        transformationContext.append(ttransformationContext)
    redundantDict, patternDictList = extractRedundantContext(
        rules, transformationCenter, transformationContext)
    #print redundantDict
    #construct rule based patterns based on the reaction patterns they have to match and
    #the ones they  have to discriminate

    for center in patternDictList:
        for rate in patternDictList[center]:
            match = patternDictList[center][rate]
            notContext = []
            for cRate in [x for x in patternDictList[center] if x != rate]:
                notContext.append([
                    transformationContext[x]
                    for x in redundantDict[center][cRate]
                ])
            ruleSet = [rules[x] for x in redundantDict[center][rate]]
            createMetaRule(ruleSet, match)

    newRules = list(range(len(rules)))  # materialize as a list so redundant rules can be removed below
    for center in redundantDict:
        for context in redundantDict[center]:
            for element in range(1, len(redundantDict[center][context])):
                newRules.remove(redundantDict[center][context][element])

    #for element in newRules:
    #    print str(rules[element][0])

    newRulesArray = []
    for element in newRules:
        newRulesArray.append('{0}\n'.format(str(rules[element][0])))
    lines = readFile(fileName)
    startR = lines.index('begin reaction rules\n')
    endR = lines.index('end reaction rules\n')
    startP = lines.index('begin parameters\n')
    endP = lines.index('end parameters\n')

    newPar = findNewParameters(lines[startP + 1:endP], par)
    newLines = lines[0:endP] + newPar + lines[
        endP:startR + 1] + newRulesArray + lines[endR:len(lines)]

    with open(fileName + 'reduced.bngl', 'w') as f:
        f.writelines(newLines)
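The readFile helper called above is not part of this excerpt. Since the code looks up literal lines such as 'begin reaction rules\n', it presumably returns the file's lines with their newlines intact; a minimal sketch under that assumption:

def readFile(fileName):
    # assumed helper: return the raw lines of the BNGL file, keeping the
    # trailing newlines that the lines.index(...) lookups above rely on
    with open(fileName) as f:
        return f.readlines()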
Code Example #3
File: libsbml2bngl.py Project: jjtapia/SBMLparser
def postAnalyzeFile(outputFile, bngLocation, database):
    """
    Performs a postcreation file analysis based on context information
    """
    #print('Transforming generated BNG file to BNG-XML representation for analysis')
    consoleCommands.setBngExecutable(bngLocation)
    outputDir = os.sep.join(outputFile.split(os.sep)[:-1])
    if outputDir != '':
        retval = os.getcwd()
        os.chdir(outputDir)
    consoleCommands.bngl2xml(outputFile.split(os.sep)[-1])
    if outputDir != '':
        os.chdir(retval)
    bngxmlFile = '.'.join(outputFile.split('.')[:-1]) + '.xml'
    #print('Sending BNG-XML file to context analysis engine')
    contextAnalysis = postAnalysis.ModelLearning(bngxmlFile)
    # analysis of redundant bonds
    deleteBonds = contextAnalysis.analyzeRedundantBonds(database.assumptions['redundantBonds'])

    modificationFlag = True

    for molecule in database.assumptions['redundantBondsMolecules']:
        if molecule[0] in deleteBonds:
            for bond in deleteBonds[molecule[0]]:
                database.translator[molecule[1]].deleteBond(bond)
                logMess('INFO:Atomization', 'Used context information to determine that the bond {0} in species {1} is not likely'.format(bond, molecule[1]))

    returnArray = analyzeHelper(database.document, database.reactionDefinitions, database.useID,
                                outputFile, database.speciesEquivalence, database.atomize, database.translator)
    with open(outputFile, 'w') as f:
        f.write(returnArray.finalString)
    # recompute bng-xml file
    consoleCommands.bngl2xml(outputFile)
    bngxmlFile = '.'.join(outputFile.split('.')[:-1]) + '.xml'
    # recompute context information
    contextAnalysis = postAnalysis.ModelLearning(bngxmlFile)

    # get those species patterns that follow uncommon motifs
    motifSpecies, motifDefinitions = contextAnalysis.processContextMotifInformation(database.assumptions['lexicalVsstoch'], database)
    #motifSpecies, motifDefinitions = contextAnalysis.processAllContextInformation()
    if len(motifDefinitions) > 0:
        logMess('WARNING:ContextAnalysis', 'Species with suspect context information were found. Information is being dumped to {0}_context.log'.format(outputFile))
        with open('{0}_context.log'.format(outputFile), 'w') as f:
            pprint.pprint(dict(motifSpecies), stream=f)
            pprint.pprint(motifDefinitions, stream=f)
Code Example #4
def postAnalysisHelper(outputFile, bngLocation, database):
    consoleCommands.setBngExecutable(bngLocation)
    outputDir = os.sep.join(outputFile.split(os.sep)[:-1])
    if outputDir != '':
        retval = os.getcwd()
        os.chdir(outputDir)
    consoleCommands.bngl2xml(outputFile.split(os.sep)[-1])
    if outputDir != '':
        os.chdir(retval)
    bngxmlFile = '.'.join(outputFile.split('.')[:-1]) + '.xml'
    #print('Sending BNG-XML file to context analysis engine')
    contextAnalysis = postAnalysis.ModelLearning(bngxmlFile)
    # analysis of redundant bonds
    deleteBonds = contextAnalysis.analyzeRedundantBonds(database.assumptions['redundantBonds'])
    for molecule in database.assumptions['redundantBondsMolecules']:
        if molecule[0] in deleteBonds:
            for bond in deleteBonds[molecule[0]]:
                database.translator[molecule[1]].deleteBond(bond)
                logMess('INFO:CTX002', 'Used context information to determine that the bond {0} in species {1} is not likely'.format(bond, molecule[1]))
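Both this helper and the longer postAnalyzeFile in Example #3 switch into the output directory, run bngl2xml, and switch back. A small context manager is one way to make that pattern exception-safe (a sketch, not part of the original project):

import os
from contextlib import contextmanager

@contextmanager
def working_directory(path):
    # temporarily change the working directory, restoring it even on error
    previous = os.getcwd()
    os.chdir(path)
    try:
        yield
    finally:
        os.chdir(previous)

With it, the chdir bookkeeping above collapses to a single "with working_directory(outputDir):" block, still guarded by the same outputDir != '' check.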
Code Example #5
def extractStatistics():
    number = 151
    console.bngl2xml('complex/output{0}.bngl'.format(number))
    species, rules, parameterDict = readBNGXML.parseXML(
        'output{0}.xml'.format(number))
    #print rules

    transformationCenter = []
    transformationContext = []

    k = []
    actions = Counter()
    actionSet = Counter()
    for idx, rule in enumerate(rules):
        tatomicArray, ttransformationCenter, ttransformationContext, \
                tproductElements, tactionNames, tlabelArray = extractAtomic.extractTransformations([rule])
        #atomicArray.append(tatomicArray)
        transformationCenter.append(ttransformationCenter)
        transformationContext.append(ttransformationContext)
        k.append(len(rule[0].actions))
        #if len(rule[0].actions) > 3:
        #    print rule[0].reactants
        actions.update([x.action for x in rule[0].actions])
        tmp = [x.action for x in rule[0].actions]
        tmp.sort()
        actionSet.update([tuple(tmp)])

    #print actions
    #print actionSet
    print('number of species', len(species))
    print('avg number of actions', np.average(k), np.std(k))
    centerDict = groupByReactionCenter(transformationCenter)
    print('singletons', len({x: centerDict[x] for x in centerDict if len(centerDict[x]) == 1}))
    tmp = [[tuple(set(x)), len(centerDict[x])] for x in centerDict]
    #reactionCenterGraph(species,tmp)
    #tmp.sort(key=lambda x:x[1],reverse=True)
    print('number of reaction centers', len(centerDict))
    print('number of rules', len(rules))

    #print 'unique',[x for x in centerDict[element] if len(centerDict[element]) == 1]
    redundantDict = groupByReactionCenterAndRateAndActions(rules, centerDict)
    #print redundantDict
    tmp2 = [('$\\tt{{{0}}}$'.format(tuple(set(x))), tuple(set(y[:-1])), y[-1],
             len(redundantDict[x][y])) for x in redundantDict
            for y in redundantDict[x] if 'kr' not in y[-1]]
    tmp2 = set(tmp2)
    tmp2 = list(tmp2)
    tmp2.sort(key=lambda x: x[3], reverse=True)
    tmp2.sort(key=lambda x: x[0], reverse=True)

    tmp2 = [
        '{0} & {1} & {2} & {3}\\\\\n'.format(element[0], element[1],
                                             element[2], element[3])
        for element in tmp2
    ]

    with open('table.tex', 'w') as f:
        f.write('\\begin{tabular}{|cccc|}\n')
        f.write('\\hline\n')
        # header assumed: four columns to match the |cccc| spec and the 4-tuple rows above
        f.write('Reaction Centers & Actions & Rate & Score\\\\\\hline\n')
        for element in tmp2:
            f.write(element)
        f.write('\\hline\n')
        f.write('\\end{tabular}\n')

    #print redundantDict
    x = [len(centerDict[x]) for x in centerDict]
    #122,138
    print('average number of reactions with same rate and reaction center', np.average(x), np.std(x))
    print('total number of clusters', len(x))
    print(x)
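The statistics above depend on the shape of the parsed rules: each entry behaves like a tuple whose first element exposes .actions (objects with an .action name) and .reactants. A hedged stand-in for exercising the counting logic without running BioNetGen (these classes only mimic the attributes the loop reads; they are not the real readBNGXML types):

from collections import Counter, namedtuple

# hypothetical stand-ins mirroring only the attributes used above
Action = namedtuple('Action', ['action'])
Rule = namedtuple('Rule', ['reactants', 'actions'])

rules = [(Rule([], [Action('AddBond')]),),
         (Rule([], [Action('AddBond'), Action('StateChange')]),)]

actions = Counter()
for rule in rules:
    actions.update([x.action for x in rule[0].actions])
print(actions)  # Counter({'AddBond': 2, 'StateChange': 1})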
Code Example #6
File: informationContent.py Project: pgetta/atomizer
def getInformationContent(fileName):
    console.bngl2xml(fileName, timeout=30)
    xmlFileName = '.'.join(fileName.split('.')[:-1]) + '.xml'
    xmlFileName = xmlFileName.split('/')[-1]
    species, reactions, _ = readBNGXML.parseXML(xmlFileName)
    calculateSpeciesInformationContent(species)
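The two string splits that derive xmlFileName can be expressed more robustly with pathlib; this sketch matches the original behavior for file names that have an extension:

from pathlib import Path

def xmlName(fileName):
    # swap the extension for .xml and drop the directory component,
    # mirroring the split('.') / split('/') logic above
    return Path(fileName).with_suffix('.xml').name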
Code Example #7
File: contactMap.py Project: jjtapia/atomizer
def main():
    with open('linkArray.dump', 'rb') as f:
        linkArray = pickle.load(f)
    with open('xmlAnnotationsExtended.dump', 'rb') as f:
        annotations = pickle.load(f)

    speciesEquivalence = {}
    onlyDicts = [x for x in listdir('./complex') if '.bngl.dict' in x]

    for x in onlyDicts:
        with open('complex/{0}'.format(x), 'rb') as f:
            speciesEquivalence[int(x.split('.')[0][6:])] = pickle.load(f)

    for cidx, cluster in enumerate(linkArray):
        #FIXME:only do the first cluster
        cidx = 0
        cluster = linkArray[0]
        if len(cluster) == 1:
            continue
        annotationsDict = {idx: x for idx, x in enumerate(annotations)}
        annotationsIncidence = {}

        #get the model intersection points

        for element in annotationsDict:
            for species in annotationsDict[element]:
                key = annotationsDict[element][species][1]
                if key not in annotationsIncidence:
                    annotationsIncidence[key] = set()
                annotationsIncidence[key].add(element)

        #number = 19
        graph = nx.Graph()
        nodeDict = {}
        superNodeDict = {}

        for element in annotationsIncidence:
            if len(annotationsIncidence[element]) > 1:
                tmpEdgeList = []
                names = []
                for idx in annotationsIncidence[element]:
                    for species in [
                            x for x in annotationsDict[idx]
                            if annotationsDict[idx][x][1] == element
                    ]:
                        try:
                            print(speciesEquivalence[idx])
                            if isinstance(speciesEquivalence[idx][species],
                                          str):
                                tmpEdgeList.append('{0}_{1}'.format(
                                    idx,
                                    speciesEquivalence[idx][species][0:-2]))
                                names.append(
                                    speciesEquivalence[idx][species][0:-2])
                            else:
                                tmpEdgeList.append('{0}_{1}'.format(
                                    idx, speciesEquivalence[idx]
                                    [species].molecules[0].name))
                                names.append(speciesEquivalence[idx]
                                             [species].molecules[0].name)
                        except Exception:
                            # keep going if a species is missing from the equivalence map
                            print('----', 'error', idx, species)
                names = [x.lower() for x in names]
                name = Counter(names).most_common(5)
                name = [x for x in name if len(x[0]) > 1]
                if len(name) == 0:
                    name = Counter(names).most_common(1)
                for edge in tmpEdgeList:
                    superNodeDict[edge] = name[0][0]
                if len(name) > 0:
                    superNodeDict[name[0][0]] = len(tmpEdgeList)
                #pairs = [(x, y, 1) for x in tmpEdgeList for y in tmpEdgeList if x!= y]
                #graph.add_weighted_edges_from(pairs)
        print(superNodeDict)
        for x in cluster:
            number = x - 1
            try:
                console.bngl2xml('complex/output{0}.bngl'.format(number))
                species, rules, parameterDict, observableList = readBNGXML.parseXML(
                    'output{0}.xml'.format(number))

            except Exception:
                # skip models that fail the BNGL -> BNG-XML round trip
                continue
            nodeDict[number] = simpleGraph(graph, species, observableList,
                                           number, superNodeDict)

        nx.write_gml(graph, 'cgraph_{0}.gml'.format(cidx))
        break
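The annotationsIncidence map built near the top of this example inverts the per-model annotation dictionaries, sending each annotation key to the set of models that mention it. The same step in isolation, with made-up values (the GO term and species names are illustrative only):

annotationsDict = {0: {'s1': ('EGFR', 'GO:0005006')},
                   1: {'s9': ('ErbB1', 'GO:0005006')}}
annotationsIncidence = {}
for element in annotationsDict:
    for species in annotationsDict[element]:
        key = annotationsDict[element][species][1]
        annotationsIncidence.setdefault(key, set()).add(element)
print(annotationsIncidence)  # {'GO:0005006': {0, 1}}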