def find_stdev( text, node, knockouts, repeat, steps ):
    "Finds the standard deviation of a node with two modes"
    coll = {}
    fullt  = 10
    mode  = 'plde'
    step = fullt*10
    print '- start run, %s' % mode
    
    model = Model( mode=mode, text=text )
    results = {}
    for gene in knockouts:
        for i in xrange( repeat ):
            model.initialize( missing=util.randbool , defaults = {gene:(0.0,1.0,step*10)})
            model.iterate( steps=step, fullt=fullt ) 
            values = map( float,  model.data[node] )
            results.setdefault(gene,[]).append( values )
        
        resmat = numpy.array( results[gene] )  
        means  = numpy.mean( resmat , 0 )
        stdev  = numpy.std( resmat,0 )
        coll[gene]= [means, stdev]

    return coll
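
# A minimal, hypothetical driver for find_stdev; the rule text, node and
# knockout names below are placeholders, not part of the original study.
if __name__ == '__main__':
    demo_text = """
    A = True
    B = True
    1: A* = A
    1: B* = A and B
    """
    collected = find_stdev(text=demo_text, node='B', knockouts=['A'], repeat=5, steps=100)
    for gene, (means, stdev) in collected.items():
        print('%s: final mean=%.3f, final stdev=%.3f' % (gene, means[-1], stdev[-1]))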
def run_plde( text, repeat, fullt ):
    "Runs the piecewise model on the text"

    steps  = fullt * 10
    model = Model( mode='plde', text=text )
    coll = []
    print '- start plde'
    for i in xrange( repeat ):
        model.initialize( missing=util.randbool )
        model.iterate( fullt=fullt, steps=steps )
        coll.append( model.data )
    
    return coll
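
# Sketch of a follow-up step (not part of the original script): reduce the
# per-run trajectories returned by run_plde, mirroring the numpy reduction
# used in find_stdev above.
def average_runs(coll, node):
    "Returns the mean and standard deviation trajectory of a node across runs"
    import numpy
    resmat = numpy.array([run[node] for run in coll])
    return numpy.mean(resmat, 0), numpy.std(resmat, 0)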
    def test_function_initializer( self ):
        "Testing function initializer"
        text = """
        MP = True
        1: PIC* = PIC
        1: MP* = PIC and MP
        """
        data = self.params[1]
        eng  = Model( mode='plde', text=text )
        eng.initialize( missing=helper.initializer( data ) )

        for node in 'PIC'.split():
            values = ( data[node].conc, data[node].decay, data[node].threshold )
            self.assertEqual( eng.start[node], values )
    def test_default_initializer( self ):
        "Testing default initializer"
        text = """
        ABC3 = (1, 2, 3)
        1: ABC1* = ABC
        1: ABC2* = ABC1 and ABC2
        """
        data = self.params[1]
        eng  = Model( mode='plde', text=text )
        eng.initialize( missing=helper.initializer( data, default=(1,1,1) ) )

        for node in 'ABC1 ABC2'.split():
            self.assertEqual( eng.start[node], (1, 1, 1) )
        
        self.assertEqual( eng.start['ABC3'], (1.0, 2.0, 3.0) )
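
# The two methods above are unittest.TestCase methods pasted without their class.
# A minimal sketch of the surrounding harness; how self.params is built (the
# helper call and the parameter file name) is an assumption, not the original setup.
import unittest

class InitializerTest(unittest.TestCase):
    def setUp(self):
        # self.params[1] must map node names to objects exposing the
        # .conc, .decay and .threshold attributes used by the tests;
        # helper is assumed to be the boolean2 plde helper module.
        self.params = helper.read_parameters('test-params.csv')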
def run_mutations( text, repeat, steps ):
    "Runs the asynchronous model with different mutations"

    # 'WT' is not a node in the model, so "knocking it out" leaves the rules
    # unchanged and serves as the wild-type baseline
    
    data = {}
    knockouts = 'WT S1P PA pHc ABI1 ROS'.split()
    for target in knockouts:
        print '- target %s' % target
        mtext  = boolean2.modify_states( text=text, turnoff=target )
        model = Model( mode='async', text=mtext )
        coll   = util.Collector()
        for i in xrange( repeat ):
            # uninitialized nodes are set to random values
            model.initialize( missing=util.randbool )
            model.iterate( steps=steps )
            coll.collect( states=model.states, nodes=model.nodes )
        data[target] = coll.get_averages( normalize=True )

    return data
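
# Hypothetical driver for run_mutations; the rules file name is a placeholder
# and 'ROS' is taken from the knockout list above.
if __name__ == '__main__':
    text = open('aba-rules.txt').read()
    data = run_mutations(text, repeat=10, steps=20)
    for target in data:
        # get_averages() maps each node to its averaged trajectory
        print('%s: final ROS level %.2f' % (target, data[target]['ROS'][-1]))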
Example #6
def run(text, nodes, repeat, steps):
    """
    Runs the simulation and collects the nodes into a collector, 
    a convenience class that can average the values that it collects.
    """
    coll = util.Collector()

    for i in xrange(repeat):
        engine = Model(mode='async', text=text)
        engine.RULE_GETVALUE = new_getvalue
        # minimalist initial conditions, missing nodes set to false
        engine.initialize(missing=util.false)
        engine.iterate(steps=steps)
        coll.collect(states=engine.states, nodes=nodes)

    print '- completed'
    avgs = coll.get_averages(normalize=True)
    return avgs
def run_mutations(text, repeat, steps):
    "Runs the asynchronous model with different mutations"

    # 'WT' is not a node in the model, so "knocking it out" leaves the rules
    # unchanged and serves as the wild-type baseline

    data = {}
    knockouts = 'WT S1P PA pHc ABI1 ROS'.split()
    for target in knockouts:
        print('- target %s' % target)
        mtext = boolean2.modify_states(text=text, turnoff=target)
        model = Model(mode='async', text=mtext)
        coll = util.Collector()
        for i in range(repeat):
            # uninitialized nodes are set to random values
            model.initialize(missing=util.randbool)
            model.iterate(steps=steps)
            coll.collect(states=model.states, nodes=model.nodes)
        data[target] = coll.get_averages(normalize=True)

    return data
Example #9
CONC = CONC_PARAMS[5]
COMP = COMP_PARAMS[5]


# helper function that binds the local override to the active COMP parameter
def local_override(node, indexer, tokens):
    return overrides.override(node, indexer, tokens, COMP)


#
# there will be two models, one for WT and the other for a BC knockout
#
wt_text = file('Bb.txt').read()
bc_text = boolean2.modify_states(text=wt_text, turnoff=["BC"])

model1 = Model(text=wt_text, mode='plde')
model2 = Model(text=bc_text, mode='plde')

model1.OVERRIDE = local_override
model2.OVERRIDE = local_override

model1.initialize(missing=helper.initializer(CONC))
model2.initialize(missing=helper.initializer(CONC))

# see localdefs for all function definitions
model1.iterate(fullt=FULLT, steps=STEPS, localdefs='localdefs')
model2.iterate(fullt=FULLT, steps=STEPS, localdefs='localdefs')

# saves the simulation results into a file
data = [model1.data, model2.data, model1.t]
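
# The save step referenced above is not shown in this snippet; a simple sketch
# using the standard pickle module (the output file name is a placeholder).
import pickle

with open('Bb-run.bin', 'wb') as fh:
    pickle.dump(data, fh)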
import random
from boolean2 import Model, state, util

rules = """
A = B = C = True

A* = A
B* = A
C* = B
"""

# create a custom value setter
def set_value( state, name, value, p ):
    "Custom value setter"

    # detect the node of interest
    if name == 'B':
        print 'now setting node %s' % name 
        value = random.choice ( (True, False) )
    
    # this sets the attribute
    setattr( state, name, value )
    return value

model = Model( text=rules, mode='sync')
model.parser.RULE_SETVALUE = set_value
model.initialize()
model.iterate( steps=5 )

for state in model.states:
    print state.A, state.B, state.C
Example #11
WG_4* = wg_mRNA_4
en_mRNA_4* = (WG_3 or WG_1) and not SLP_4
EN_4* = en_mRNA_4
hh_mRNA_4* = EN_4 and not CIR_4
HH_4* = hh_mRNA_4
ptc_mRNA_4* = CIA_4 and not EN_4 and not CIR_4
PTC_4* = ptc_mRNA_4 or (PTC_4 and not HH_3 and not HH_1)
PH_4* = (ptc_mRNA_4 or (PTC_4 and not HH_3 and not HH_1)) and (hh_mRNA_3 or hh_mRNA_1)
SMO_4* = not (ptc_mRNA_4 or (PTC_4 and not HH_3 and not HH_1)) or hh_mRNA_3 or hh_mRNA_1
ci_mRNA_4* = not EN_4
CI_4* = ci_mRNA_4
CIA_4* = CI_4 and (SMO_4 or hh_mRNA_3 or hh_mRNA_1)
CIR_4* = CI_4 and not SMO_4 and not hh_mRNA_3 and not hh_mRNA_1
"""

model = Model(rules, mode='sync')
model.initialize()
model.iterate(steps=30)

# for node in model.data:
#     print node, model.data[node]

#cycle = model.report_cycles()
nodeList = []
cell_1_states = []
cell_2_states = []
cell_3_states = []
cell_4_states = []
stateList = str(model.states[model.detect_cycles()[0]])
stateList = stateList.split(' ', 1)[1]
stateList = stateList.split(', ')
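
# Hypothetical continuation: group the final value of every node by its cell
# suffix so each cell's state in the attractor can be inspected separately.
cells = {'_1': cell_1_states, '_2': cell_2_states,
         '_3': cell_3_states, '_4': cell_4_states}
for node in sorted(model.data):
    nodeList.append(node)
    for suffix, bucket in cells.items():
        if node.endswith(suffix):
            bucket.append((node, model.data[node][-1]))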
Example #12
# (header inferred: this snippet begins mid-function; the node name 'A' is assumed)
def new_setvalue(state, name, value, p):
    "Called every time a node value is set; it may override the assigned value"
    if name == 'A':
        # pick at random from True, False and original value
        value = random.choice([True, False, value])

    return util.default_set_value(state, name, value, p)


def new_getvalue(state, name, p):
    """
    Called every time a node value is used in an expression. 
    It will override the value for the current step only.
    """
    value = util.default_get_value(state, name, p)

    if name == 'B':
        # pick at random from True, False and original value
        return random.choice([True, False, value])
    else:
        return value


model = Model(mode='sync', text='demo-rules.txt')

# assign the new rules to the engine
model.RULE_SETVALUE = new_setvalue
model.RULE_GETVALUE = new_getvalue

model.initialize()
model.iterate(steps=5)

for state in model.states:
    print state
Example #13
def BoolMod(timecourse, booldata, str_mode):
    coll = util.Collector()
    model = Model(text=booldata, mode=str_mode)
    initializer = state.all_initial_states(model.nodes, limit=None)

    # the data is the initial data, the func is the initializer
    n = 0
    for data, initfunc in initializer:
        # shows the initial values
        # progress: all_initial_states enumerates 2 ** len(model.nodes) start states
        print '%.2f%% done.' % (100.0 * n / 2 ** len(model.nodes))
        n = n + 1
        model.initialize(missing=initfunc)
        model.iterate(steps=timecourse)
        # takes all nodes
        nodes = model.nodes
        coll.collect(states=model.states, nodes=nodes)
    # --------------- Detect Cycles ------------------#
    print model.report_cycles()
    # Return results
    avgs = coll.get_averages(normalize=True)
    data = pd.DataFrame(avgs)
    data.to_csv("test.csv")
    print avgs
    print model.fp()
    # ------------- Retrieve the Data -------------#
    # 6-gene input signature
    #ALK = avgs.get('ALK')
    #MDK = avgs.get('MDK')
    #TrkA = avgs.get('TrkA')
    #NGF = avgs.get('NGF')
    #TrkB = avgs.get('TrkB')
    #BDNF = avgs.get('BDNF')

    # Model outcome states
    Differentiation = avgs.get('Differentiation')
    Apoptosis = avgs.get('Apoptosis')
    Proliferation = avgs.get('Proliferation')
    Angiogenesis = avgs.get('Angiogenesis')

    # Nodes at Issue
    #DNADamage = avgs.get('DNADamage')
    #p53 = avgs.get('p53')

    # Other Nodes
    #MDM2 = avgs.get('MDM2')
    #MAPK = avgs.get('MAPK')
    #p27 = avgs.get('p27')
    #FoxO = avgs.get('FoxO')
    #AKT = avgs.get('AKT')
    #Ras = avgs.get('Ras')
    #MYCN = avgs.get('MYCN')
    #MTOR = avgs.get('MTOR')
    #IP3 = avgs.get('IP3')

    ### Time axis (x)
    t = range(0, timecourse + 1)

    # Create plots with pre-defined labels. Try to make this a for loop
    fig, ax = plt.subplots()
    ax.plot(t, Differentiation, label='Differentiation')
    ax.plot(t, Apoptosis, label='Apoptosis')
    ax.plot(t, Angiogenesis, label='Angiogenesis')
    ax.plot(t, Proliferation, label='Proliferation')
    # ax.plot(t, TrkA, label='TrkA')
    # ax.plot(t, TrkB, label = 'TrkB')
    # ax.plot(t, MYCN, label = 'MYCN')
    # ax.plot(t, NGF, label = 'NGF')
    # ax.plot(t, MDK, label = 'MDK')
    # ax.plot(t, ALK, label = 'ALK')
    # ax.plot(t, Ras, label = 'Ras')
    # ax.plot(t, AKT, label = 'AKT')
    # ax.plot(t, FoxO, label = 'FoxO')
    # ax.plot(t, p27, label = 'P27')
    # ax.plot(t, p53, label = 'P53')

    # Legends
    legend = ax.legend(loc=0, shadow=True, fontsize='medium')
    plt.xlabel('Iterations')
    plt.ylabel('On Proportion')
    plt.title("Asynchronous Updating Model")

    # beautify
    legend.get_frame().set_facecolor('#C0C0C0')

    # View Plot
    plt.show()

    return avgs
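
# Hypothetical call for BoolMod. all_initial_states enumerates every combination
# of starting values, so the run grows as 2 ** (number of nodes); the rules file
# name and the update mode below are placeholders.
if __name__ == '__main__':
    rules = open('neuroblastoma-rules.txt').read()
    BoolMod(timecourse=20, booldata=rules, str_mode='async')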
Example #14
# use data from the sixth row (it is zero based counting!) in the file
CONC = CONC_PARAMS[5]
COMP = COMP_PARAMS[5]

# helper function that binds the local override to the active COMP parameter
def local_override( node, indexer, tokens ):
    return overrides.override( node, indexer, tokens, COMP )

#
# there will be two models, one for WT and the other for a BC knockout
#
wt_text = file('Bb.txt').read()
bc_text = boolean2.modify_states( text=wt_text, turnoff= [ "BC"  ] )

model1 = Model( text=wt_text, mode='plde' )
model2 = Model( text=bc_text, mode='plde' )

model1.OVERRIDE = local_override
model2.OVERRIDE = local_override

model1.initialize( missing = helper.initializer( CONC )  )
model2.initialize( missing = helper.initializer( CONC )  )

# see localdefs for all function definitions
model1.iterate( fullt=FULLT, steps=STEPS, localdefs='localdefs' )
model2.iterate( fullt=FULLT, steps=STEPS, localdefs='localdefs' )

# saves the simulation results into a file
data = [ model1.data, model2.data, model1.t ]
Example #15
# This initial condition leads to a cycle of period 4.
# If A is set to False, a steady state is obtained.
#
#
text = """
A = True
B = False
C = False
D = True

B* = A or C
C* = A and not D
D* = B and C
"""

model = Model(text=text, mode='sync')
model.initialize()
model.iterate(steps=15)

# the model data attribute holds the states keyed by nodes
for node in model.data:
    print node, model.data[node]

# this is a helper function that reports the cycle lengths
# and the index at which the cycle started
model.report_cycles()

#
# the same information as above, but returned as two values
# instead of printed; a short sketch follows
#
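# detect_cycles() returns the two values as a tuple instead of printing them
cycle_info = model.detect_cycles()
print(cycle_info)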
Example #16
B* = A or C
C* = A and not D
D* = B and C
"""

seen = {}

 
#
# the key will be the fingerprint of the first state 
# (some random initial conditions may be the same); it is fine to overwrite in this case
# as the 'sync' update rule is completely deterministic
#
for i in range(10):
    model = Model( text=text, mode='sync')
    model.initialize()
    model.iterate( steps=20 )

    # detect the cycles in the states
    size, index = model.detect_cycles() 
    
    # fingerprint of the first state
    key = model.first.fp()

    # keep only the first 10 states out of the 20
    values = [ x.fp() for x in model.states[:10] ]

    # store the fingerprinted values for each initial state
    seen [ key ] = (index, size, values )
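
# Sketch: summarize the distinct trajectories that were observed. A repeated key
# just means two random draws produced the same initial state, which is harmless
# because the 'sync' update is deterministic.
for key, (index, size, values) in seen.items():
    print('start=%s cycle_index=%s cycle_size=%s first_states=%s' % (key, index, size, values[:3]))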
Example #17
def BoolModPlot(timecourse, samples, booldata, str_mode):
    coll = util.Collector()
    for i in range(samples):
        model = Model(text=booldata, mode=str_mode)
        model.initialize()
        model.iterate(steps=timecourse)
        print model.states

        # takes all nodes
        nodes = model.nodes
        coll.collect(states=model.states, nodes=nodes)

    # --------------- Detect Cycles ------------------#
    print model.report_cycles()
    # Return results
    avgs = coll.get_averages(normalize=True)
    data = pd.DataFrame(avgs)
    data.to_csv("test.csv")
    # ------------- Retrieve the Data -------------#
    # 6-gene input signature
    ALK = avgs.get('ALK')
    MDK = avgs.get('MDK')
    TrkA = avgs.get('TrkA')
    NGF = avgs.get('NGF')
    TrkB = avgs.get('TrkB')
    BDNF = avgs.get('BDNF')

    # Model outcome states
    Differentiation = avgs.get('Differentiation')
    Apoptosis = avgs.get('Apoptosis')
    Proliferation = avgs.get('Proliferation')
    Angiogenesis = avgs.get('Angiogenesis')

    # Nodes at Issue
    DNADamage = avgs.get('DNADamage')
    p53 = avgs.get('p53')

    # Other Nodes
    MDM2 = avgs.get('MDM2')
    MAPK = avgs.get('MAPK')
    p27 = avgs.get('p27')
    FoxO = avgs.get('FoxO')
    AKT = avgs.get('AKT')
    Ras = avgs.get('Ras')
    MYCN = avgs.get('MYCN')
    MTOR = avgs.get('MTOR')
    IP3 = avgs.get('IP3')

    ### Time axis (x)
    t = range(0, timecourse + 1)

    # Create plots with pre-defined labels. Try to make this a for loop
    fig, ax = plt.subplots()
    ax.plot(t, Differentiation, label='Differentiation')
    ax.plot(t, Apoptosis, label='Apoptosis')
    ax.plot(t, Angiogenesis, label='Angiogenesis')
    ax.plot(t, Proliferation, label='Proliferation')
    # ax.plot(t, TrkA, label='TrkA')
    # ax.plot(t, TrkB, label = 'TrkB')
    # ax.plot(t, MYCN, label = 'MYCN')
    # ax.plot(t, NGF, label = 'NGF')
    # ax.plot(t, MDK, label = 'MDK')
    # ax.plot(t, ALK, label = 'ALK')
    # ax.plot(t, Ras, label = 'Ras')
    # ax.plot(t, AKT, label = 'AKT')
    # ax.plot(t, FoxO, label = 'FoxO')
    # ax.plot(t, p27, label = 'P27')
    # ax.plot(t, p53, label = 'P53')

    # Legends
    legend = ax.legend(loc=0, shadow=True, fontsize='medium')
    plt.xlabel('Iterations')
    plt.ylabel('On Proportion')
    plt.title("Asynchronous Updating Model")

    # beautify
    legend.get_frame().set_facecolor('#C0C0C0')

    # View Plot
    plt.show()

    return avgs
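
# Hypothetical call for BoolModPlot: it repeats `samples` simulations from the
# rules' own initial values (with 'async' updating the random update order makes
# runs differ); the rules file name below is a placeholder.
if __name__ == '__main__':
    rules = open('neuroblastoma-rules.txt').read()
    BoolModPlot(timecourse=20, samples=50, booldata=rules, str_mode='async')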
Example #18
def all_ss_model_w_fix(str_file, str_mode, timecourse, csv_out_txt, geneon=[], geneoff=[], dumpevery=1000,\
                       nodes_for_averages=['Apoptosis', 'Proliferation', 'Angiogenesis', 'Differentiation']):
    '''
    Generates and simulates every possible starting state of a model. This is a
    modification of the all_ss_model() function that also allows specific nodes
    to be fixed on or off.
    str_file: as a string, the name of the file containing the rules for the boolean model
    str_mode: the update mode of the model, i.e. 'sync' or 'async'
    timecourse: the number of steps each simulation is iterated
    csv_out_txt: the name of the CSV file the results are written to
    geneon / geneoff: lists of node names forced ON / OFF in every simulation
    dumpevery: write the collected rows to the CSV after this many simulations
    nodes_for_averages: nodes whose average ON state is tracked across the runs
    :return:
    A pandas DataFrame of the per-simulation results (also written to csv_out_txt).
    '''
    def dumper():
        '''
        will dump the data into a CSV file of name csv_out_txt
        :return: none
        '''
        with open(csv_out_txt, 'a') as csvfile:
            datadumper = csv.writer(csvfile, lineterminator='\n')
            for row in d:
                datadumper.writerow(row)
        return

    # header names for pandas dataframe and CSV data dump file
    headers = ['Index', 'CycleLength', 'CycleFingerprint', 'FirstState', 'LastState', str(nodes_for_averages[0]),\
               str(nodes_for_averages[1]), str(nodes_for_averages[2]), str(nodes_for_averages[3])]

    # Init a CSV to contain the data generated over the course of the run
    with open(csv_out_txt, 'wb') as out:
        csv_out = csv.writer(out)
        csv_out.writerow(headers)

    Bool1 = file(str_file).read()

    # Generate a new version of the model that allows the genes to be fixed on or off by removing updating rules for
    # them. This will not override the initialized value however, and that must be updated to be assigned on or off
    # separately.
    on = geneon
    off = geneoff
    Bool2 = tokenizer.modify_states(Bool1, on, off)
    model = Model(text=Bool2, mode=str_mode)
    initializer = state.all_initial_states(model.nodes, limit=None)

    # Utilized in the for loops for the loading bar
    load_status_divisor = len(model.nodes) - len(geneoff) - len(geneon)

    # The BooleanNet Data Collector. Here it is implemented to gather data on the states of the nodes in the model.
    coll = util.Collector()

    # Wiley Stoeber. 5/8/18. Create a modified version of the initializer that will pass over initial states that
    # contradict the gene set mode
    initializer_new = []

    if geneoff != [] or geneon != []:
        d = []
        p = 0
        n = 0
        for data_init in initializer:
            data = data_init[0]
            for i in range(len(geneoff)):
                if not data[str(geneoff[i])]:  # keep states where the knocked-out gene starts OFF
                    initializer_new.append(data_init)
            for i in range(len(geneon)):
                if data[str(geneon[i])]:
                    initializer_new.append(data_init)

        for data, initfunc in initializer_new:
            # Fixes genes on or off (True or False) at their starting state.
            for i in range(len(geneoff)):
                data.update({str(geneoff[i]): False})
            for i in range(len(geneon)):
                data.update({str(geneon[i]): True})

            # Initialize the model with the given pre-computed initial conditions stored in the data variable.
            # for a given model with Z nodes, there are 2 to the power of Z starting states.
            model.initialize(defaults=data)
            model.iterate(steps=timecourse)
            e = model.detect_cycles()
            nodes = nodes_for_averages

            d.append(list(model.detect_cycles()))
            d[p].append(model.fp())
            d[p].append(model.first)
            d[p].append(model.last)
            detect_states = 0 - e[1]
            coll.collect(states=model.states[detect_states:], nodes=nodes)
            # Console output for debugging and progress tracking
            print 'The fingerprint is', d[p][2]
            print 'The cycle length is', d[p][1]

            prc = ((n) / float(pow(2, load_status_divisor))) * 100
            print '%.2f' % prc + "% done."
            print '\n'

            # Iterate the model
            n += 1
            p += 1

            if p == dumpevery:
                dumper()
                p = 0
                d = []

        dumper()
        d = []
    else:
        d = []
        p = 0
        n = 0
        for data, initfunc in initializer:
            # Fixes genes on or off (True or False) at their starting state.
            for i in range(len(geneoff)):
                data.update({str(geneoff[i]): False})
            for i in range(len(geneon)):
                data.update({str(geneon[i]): True})

            # Initialize the model with the given pre-computed initial conditions stored in the data variable.
            # for a given model with Z nodes, there are 2 to the power of Z starting states.
            model.initialize(defaults=data)
            model.iterate(steps=timecourse)
            e = model.detect_cycles()
            nodes = nodes_for_averages

            d.append(list(model.detect_cycles()))
            d[p].append(model.fp())
            d[p].append(model.first)
            d[p].append(model.last)
            detect_states = 0 - e[1]
            coll.collect(states=model.states[detect_states:], nodes=nodes)

            # Console output for debugging and progress tracking
            print 'The fingerprint is', d[p][2]
            print 'The cycle length is', d[p][1]

            prc = ((n) / float(pow(2, load_status_divisor))) * 100
            print '%.2f' % prc + "% done."
            print '\n'

            # Iterate the model
            n += 1
            p += 1

            if p == dumpevery:
                dumper()
                p = 0
                d = []

        dumper()
        d = []

    # Generate a pandas dataframe of the dump file

    df = pd.read_csv(csv_out_txt)

    # Add the avg on state of the nodes specified by the user. They will output to new columns in row 1 of the data
    if nodes_for_averages != []:
        avgs = coll.get_averages(normalize=True)
        a1 = avgs[nodes_for_averages[0]]
        b1 = avgs[nodes_for_averages[1]]
        c1 = avgs[nodes_for_averages[2]]
        d1 = avgs[nodes_for_averages[3]]

        # Set the averages values in the dataframe.

        df.set_value(0, 'Apoptosis', a1[0])
        df.set_value(0, 'Proliferation', b1[0])
        df.set_value(0, 'Differentiation', d1[0])
        df.set_value(0, 'Angiogenesis', c1[0])

        df.to_csv(csv_out_txt)

    return df
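
# Hypothetical call: sweep every initial state of a rules file with MYCN forced
# ON, writing rows to 'mycn_on.csv' every 500 simulations (the file and node
# names are placeholders).
result_df = all_ss_model_w_fix('rules.txt', 'sync', timecourse=15,
                               csv_out_txt='mycn_on.csv', geneon=['MYCN'],
                               dumpevery=500)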
Example #19
# This initial condition leads to a cycle of period 4.
# If A is set to False, a steady state is obtained.
#
#
text = """
A = True
B = False
C = False
D = True

B* = A or C
C* = A and not D
D* = B and C
"""

model = Model(text=text, mode="sync")
model.initialize()
model.iterate(steps=15)

# the model data attribute holds the states keyed by nodes
for node in model.data:
    print node, model.data[node]

# this is a helper function that reports the cycle lengths
# and the index at which the cycle started
model.report_cycles()

#
# the same information as above, but returned as two values
# instead of printed
#
def datahandler(str_file, str_mode='sync', timecourse=10, csv_out_txt="default.csv", geneon=[], geneoff=[], dumpevery=1000,\
                nodes_for_averages=['Apoptosis', 'Proliferation', 'Angiogenesis', 'Differentiation'], debug=False, graph=False):
    '''
    A function to generate a list of all possible starting states of a model. Note that this function is a
    modification of the all_ss_model() function and allows for specific nodes to be fixed on or off.

    str_file: as a string, write the name of the file containing the rules for the boolean model
    str_mode: enter the mode of model that is going to be used, ie sync or async. Default is sync
    timecourse: The number of time steps that the model will progress through. Default is ten
    csv_out_txt: the results are output to this CSV file. Default is 'default.csv'
    geneon: Takes a list of nodes as strings. These nodes will be forced ON for all simulations
    geneoff: Takes a list of nodes as strings. These nodes will be forced OFF for all simulations
    dumpevery: utilizes the dumper subfunction to write the data to file frequently to avoid running out of RAM.
        Default is to dump every 1000 simulations
    nodes_for_averages: Input a list of nodes as strings. The program will keep track of the average ON state of these
        nodes. The default values are for use with the BPC model of neuroblastoma (2018)
    debug: Default is False. When True, prints the progress of the simulation (this slightly slows the run)
    graph: Boolean flag; if True, displays a bar plot of the averaged nodes_for_averages at the end of the run
    :return:
    A pandas DataFrame of the per-simulation results (also written to csv_out_txt).
    '''
    def dumper():
        '''
        sub-function
        Will dump the data into a CSV file of name csv_out_txt, keeps memory burden low for extremely large networks.
        :return: none
        '''
        with open(csv_out_txt, 'a') as csvfile:
            datadumper = csv.writer(csvfile, lineterminator='\n')
            for row in d:
                datadumper.writerow(row)
        return None

    # Header names for pandas dataframe and CSV data dump file
    headers = ['Index', 'CycleLength', 'CycleFingerprint', 'FirstState', 'LastState', 'FirstFP', 'SteadyState', str(nodes_for_averages[0]),\
               str(nodes_for_averages[1]), str(nodes_for_averages[2]), str(nodes_for_averages[3])]

    # Init a CSV to contain the data generated over the course of the run
    with open(csv_out_txt, 'wb') as out:
        csv_out = csv.writer(out)
        csv_out.writerow(headers)

    # Variable saving the imported list of rules for use by the model.
    Bool1 = file(str_file).read()

    # Generate a new version of the model that allows the genes to be fixed on or off by removing updating rules for
    # them. This will not override the initialized value however, and that must be updated to be assigned on or off
    # separately.
    on = geneon
    off = geneoff
    Bool2 = tokenizer.modify_states(Bool1, on, off)
    model = Model(text=Bool2, mode=str_mode)
    initializer = state.all_initial_states(model.nodes, limit=None)

    # Utilized in the for loops for the loading bar
    load_status_divisor = len(model.nodes) - len(geneoff) - len(geneon)

    # The BooleanNet Data Collector. Here it is implemented to gather data on the states of the nodes in the model.
    coll = util.Collector()

    # Wiley Stoeber. 5/8/18. Create a modified version of the initializer that will pass over initial states that
    # contradict the gene set mode
    initializer_new = []

    # An initializer for single KO mutants.
    # TODO I need to make it so this initializer can adapt to any number of KO/OE genes. As it stands it can only do up
    # to a double. Not helpful! Need to make recursive?
    if geneoff != [] or geneon != []:
        if len(geneoff) + len(geneon) == 1:
            d = []
            p = 0
            if debug:
                n = 0
            for data_init in initializer:
                data = data_init[0]
                for i in range(len(geneoff)):
                    if not data[geneoff[i]]:
                        initializer_new.append(data_init)
                        if debug:
                            print 'Single Gene Knockout, Geneoff'
                for i in range(len(geneon)):
                    if data[geneon[i]]:
                        initializer_new.append(data_init)
                        if debug:
                            print 'Single Gene Overexpression, Geneon'

        # An initializer for double KO when some on and some off
        elif len(geneoff) >= 1 and len(geneon) >= 1:
            d = []
            p = 0
            if debug:
                n = 0
            for data_init in initializer:
                data = data_init[0]
                if not data[geneoff[0]]:
                    if data[geneon[0]]:
                        initializer_new.append(data_init)
                        if debug:
                            print 'Gene off then gene on, KO then Overexpression'
                elif data[geneon[0]]:
                    if not data[geneoff[0]]:
                        initializer_new.append(data_init)
                        if debug:
                            print 'Gene on then gene off, Overexpression then KO'
                else:
                    pass

        # An initializer for double KO when both genes are on or off
        elif len(geneoff) == 2 or len(geneon) == 2:
            d = []
            p = 0
            if debug:
                n = 0
            for data_init in initializer:
                data = data_init[0]
                i = 0
                while i <= 1:
                    if not data[geneoff[i]]:
                        i += 1
                        if not data[geneoff[i]]:
                            initializer_new.append(data_init)
                            if debug:
                                print 'Double Gene Knockout'
                            i += 1
                            break
                        break
                    break

            for data_init in initializer:
                data = data_init[0]
                i = 0
                while i <= 1:
                    if data[geneon[i]]:
                        i += 1
                        if data[geneon[i]]:
                            initializer_new.append(data_init)
                            if debug:
                                print 'Double Gene Overexpression'
                            i += 1
                            break
                        break
                    break
        for data, initfunc in initializer_new:
            # Fixes genes on or off (True or False) at their starting state.
            for i in range(len(geneoff)):
                data.update({str(geneoff[i]): False})
            for i in range(len(geneon)):
                data.update({str(geneon[i]): True})

            # Initialize the model with the given pre-computed initial conditions stored in the data variable.
            # for a given model with Z nodes, there are 2 to the power of Z starting states.
            # todo: switch to dynamic length if i is going to go over 10, becomes much less efficient quickly
            model.initialize(defaults=data)
            for i in range(timecourse):
                model.iterate(steps=i)
                e = model.detect_cycles()
                nodes = nodes_for_averages

                if e[1] == 1:
                    # append the number of index at which cycles began and the length of the cycle to data
                    d.append(list(model.detect_cycles()))
                    # append the model fingerprint for the entirety of the simulation
                    d[p].append(model.fp())
                    # append the first state of the model, coincides with assigned state
                    d[p].append(model.first)
                    # append the last state of the model, coincides with one part of LC or FPA
                    d[p].append(model.last)
                    # append an int that represents the model's starting state, useful for later sort functions
                    # especially in excel.
                    d[p].append(model.fp()[0])

                    detect_states = 0 - e[1]

                    # Affix the limit cycle or FPA that defines the simulation to a new column.
                    a = model.fp()
                    b = a[detect_states:]
                    c = sorted(b)
                    d[p].append(c)

                    coll.collect(states=model.states[detect_states:],
                                 nodes=nodes)
                    # Console output for debugging and progress tracking
                    if debug:
                        print 'The fingerprint is', d[p][2]
                        print 'The cycle length is', d[p][1]
                        prc = ((n) / float(pow(2, load_status_divisor))) * 100
                        print '%.2f' % prc + "% done."
                        print '\n'
                    break

                elif e[1] > 1:

                    # append the number of index at which cycles began and the length of the cycle to data
                    d.append(list(model.detect_cycles()))
                    # append the model fingerprint for the entirety of the simulation
                    d[p].append(model.fp())
                    # append the first state of the model, coincides with assigned state
                    d[p].append(model.first)
                    # append the last state of the model, coincides with one part of LC or FPA
                    d[p].append(model.last)
                    # append an int that represents the model's starting state, useful for later sort functions
                    # especially in excel.
                    d[p].append(model.fp()[0])

                    detect_states = 0 - e[1]

                    # Affix the limit cycle or FPA that defines the simulation to a new column.
                    a = model.fp()
                    b = a[detect_states:]
                    c = sorted(b)
                    d[p].append(c)

                    coll.collect(states=model.states[detect_states:],
                                 nodes=nodes)
                    # Console output for debugging and progress tracking
                    if debug:
                        print 'The fingerprint is', d[p][2]
                        print 'The cycle length is', d[p][1]

                        prc = ((n) / float(pow(2, load_status_divisor))) * 100
                        print '%.2f' % prc + "% done."
                        print '\n'
                    break

                elif e[1] == 0 and i + 1 == timecourse:
                    # append the number of index at which cycles began and the length of the cycle to data
                    d.append(list(model.detect_cycles()))
                    # append the model fingerprint for the entirety of the simulation
                    d[p].append(model.fp())
                    # append the first state of the model, coincides with assigned state
                    d[p].append(model.first)
                    # append the last state of the model, coincides with one part of LC or FPA
                    d[p].append(model.last)
                    # append an int that represents the model's starting state, useful for later sort functions
                    # especially in excel.
                    d[p].append(model.fp()[0])

                    detect_states = 0 - e[1]

                    # Affix the limit cycle or FPA that defines the simulation to a new column.
                    a = model.fp()
                    b = a[detect_states:]
                    c = sorted(b)
                    d[p].append(c)

                    coll.collect(states=model.states[detect_states:],
                                 nodes=nodes)
                    # Console output for debugging and progress tracking
                    if debug:
                        print 'The fingerprint is', d[p][2]
                        print 'The cycle length is', d[p][1]
                        prc = ((n) / float(pow(2, load_status_divisor))) * 100
                        print '%.2f' % prc + "% done."
                        print '\n'
                    break

            # Iterate the model
            n += 1
            p += 1

            # Dump data to file if the iterator p has reached the dumpevery variable. Keeps RAM burden low. Resets the
            # p counter to zero and overwrites the data that had been written to file.
            if p == dumpevery:
                dumper()
                p = 0
                d = []

        # A catchall at the end of the set of simulations that dumps any remaining data that might not have been
        # enough to trigger the prior dumper.
        dumper()
        d = []

    else:
        d = []
        p = 0
        if debug:
            n = 0
        for data, initfunc in initializer:
            # Fixes genes on or off (True or False) at their starting state.
            # Initialize the model with the given pre-computed initial conditions stored in the data variable.
            # for a given model with Z nodes, there are 2 to the power of Z starting states.
            model.initialize(defaults=data)
            for i in range(timecourse):
                model.iterate(steps=i)
                e = model.detect_cycles()
                nodes = nodes_for_averages
                print e

                if e[1] == 1:
                    # append the number of index at which cycles began and the length of the cycle to data
                    d.append(list(model.detect_cycles()))
                    # append the model fingerprint for the entirety of the simulation
                    d[p].append(model.fp())
                    # append the first state of the model, coincides with assigned state
                    d[p].append(model.first)
                    # append the last state of the model, coincides with one part of LC or FPA
                    d[p].append(model.last)
                    # append an int that represents the model's starting state, useful for later sort functions
                    # especially in excel.
                    d[p].append(model.fp()[0])

                    detect_states = 0 - e[1]

                    # Affix the limit cycle or FPA that defines the simulation to a new column.
                    a = model.fp()
                    b = a[detect_states:]
                    c = sorted(b)
                    d[p].append(c)

                    # Affix the limit cycle or FPA that defines the simulation to a new column
                    coll.collect(states=model.states[detect_states:],
                                 nodes=nodes)
                    # Console output for debugging and progress tracking
                    if debug:
                        print 'The fingerprint is', d[p][2]
                        print 'The cycle length is', d[p][1]
                        prc = ((n) / float(pow(2, load_status_divisor))) * 100
                        print '%.2f' % prc + "% done."
                        print '\n'
                    break

                elif e[1] > 1:

                    # append the number of index at which cycles began and the length of the cycle to data
                    d.append(list(model.detect_cycles()))
                    # append the model fingerprint for the entirety of the simulation
                    d[p].append(model.fp())
                    # append the first state of the model, coincides with assigned state
                    d[p].append(model.first)
                    # append the last state of the model, coincides with one part of LC or FPA
                    d[p].append(model.last)
                    # append an int that represents the model's starting state, useful for later sort functions
                    # especially in excel.
                    d[p].append(model.fp()[0])

                    detect_states = 0 - e[1]

                    # Affix the limit cycle or FPA that defines the simulation to a new column.
                    a = model.fp()
                    b = a[detect_states:]
                    c = sorted(b)
                    d[p].append(c)

                    coll.collect(states=model.states[detect_states:],
                                 nodes=nodes)
                    # Console output for debugging and progress tracking
                    if debug:
                        print 'The fingerprint is', d[p][2]
                        print 'The cycle length is', d[p][1]
                        prc = ((n) / float(pow(2, load_status_divisor))) * 100
                        print '%.2f' % prc + "% done."
                        print '\n'
                    break

                # This function makes sure that the entire user specified timecourse has passed before writing
                # that the model did not reach a steady state.
                elif e[1] == 0 and i + 1 == timecourse:
                    # append the number of index at which cycles began and the length of the cycle to data
                    d.append(list(model.detect_cycles()))
                    # append the model fingerprint for the entirety of the simulation
                    d[p].append(model.fp())
                    # append the first state of the model, coincides with assigned state
                    d[p].append(model.first)
                    # append the last state of the model, coincides with one part of LC or FPA
                    d[p].append(model.last)
                    # detect states is used to figure out how many states to write to the collector object.
                    detect_states = 0 - e[1]
                    # append an int that represents the model's starting state, useful for later sort functions
                    # especially in excel.
                    d[p].append(model.fp()[0])

                    # Affix the limit cycle or FPA that defines the simulation to a new column.
                    a = model.fp()
                    b = a[detect_states:]
                    c = sorted(b)
                    d[p].append(c)

                    # Collect the averages over the course of the run.
                    coll.collect(states=model.states[detect_states:],
                                 nodes=nodes)
                    # Console output for debugging and progress tracking
                    if debug:
                        print 'The fingerprint is', d[p][2]
                        print 'The cycle length is', d[p][1]
                        prc = ((n) / float(pow(2, load_status_divisor))) * 100
                        print '%.2f' % prc + "% done."
                        print '\n'
                    break

            # Iterate the model
            if debug:
                n += 1
            p += 1

            # Dump data to file if the iterator p has reached the dumpevery variable. Keeps RAM burden low. Resets the
            # p counter to zero and overwrites the data that had been written to file.
            if p == dumpevery:
                dumper()
                p = 0
                d = []
        # A catchall at the end of the set of simulations that dumps any remaining data that might not have been enough
        # to trigger the prior dumper.
        dumper()
        d = []

    # Generate a pandas DataFrame of the dump file.

    df = pd.read_csv(csv_out_txt)

    # Create count list of df
    fpa = df[df.CycleLength == 1]
    fpa["SteadyState"] = fpa['SteadyState'].str.strip('[]').astype(int)
    counts = fpa['SteadyState'].value_counts().sort_index()
    counts = pd.DataFrame([counts], columns=["SS", "NumberInFPA"])
    print counts

    counts['Proportion'] = counts['NumberInFPA'] / float(
        pow(2, load_status_divisor))

    # If nodes_for_averages is non-empty, write the average ON state of those nodes
    # to the dataframe, then update the CSV to contain them
    if nodes_for_averages:
        for i in range(len(nodes_for_averages)):
            avgs = coll.get_averages(normalize=True)
            a1 = avgs[nodes_for_averages[i]]
            df.set_value(0, nodes_for_averages[i], a1[0])
        df.to_csv(csv_out_txt)

    #TODO: I need to create a method of exporting an excel file that is processed so that the user does not need to process them manually to get system state information

    # Graphical output
    # Will export a bar chart that shows the average ON proportion of the nodes_for_averages
    if graph is True:
        nfa = nodes_for_averages
        x = []
        for i in range(len(nfa)):
            x.append(nfa[i])
        gr = df[x]
        gr = gr.dropna()
        plot = gr.plot(kind='bar')
        plot.set_xlabel("Groups")
        plot.set_ylabel("On Proportion")
        plt.show()

    return df
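
# Hypothetical call: full state-space sweep with ALK knocked out, progress
# printing enabled and a bar chart of the averaged outcome nodes at the end
# (the file and node names are placeholders).
result_df = datahandler('rules.txt', str_mode='sync', timecourse=20,
                        csv_out_txt='alk_ko.csv', geneoff=['ALK'],
                        debug=True, graph=True)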
Example #21
                                        vext = {}
                                        next = {}
                                        
                                        2: i *= n and (dext or jext)
                                        2: n *= i 
                                        2: j *= i and not (j and next) or jagged_oe and jagged_ko
                                        2: v *= vr and vext 
                                        2: d *= v or not i and not (d and next)
                                        2: vr *= v or not i
                                        
                                        3: tip *= d and vr and not i and not n and not j
                                        3: stalk *= i and n and j and not vr and not d
                                        3: tip_stalk *= d and vr and i and n and j
                                        3: hybrid *= not (tip or stalk or tip_stalk)
                                        """.format(n0, d0, j0, i0, vr0, v0, dext0, jext0, vext0, next0)
                                        model = Model(text = text_ics, mode = 'sync')
                                        on = ['jagged_oe', 'j']
                                        off = ['jagged_ko', 'j']
                                        text_mod = boolean2.modify_states(text = text_ics, turnon = on)
                                        model_jag_oe = Model(text = text_mod, mode = 'sync')
                                        text_mod2 = boolean2.modify_states(text = text_ics, turnoff = off)
                                        model_jag_ko = Model(text = text_mod2, mode = 'sync')
                                        model.initialize()
                                        model_jag_oe.initialize()
                                        model_jag_ko.initialize()
                                        model.iterate(steps = 20)
                                        model_jag_oe.iterate(steps = 20)
                                        model_jag_ko.iterate(steps = 20)

                                        n = model.data['n']
                                        d = model.data['d']
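
                                        # Hypothetical continuation: compare the fate nodes defined in the
                                        # rules above across the wild-type, Jagged-overexpression and
                                        # Jagged-knockout models.
                                        for label, m in [('WT', model), ('OE', model_jag_oe), ('KO', model_jag_ko)]:
                                            print('%s tip=%s stalk=%s hybrid=%s' % (label, m.data['tip'][-1], m.data['stalk'][-1], m.data['hybrid'][-1]))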
Example #22
# put the boolean library on the path
from boolean2 import Model, util

def start_iteration(index, model):
    if index < 5:
        state = True
    else:
        state = False

    model.last['A'] = state


#
# run the simulation
#
model = Model( mode='sync', text='demo-rules.txt')
model.initialize()
#model.RULE_START_ITERATION = start_iteration
model.iterate( steps=5 )

# all the states are now computed and stored internally

# you can print the states
for state in model.states:
    print state

print '-' * 20

# save states into a file
model.save_states( 'states.txt' )
def all_ss_model_w_fix(str_file,
                       str_mode,
                       timecourse,
                       csv_out_txt,
                       geneon=[],
                       geneoff=[]):
    '''
    Generates and simulates every possible starting state of a model. This is a
    modification of the all_ss_model() function that also allows specific nodes
    to be fixed on or off.
    str_file: as a string, the name of the file containing the rules for the boolean model
    str_mode: the update mode of the model, i.e. 'sync' or 'async'
    timecourse: the number of steps each simulation is iterated
    csv_out_txt: the name of the CSV file the results are written to
    geneon / geneoff: lists of node names forced ON / OFF in every simulation
    :return:
    A pandas DataFrame of the per-simulation results (also written to csv_out_txt).
    '''
    Bool1 = file(str_file).read()

    # Generate a new version of the model that allows the genes to be fixed on or off by removing updating rules for
    # them. This will not override the initialized value however, and that must be updated to be assigned on or off
    # separately.
    on = geneon
    off = geneoff
    Bool2 = tokenizer.modify_states(Bool1, on, off)
    model = Model(text=Bool2, mode=str_mode)
    initializer = state.all_initial_states(model.nodes, limit=None)
    n = 0
    d = []

    # Utilized in the for loops for the loading bar
    load_status_divisor = len(model.nodes) - len(geneoff) - len(geneon)

    # The BooleanNet Data Collector. Here it is implemented to gather data on the states of the nodes in the model.
    coll = util.Collector()

    # Wiley Stoeber. 5/8/18. Create a modified version of the initializer that will pass over initial states that
    # contradict the gene set mode
    initializer_new = []

    if geneoff != [] or geneon != []:
        for data_init in initializer:
            data = data_init[0]
            for i in range(len(geneoff)):
                if not data[str(geneoff[i])]:  # keep states where the knocked-out gene starts OFF
                    initializer_new.append(data_init)
            for i in range(len(geneon)):
                if data[str(geneon[i])]:
                    initializer_new.append(data_init)

        for data, initfunc in initializer_new:
            # Fixes genes on or off (True or False) at their starting state.
            for i in range(len(geneoff)):
                data.update({str(geneoff[i]): False})
            for i in range(len(geneon)):
                data.update({str(geneon[i]): True})

            # Initialize the model with the given pre-computed initial conditions stored in the data variable.
            # for a given model with Z nodes, there are 2 to the power of Z starting states.
            model.initialize(defaults=data)
            model.iterate(steps=timecourse)
            e = model.detect_cycles()
            nodes = [
                'Apoptosis', 'Proliferation', 'Angiogenesis', 'Differentiation'
            ]

            d.append(list(model.detect_cycles()))
            d[n].append(model.fp())
            d[n].append(model.first)
            d[n].append(model.last)
            detect_states = 0 - e[1]
            coll.collect(states=model.states[detect_states:], nodes=nodes)

            # Converts collected data back to immutable tuple
            # d[n] = tuple(d[n])

            # Show data as it is generated. For debug purposes
            print d[n]
            # this print function is the status bar output.
            print((n) / float(pow(2, load_status_divisor))) * 100, "% done."
            n += 1
    else:
        for data, initfunc in initializer:
            # Fixes genes on or off (True or False) at their starting state.
            for i in range(len(geneoff)):
                data.update({str(geneoff[i]): False})
            for i in range(len(geneon)):
                data.update({str(geneon[i]): True})

            # Initialize the model with the given pre-computed initial conditions stored in the data variable.
            # for a given model with Z nodes, there are 2 to the power of Z starting states.
            model.initialize(defaults=data)
            model.iterate(steps=timecourse)
            e = model.detect_cycles()
            nodes = [
                'Apoptosis', 'Proliferation', 'Angiogenesis', 'Differentiation'
            ]

            d.append(list(model.detect_cycles()))
            d[n].append(model.fp())
            d[n].append(model.first)
            d[n].append(model.last)
            detect_states = 0 - e[1]
            coll.collect(states=model.states[detect_states:], nodes=nodes)

            # Converts collected data back to immutable tuple
            # d[n] = tuple(d[n])

            # Show data as it is generated. For debug purposes
            print d[n]
            # this print function is the status bar output.
            print((n) / float(pow(2, load_status_divisor))) * 100, "% done."
            n += 1

    # Add the avg on state of the nodes specified by the user. They will output to new columns in row 1 of the data

    avgs = coll.get_averages(normalize=True)
    Angiogenesis = avgs['Angiogenesis']
    Proliferation = avgs['Proliferation']
    Apoptosis = avgs['Apoptosis']
    Differentiation = avgs['Differentiation']

    # Append the average Data to the data set
    d[0].append(Apoptosis[0])
    d[0].append(Proliferation[0])
    d[0].append(Angiogenesis[0])
    d[0].append(Differentiation[0])

    # header names for pandas dataframe
    headers = ['Index', 'CycleLength', 'CycleFingerprint', 'FirstState', 'LastState',\
               'Avg_Apoptosis', 'Avg_Proliferation', 'Avg_Angiogenesis', 'Avg_Differentiation']

    # convert information to a pandas dataframe

    df = pd.DataFrame(d)
    df.columns = headers
    print

    # Export data to csv
    with open(csv_out_txt, 'wb') as out:
        csv_out = csv.writer(out)
        csv_out.writerow(headers)
        for row in d:
            csv_out.writerow(row)
    return df
Example #24
from boolean2 import Model, util


def start_iteration(index, model):
    if index < 5:
        state = True
    else:
        state = False

    model.last['A'] = state


#
# run the simulation
#
model = Model(mode='sync', text='demo-rules.txt')
model.initialize()
# model.RULE_START_ITERATION = start_iteration
model.iterate(steps=5)

# all the states are now computed and stored internally

# you can print the states
for state in model.states:
    print(state)

print('-' * 20)

# save states into a file
model.save_states('states.txt')
Example #25
from boolean2 import Model

text = """
# initial values
A = B = C = True

# updating rules

A* = A and C
B* = A and B
C* = not A
"""

model = Model(text=text, mode='sync')
model.initialize()
model.iterate(steps=5)

for state in model.states:
    print(state.A, state.B, state.C)
    """
    if name == 'A':
        # pick at random from True, False and original value
        value = random.choice( [True, False, value] )

    return util.default_set_value( state, name, value, p )    

def new_getvalue( state, name, p):
    """
    Called every time a node value is used in an expression. 
    It will override the value for the current step only.
    """
    value = util.default_get_value( state, name, p ) 

    if name == 'B':
        # pick at random from True, False and original value
        return random.choice( [True, False, value] )  
    else:
        return value 

model = Model( mode='sync', text='demo-rules.txt' )

# assign the new rules to the engine
model.RULE_SETVALUE = new_setvalue
model.RULE_GETVALUE = new_getvalue

model.initialize()
model.iterate( steps=5 )

for state in model.states:
    print(state)