Example #1
def simulation( ):

    # creating the transition graph, the verbose parameter sets the nodes to 
    # either short names like 1, 2, 3 or long (binary) names like 10001
    #
    # the logfile contains the transition edges and the identity of the nodes
    trans = network.TransGraph( logfile='timemodel.log', verbose=True )

    # create the model, you may use a text string or a filename
    model = boolean2.Model( text='timemodel.txt', mode='time')

    
    # here we generate all initial states
    #
    # IMPORTANT: Only uninitialized nodes will get new values;
    # to keep a node the same in all iterations, initialize it in the rules
    #
    # when the limit parameter is a number it will take the first that
    # many initial values; leave it as None to use all initial values
    initializer = state.all_initial_states( model.nodes, limit=None )

    # data is a dictionary with the initial values; print it to see what it contains
    # initfunc is the initializer function used to fill in the missing nodes
    for data, initfunc in initializer:
        model.initialize( missing=initfunc )
        model.iterate( 12 )
        trans.add( model.states, times=range(12) )

    # saves the transition graph into a gml file
    trans.save( 'timemodel.gml' )
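
# Added note: the 'timemodel.txt' rule file is not shown in this example. A minimal,
# hypothetical rule file using plain BooleanNet syntax (placeholder node names, not the
# original model) might look like:
#
#   # initial values
#   A = True
#   B = False
#   # update rules
#   A* = not B
#   B* = A and B
#
# 'time' mode models may carry additional timing annotations that are not sketched here.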
Example #2
def simulation():

    # creating the transition graph, the verbose parameter sets the nodes to
    # either short names like 1, 2, 3 or long (binary) names like 10001
    #
    # the logfile contains the transition edges and the identity of the nodes
    trans = network.TransGraph(logfile='timemodel.log', verbose=True)

    # create the model, you may use a text string or a filename
    model = boolean2.Model(text='timemodel.txt', mode='time')

    # here we generate all initial states
    #
    # IMPORTANT: Only uninitialized nodes will get new values;
    # to keep a node the same in all iterations, initialize it in the rules
    #
    # when the limit parameter is a number it will take the first that
    # many initial values; leave it as None to use all initial values
    initializer = state.all_initial_states(model.nodes, limit=None)

    # data is a dictionary with the initial values; print it to see what it contains
    # initfunc is the initializer function used to fill in the missing nodes
    for data, initfunc in initializer:
        model.initialize(missing=initfunc)
        model.iterate(12)
        trans.add(model.states, times=range(12))

    # saves the transition graph into a gml file
    trans.save('timemodel.gml')
Example #3
def simulation( trans ):
    "One simulation step will update the transition graph"

    # create the model
    model = boolean2.Model( text=rules, mode='async')

    # generate all initial states; when limit is a number only the first that
    # many initial states are kept, None means use all of them
    initializer = state.all_initial_states( model.nodes, limit=None )

    # data holds the initial values, initfunc is the initializer function
    for data, initfunc in initializer:
        model.initialize(missing=initfunc)
        model.iterate(5)
        trans.add( model.states, times=list(range(5)) )
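
# Usage sketch (added, not part of the original example): simulation() relies on a
# module-level `rules` string and on the caller passing in a TransGraph. A hypothetical
# driver, using only calls that appear elsewhere in these examples:

import boolean2
from boolean2 import state, network

# placeholder rules; uninitialized nodes are filled in by the initializer inside simulation()
rules = """
A* = not B
B* = A
"""

trans = network.TransGraph(logfile='async.log', verbose=True)

# async updates are stochastic, so sample several runs into the same transition graph
for repeat in range(10):
    simulation(trans)

# save the accumulated transition graph to a GML file
trans.save('async.gml')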
Example #4
def simulation(trans):
    "One simulation step will update the transition graph"

    # create the model
    model = boolean2.Model(text=rules, mode='async')

    # generate all initial states; when limit is a number only the first that
    # many initial states are kept, None means use all of them
    initializer = state.all_initial_states(model.nodes, limit=None)

    # data holds the initial values, initfunc is the initializer function
    for data, initfunc in initializer:
        model.initialize(missing=initfunc)
        model.iterate(5)
        trans.add(model.states, times=range(5))
Example #5
def all_ss_model_w_fix(str_file,
                       str_mode,
                       timecourse,
                       csv_out_txt,
                       geneon=[],
                       geneoff=[]):
    '''
    Simulate a boolean model from every possible starting state. Note that this function is a
    modification of the all_ss_model() function and allows for specific nodes to be fixed on or off.
    str_file: as a string, the name of the file containing the rules for the boolean model
    str_mode: the update mode of the model, i.e. 'sync' or 'async'
    timecourse: the number of time steps to iterate each simulation
    csv_out_txt: the name of the CSV file the results are written to
    geneon / geneoff: lists of node names that are forced ON / OFF in every simulation
    :return:
    A pandas DataFrame with the cycle data for every starting state (average node states are appended to the first row).
    '''
    Bool1 = open(str_file).read()

    # Generate a new version of the model that allows the genes to be fixed on or off by removing updating rules for
    # them. This will not override the initialized value however, and that must be updated to be assigned on or off
    # separately.
    on = geneon
    off = geneoff
    Bool2 = tokenizer.modify_states(Bool1, on, off)
    model = Model(text=Bool2, mode=str_mode)
    initializer = state.all_initial_states(model.nodes, limit=None)
    n = 0
    d = []

    # Utilized in the for loops for the loading bar
    load_status_divisor = len(model.nodes) - len(geneoff) - len(geneon)

    # The BooleanNet Data Collector. Here it is implemented to gather data on the states of the nodes in the model.
    coll = util.Collector()

    # Wiley Stoeber. 5/8/18. Create a modified version of the initializer that will pass over initial states that
    # contradict the gene set mode
    initializer_new = []

    if geneoff != [] or geneon != []:
        for data_init in initializer:
            data = data_init[0]
            for i in range(len(geneoff)):
                # keep only states where the forced-off gene already starts False
                if not data[str(geneoff[i])]:
                    initializer_new.append(data_init)
            for i in range(len(geneon)):
                # keep only states where the forced-on gene already starts True
                if data[str(geneon[i])]:
                    initializer_new.append(data_init)

        for data, initfunc in initializer_new:
            # Fixes genes on or off (True or False) at their starting state.
            for i in range(len(geneoff)):
                data.update({str(geneoff[i]): False})
            for i in range(len(geneon)):
                data.update({str(geneon[i]): True})

            # Initialize the model with the given pre-computed initial conditions stored in the data variable.
            # for a given model with Z nodes, there are 2 to the power of Z starting states.
            model.initialize(defaults=data)
            model.iterate(steps=timecourse)
            e = model.detect_cycles()
            nodes = [
                'Apoptosis', 'Proliferation', 'Angiogenesis', 'Differentiation'
            ]

            d.append(list(model.detect_cycles()))
            d[n].append(model.fp())
            d[n].append(model.first)
            d[n].append(model.last)
            detect_states = 0 - e[1]
            coll.collect(states=model.states[detect_states:], nodes=nodes)

            # Converts collected data back to immutable tuple
            # d[n] = tuple(d[n])

            # Show data as it is generated. For debug purposes
            print d[n]
            # status bar output
            prc = (n / float(pow(2, load_status_divisor))) * 100
            print '%.2f' % prc + "% done."
            n += 1
    else:
        for data, initfunc in initializer:
            # Fixes genes on or off (True or False) at their starting state.
            for i in range(len(geneoff)):
                data.update({str(geneoff[i]): False})
            for i in range(len(geneon)):
                data.update({str(geneon[i]): True})

            # Initialize the model with the given pre-computed initial conditions stored in the data variable.
            # for a given model with Z nodes, there are 2 to the power of Z starting states.
            model.initialize(defaults=data)
            model.iterate(steps=timecourse)
            e = model.detect_cycles()
            nodes = [
                'Apoptosis', 'Proliferation', 'Angiogenesis', 'Differentiation'
            ]

            d.append(list(model.detect_cycles()))
            d[n].append(model.fp())
            d[n].append(model.first)
            d[n].append(model.last)
            detect_states = 0 - e[1]
            coll.collect(states=model.states[detect_states:], nodes=nodes)

            # Converts collected data back to immutable tuple
            # d[n] = tuple(d[n])

            # Show data as it is generated. For debug purposes
            print d[n]
            # status bar output
            prc = (n / float(pow(2, load_status_divisor))) * 100
            print '%.2f' % prc + "% done."
            n += 1

    # Add the average ON state of the nodes specified by the user; they are written to new columns in the first row of the data

    avgs = coll.get_averages(normalize=True)
    Angiogenesis = avgs['Angiogenesis']
    Proliferation = avgs['Proliferation']
    Apoptosis = avgs['Apoptosis']
    Differentiation = avgs['Differentiation']

    # Append the average Data to the data set
    d[0].append(Apoptosis[0])
    d[0].append(Proliferation[0])
    d[0].append(Angiogenesis[0])
    d[0].append(Differentiation[0])

    # header names for pandas dataframe
    headers = ['Index', 'CycleLength', 'CycleFingerprint', 'FirstState', 'LastState',\
               'Avg_Apoptosis', 'Avg_Proliferation', 'Avg_Angiogenesis', 'Avg_Differentiation']

    # convert information to a pandas dataframe

    df = pd.DataFrame(d)
    df.columns = headers
    print

    # Export data to csv
    with open(csv_out_txt, 'wb') as out:
        csv_out = csv.writer(out)
        csv_out.writerow(headers)
        for row in d:
            csv_out.writerow(row)
    return df
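
# Usage sketch (added): hypothetical file names; the rules file must define the
# 'Apoptosis', 'Proliferation', 'Angiogenesis' and 'Differentiation' nodes that are
# hard-coded in the function above.
#
#   df = all_ss_model_w_fix('bpc_rules.txt', 'sync', timecourse=20,
#                           csv_out_txt='all_states.csv',
#                           geneon=['MYCN'], geneoff=['p53'])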
Example #6
import boolean2
from boolean2 import util, state, network

# updating rules

rules = """
    CIS* = Ca
    CaATPase* = Ca
    Ca* = CIS and (not CaATPase)
"""

# create the model
model = boolean2.Model( text=rules, mode='async')

# generate all initial states; when limit is a number only the first that
# many initial states are kept, None means use all of them
initializer = state.all_initial_states( model.nodes, limit=None )

# data holds the initial values, initfunc is the initializer function
for data, initfunc in initializer:
    # shows the initial values
    print(data)
    model.initialize(missing=initfunc)
    model.iterate(5)
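    # Added: after iterate() the trajectory can be inspected with the same calls used in
    # the larger examples below, e.g. the (start index, cycle length) pair from
    # detect_cycles() and the per-state fingerprints from fp():
    print(model.detect_cycles())
    print(model.fp())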
Example #7
def BoolMod(timecourse, booldata, str_mode):
    coll = util.Collector()
    model = Model(text=booldata, mode=str_mode)
    initializer = state.all_initial_states(model.nodes, limit=None)

    # data holds the initial values, initfunc is the initializer function
    n = 0
    for data, initfunc in initializer:
        # progress indicator (32768 = 2**15, presumably the number of initial states of this model)
        prc = (n / 32768.0) * 100
        print '%.2f' % prc + "% done."
        n += 1
        model.initialize(missing=initfunc)
        model.iterate(steps=timecourse)
        # takes all nodes
        nodes = model.nodes
        coll.collect(states=model.states, nodes=nodes)
    # --------------- Detect Cycles ------------------#
    print model.report_cycles()
    # Return results
    avgs = coll.get_averages(normalize=True)
    data = pd.DataFrame(avgs)
    pd.DataFrame.to_csv(data, "test.csv")
    print avgs
    print model.fp()
    # ------------- Retrieve the Data -------------#
    # 6-gene input signature
    #ALK = avgs.get('ALK')
    #MDK = avgs.get('MDK')
    #TrkA = avgs.get('TrkA')
    #NGF = avgs.get('NGF')
    #TrkB = avgs.get('TrkB')
    #BDNF = avgs.get('BDNF')

    # Model outcome states
    Differentiation = avgs.get('Differentiation')
    Apoptosis = avgs.get('Apoptosis')
    Proliferation = avgs.get('Proliferation')
    Angiogenesis = avgs.get('Angiogenesis')

    # Nodes at Issue
    #DNADamage = avgs.get('DNADamage')
    #p53 = avgs.get('p53')

    # Other Nodes
    #MDM2 = avgs.get('MDM2')
    #MAPK = avgs.get('MAPK')
    #p27 = avgs.get('p27')
    #FoxO = avgs.get('FoxO')
    #AKT = avgs.get('AKT')
    #Ras = avgs.get('Ras')
    #MYCN = avgs.get('MYCN')
    #MTOR = avgs.get('MTOR')
    #IP3 = avgs.get('IP3')

    ### Time axis (x)
    t = range(0, timecourse + 1)

    # Create plots with pre-defined labels. Try to make this a for loop
    fig, ax = plt.subplots()
    ax.plot(t, Differentiation, label='Differentiation')
    ax.plot(t, Apoptosis, label='Apoptosis')
    ax.plot(t, Angiogenesis, label='Angiogenesis')
    ax.plot(t, Proliferation, label='Proliferation')
    # ax.plot(t, TrkA, label='TrkA')
    # ax.plot(t, TrkB, label = 'TrkB')
    # ax.plot(t, MYCN, label = 'MYCN')
    # ax.plot(t, NGF, label = 'NGF')
    # ax.plot(t, MDK, label = 'MDK')
    # ax.plot(t, ALK, label = 'ALK')
    # ax.plot(t, Ras, label = 'Ras')
    # x.plot(t, AKT, label = 'AKT')
    # ax.plot(t, FoxO, label = 'FoxO')
    # ax.plot(t, p27, label = 'P27')
    # ax.plot(t, p53, label = 'P53')

    # Legends
    legend = ax.legend(loc=0, shadow=True, fontsize='medium')
    plt.xlabel('Iterations')
    plt.ylabel('On Proportion')
    plt.title("Asynchronous Updating Model")

    # beautify
    legend.get_frame().set_facecolor('#C0C0C0')

    # View Plot
    plt.show()

    return avgs
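
# Usage sketch (added): hypothetical file name; BoolMod() expects the rule text itself
# (not a file name), the number of steps and the update mode, and the model must define
# the Differentiation, Apoptosis, Proliferation and Angiogenesis outcome nodes.
#
#   booldata = open('rules.txt').read()
#   avgs = BoolMod(timecourse=25, booldata=booldata, str_mode='async')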
Example #8
def datahandler(str_file, str_mode='sync', timecourse=10, csv_out_txt="default.csv", geneon=[], geneoff=[], dumpevery=1000,\
                nodes_for_averages=['Apoptosis', 'Proliferation', 'Angiogenesis', 'Differentiation'], debug=False, graph=False):
    '''
    Simulate a boolean model from every possible starting state. Note that this function is a
    modification of the all_ss_model() function and allows for specific nodes to be fixed on or off.

    str_file: as a string, the name of the file containing the rules for the boolean model
    str_mode: the update mode of the model, i.e. 'sync' or 'async'. Default is 'sync'
    timecourse: the number of time steps that the model will progress through. Default is ten
    csv_out_txt: the results are output to this CSV file. Default is 'default.csv'
    geneon: a list of node names as strings. These nodes will be forced ON for all simulations
    geneoff: a list of node names as strings. These nodes will be forced OFF for all simulations
    dumpevery: uses the dumper sub-function to write the data to file periodically to avoid running
        out of RAM. Default is to dump every 1000 simulations
    nodes_for_averages: a list of node names as strings. The program keeps track of the average ON
        state of these nodes. The default values are for use with the BPC model of neuroblastoma (2018)
    debug: default False. When True, prints the progress of the simulation; slightly slows the
        simulation when on
    graph: default False. When True, displays a bar chart of the average ON state of the
        nodes_for_averages columns
    :return:
    A pandas DataFrame of the per-starting-state cycle data written to csv_out_txt.
    '''
    def dumper():
        '''
        sub-function
        Will dump the data into a CSV file of name csv_out_txt, keeps memory burden low for extremely large networks.
        :return: none
        '''
        with open(csv_out_txt, 'a') as csvfile:
            datadumper = csv.writer(csvfile, lineterminator='\n')
            for row in d:
                datadumper.writerow(row)
        return None

    # Header names for pandas dataframe and CSV data dump file
    headers = ['Index', 'CycleLength', 'CycleFingerprint', 'FirstState', 'LastState', 'FirstFP', 'SteadyState', str(nodes_for_averages[0]),\
               str(nodes_for_averages[1]), str(nodes_for_averages[2]), str(nodes_for_averages[3])]

    # Init a CSV to contain the data generated over the course of the run
    with open(csv_out_txt, 'wb') as out:
        csv_out = csv.writer(out)
        csv_out.writerow(headers)

    # Variable saving the imported list of rules for use by the model.
    Bool1 = open(str_file).read()

    # Generate a new version of the model that allows the genes to be fixed on or off by removing updating rules for
    # them. This will not override the initialized value however, and that must be updated to be assigned on or off
    # separately.
    on = geneon
    off = geneoff
    Bool2 = tokenizer.modify_states(Bool1, on, off)
    model = Model(text=Bool2, mode=str_mode)
    initializer = state.all_initial_states(model.nodes, limit=None)

    # Utilized in the for loops for the loading bar
    load_status_divisor = len(model.nodes) - len(geneoff) - len(geneon)

    # The BooleanNet Data Collector. Here it is implemented to gather data on the states of the nodes in the model.
    coll = util.Collector()

    # Wiley Stoeber. 5/8/18. Create a modified version of the initializer that will pass over initial states that
    # contradict the gene set mode
    initializer_new = []

    # An initializer for single KO mutants.
    # TODO I need to make it so this initializer can adapt to any number of KO/OE genes. As it stands it can only do up
    # to a double. Not helpful! Need to make recursive?
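    # (Added sketch for the TODO above) One non-recursive way to generalize the filter
    # would be to keep an initial state only if every forced-off gene starts False and
    # every forced-on gene starts True, for any number of KO/OE genes:
    #
    #   initializer_new = [pair for pair in initializer
    #                      if all(not pair[0][g] for g in geneoff)
    #                      and all(pair[0][g] for g in geneon)]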
    if geneoff != [] or geneon != []:
        if len(geneoff) + len(geneon) == 1:
            d = []
            p = 0
            if debug:
                n = 0
            for data_init in initializer:
                data = data_init[0]
                for i in range(len(geneoff)):
                    if not data[geneoff[i]]:
                        initializer_new.append(data_init)
                        if debug:
                            print 'Single Gene Knockout, Geneoff'
                for i in range(len(geneon)):
                    if data[geneon[i]]:
                        initializer_new.append(data_init)
                        if debug:
                            print 'Single Gene Overexpression, Geneon'

        # An initializer for double KO when some on and some off
        elif len(geneoff) >= 1 and len(geneon) >= 1:
            d = []
            p = 0
            if debug:
                n = 0
            for data_init in initializer:
                data = data_init[0]
                if not data[geneoff[0]]:
                    if data[geneon[0]]:
                        initializer_new.append(data_init)
                        if debug:
                            print 'Gene off then gene on, KO then Overexpression'
                elif data[geneon[0]]:
                    if not data[geneoff[0]]:
                        initializer_new.append(data_init)
                        if debug:
                            print 'Gene on then gene off, Overexpression then KO'
                else:
                    pass

        # An initializer for double KO when both genes are on or off
        elif len(geneoff) == 2 or len(geneon) == 2:
            d = []
            p = 0
            if debug:
                n = 0
            if len(geneoff) == 2:
                for data_init in initializer:
                    data = data_init[0]
                    # keep only states where both forced-off genes start False
                    if not data[geneoff[0]] and not data[geneoff[1]]:
                        initializer_new.append(data_init)
                        if debug:
                            print 'Double Gene Knockout'

            if len(geneon) == 2:
                for data_init in initializer:
                    data = data_init[0]
                    # keep only states where both forced-on genes start True
                    if data[geneon[0]] and data[geneon[1]]:
                        initializer_new.append(data_init)
                        if debug:
                            print 'Double Gene Overexpression'
        for data, initfunc in initializer_new:
            # Fixes genes on or off (True or False) at their starting state.
            for i in range(len(geneoff)):
                data.update({str(geneoff[i]): False})
            for i in range(len(geneon)):
                data.update({str(geneon[i]): True})

            # Initialize the model with the given pre-computed initial conditions stored in the data variable.
            # for a given model with Z nodes, there are 2 to the power of Z starting states.
            # todo: switch to dynamic length if i is going to go over 10, becomes much less efficient quickly
            model.initialize(defaults=data)
            for i in range(timecourse):
                model.iterate(steps=i)
                e = model.detect_cycles()
                nodes = nodes_for_averages

                if e[1] == 1:
                    # append the index at which the cycle began and the cycle length to the data
                    d.append(list(model.detect_cycles()))
                    # append the model fingerprint for the entirety of the simulation
                    d[p].append(model.fp())
                    # append the first state of the model, coincides with assigned state
                    d[p].append(model.first)
                    # append the last state of the model, coincides with one part of LC or FPA
                    d[p].append(model.last)
                    # append an int that represents the model's starting state, useful for
                    # later sorting (especially in Excel)
                    d[p].append(model.fp()[0])

                    detect_states = 0 - e[1]

                    # Affix the limit cycle or FPA that defines the simulation to a new column.
                    a = model.fp()
                    b = a[detect_states:]
                    c = sorted(b)
                    d[p].append(c)

                    coll.collect(states=model.states[detect_states:],
                                 nodes=nodes)
                    # Console output for debugging and progress tracking
                    if debug:
                        print 'The fingerprint is', d[p][2]
                        print 'The cycle length is', d[p][1]
                        prc = ((n) / float(pow(2, load_status_divisor))) * 100
                        print '%.2f' % prc + "% done."
                        print '\n'
                    break

                elif e[1] > 1:

                    # append the index at which the cycle began and the cycle length to the data
                    d.append(list(model.detect_cycles()))
                    # append the model fingerprint for the entirety of the simulation
                    d[p].append(model.fp())
                    # append the first state of the model, coincides with assigned state
                    d[p].append(model.first)
                    # append the last state of the model, coincides with one part of LC or FPA
                    d[p].append(model.last)
                    # append an int that represents the model's starting state, useful for
                    # later sorting (especially in Excel)
                    d[p].append(model.fp()[0])

                    detect_states = 0 - e[1]

                    # Affix the limit cycle or FPA that defines the simulation to a new column.
                    a = model.fp()
                    b = a[detect_states:]
                    c = sorted(b)
                    d[p].append(c)

                    coll.collect(states=model.states[detect_states:],
                                 nodes=nodes)
                    # Console output for debugging and progress tracking
                    if debug:
                        print 'The fingerprint is', d[p][2]
                        print 'The cycle length is', d[p][1]

                        prc = ((n) / float(pow(2, load_status_divisor))) * 100
                        print '%.2f' % prc + "% done."
                        print '\n'
                    break

                elif e[1] == 0 and i + 1 == timecourse:
                    # append the index at which the cycle began and the cycle length to the data
                    d.append(list(model.detect_cycles()))
                    # append the model fingerprint for the entirety of the simulation
                    d[p].append(model.fp())
                    # append the first state of the model, coincides with assigned state
                    d[p].append(model.first)
                    # append the last state of the model, coincides with one part of LC or FPA
                    d[p].append(model.last)
                    # append an int that represents the model's starting state, useful for
                    # later sorting (especially in Excel)
                    d[p].append(model.fp()[0])

                    detect_states = 0 - e[1]

                    # Affix the limit cycle or FPA that defines the simulation to a new column.
                    a = model.fp()
                    b = a[detect_states:]
                    c = sorted(b)
                    d[p].append(c)

                    coll.collect(states=model.states[detect_states:],
                                 nodes=nodes)
                    # Console output for debugging and progress tracking
                    if debug:
                        print 'The fingerprint is', d[p][2]
                        print 'The cycle length is', d[p][1]
                        prc = ((n) / float(pow(2, load_status_divisor))) * 100
                        print '%.2f' % prc + "% done."
                        print '\n'
                    break

            # Advance the counters (n is only used for the debug progress output)
            if debug:
                n += 1
            p += 1

            # Dump data to file if the iterator p has reached the dumpevery variable. Keeps RAM burden low. Resets the
            # p counter to zero and overwrites the data that had been written to file.
            if p == dumpevery:
                dumper()
                p = 0
                d = []

        # A catchall at the end of the set of simulations that dumps any remaining data that might not have been
        # enough to trigger the prior dumper.
        dumper()
        d = []

    else:
        d = []
        p = 0
        if debug:
            n = 0
        for data, initfunc in initializer:
            # Fixes genes on or off (True or False) at their starting state.
            # Initialize the model with the given pre-computed initial conditions stored in the data variable.
            # for a given model with Z nodes, there are 2 to the power of Z starting states.
            model.initialize(defaults=data)
            for i in range(timecourse):
                model.iterate(steps=i)
                e = model.detect_cycles()
                nodes = nodes_for_averages
                if debug:
                    print e

                if e[1] == 1:
                    # append the index at which the cycle began and the cycle length to the data
                    d.append(list(model.detect_cycles()))
                    # append the model fingerprint for the entirety of the simulation
                    d[p].append(model.fp())
                    # append the first state of the model, coincides with assigned state
                    d[p].append(model.first)
                    # append the last state of the model, coincides with one part of LC or FPA
                    d[p].append(model.last)
                    # append an int that represents the model's starting state, useful for
                    # later sorting (especially in Excel)
                    d[p].append(model.fp()[0])

                    detect_states = 0 - e[1]

                    # Affix the limit cycle or FPA that defines the simulation to a new column.
                    a = model.fp()
                    b = a[detect_states:]
                    c = sorted(b)
                    d[p].append(c)

                    # Affix the limit cycle or FPA that defines the simulation to a new column
                    coll.collect(states=model.states[detect_states:],
                                 nodes=nodes)
                    # Console output for debugging and progress tracking
                    if debug:
                        print 'The fingerprint is', d[p][2]
                        print 'The cycle length is', d[p][1]
                        prc = ((n) / float(pow(2, load_status_divisor))) * 100
                        print '%.2f' % prc + "% done."
                        print '\n'
                    break

                elif e[1] > 1:

                    # append the index at which the cycle began and the cycle length to the data
                    d.append(list(model.detect_cycles()))
                    # append the model fingerprint for the entirety of the simulation
                    d[p].append(model.fp())
                    # append the first state of the model, coincides with assigned state
                    d[p].append(model.first)
                    # append the last state of the model, coincides with one part of LC or FPA
                    d[p].append(model.last)
                    # append an int that represents the model's starting state, useful for
                    # later sorting (especially in Excel)
                    d[p].append(model.fp()[0])

                    detect_states = 0 - e[1]

                    # Affix the limit cycle or FPA that defines the simulation to a new column.
                    a = model.fp()
                    b = a[detect_states:]
                    c = sorted(b)
                    d[p].append(c)

                    coll.collect(states=model.states[detect_states:],
                                 nodes=nodes)
                    # Console output for debugging and progress tracking
                    if debug:
                        print 'The fingerprint is', d[p][2]
                        print 'The cycle length is', d[p][1]
                        prc = ((n) / float(pow(2, load_status_divisor))) * 100
                        print '%.2f' % prc + "% done."
                        print '\n'
                    break

                # This function makes sure that the entire user specified timecourse has passed before writing
                # that the model did not reach a steady state.
                elif e[1] == 0 and i + 1 == timecourse:
                    # append the index at which the cycle began and the cycle length to the data
                    d.append(list(model.detect_cycles()))
                    # append the model fingerprint for the entirety of the simulation
                    d[p].append(model.fp())
                    # append the first state of the model, coincides with assigned state
                    d[p].append(model.first)
                    # append the last state of the model, coincides with one part of LC or FPA
                    d[p].append(model.last)
                    # detect states is used to figure out how many states to write to the collector object.
                    detect_states = 0 - e[1]
                    # append an int that represents the model's starting state, useful for
                    # later sorting (especially in Excel)
                    d[p].append(model.fp()[0])

                    # Affix the limit cycle or FPA that defines the simulation to a new column.
                    a = model.fp()
                    b = a[detect_states:]
                    c = sorted(b)
                    d[p].append(c)

                    # Collect the averages over the course of the run.
                    coll.collect(states=model.states[detect_states:],
                                 nodes=nodes)
                    # Console output for debugging and progress tracking
                    if debug:
                        print 'The fingerprint is', d[p][2]
                        print 'The cycle length is', d[p][1]
                        prc = ((n) / float(pow(2, load_status_divisor))) * 100
                        print '%.2f' % prc + "% done."
                        print '\n'
                    break

            # Advance the counters (n is only used for the debug progress output)
            if debug:
                n += 1
            p += 1

            # Dump data to file if the iterator p has reached the dumpevery variable. Keeps RAM burden low. Resets the
            # p counter to zero and overwrites the data that had been written to file.
            if p == dumpevery:
                dumper()
                p = 0
                d = []
        # A catchall at the end of the set of simulations that dumps any remaining data that might not have been enough
        # to trigger the prior dumper.
        dumper()
        d = []

    # Generate a pandas DataFrame of the dump file.

    df = pd.read_csv(csv_out_txt)

    # Count how many simulations ended in each steady state (fixed point attractors only)
    fpa = df[df.CycleLength == 1].copy()
    fpa["SteadyState"] = fpa['SteadyState'].str.strip('[]').astype(int)
    counts = fpa['SteadyState'].value_counts().sort_index()
    counts = counts.reset_index()
    counts.columns = ["SS", "NumberInFPA"]
    print counts

    counts['Proportion'] = counts['NumberInFPA'] / float(
        pow(2, load_status_divisor))

    # If there are nodes of interest in nodes_for_averages, output the average ON state of those
    # nodes to the dataframe, then update the CSV to contain them
    if nodes_for_averages:
        avgs = coll.get_averages(normalize=True)
        for i in range(len(nodes_for_averages)):
            a1 = avgs[nodes_for_averages[i]]
            df.set_value(0, nodes_for_averages[i], a1[0])
        df.to_csv(csv_out_txt)

    #TODO: I need to create a method of exporting an excel file that is processed so that the user does not need to process them manually to get system state information

    # Graphical output
    # Displays a bar chart of the average ON proportion of the nodes_for_averages columns
    if graph:
        gr = df[list(nodes_for_averages)].dropna()
        plot = gr.plot(kind='bar')
        plot.set_xlabel("Groups")
        plot.set_ylabel("On Proportion")
        plt.show()

    return df
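
# Usage sketch (added): hypothetical file names; simulates every starting state of the
# model in 'bpc_rules.txt' for up to 30 steps with p53 forced off, writes the per-state
# cycle data to 'p53_ko.csv' and shows a bar chart of the average outcome-node ON states.
#
#   df = datahandler('bpc_rules.txt', str_mode='sync', timecourse=30,
#                    csv_out_txt='p53_ko.csv', geneoff=['p53'], graph=True)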
Example #9
def all_ss_model_w_fix(str_file, str_mode, timecourse, csv_out_txt, geneon=[], geneoff=[], dumpevery=1000,\
                       nodes_for_averages=['Apoptosis', 'Proliferation', 'Angiogenesis', 'Differentiation']):
    '''
    Simulate a boolean model from every possible starting state. Note that this function is a
    modification of the all_ss_model() function and allows for specific nodes to be fixed on or off.
    str_file: as a string, the name of the file containing the rules for the boolean model
    str_mode: the update mode of the model, i.e. 'sync' or 'async'
    timecourse: the number of time steps to iterate each simulation
    csv_out_txt: the results are output to this CSV file
    geneon / geneoff: lists of node names that are forced ON / OFF in every simulation
    dumpevery: how many simulations to buffer in memory before dumping them to the CSV file
    nodes_for_averages: a list of node names whose average ON state is tracked
    :return:
    A pandas DataFrame of the per-starting-state cycle data written to csv_out_txt.
    '''
    def dumper():
        '''
        will dump the data into a CSV file of name csv_out_txt
        :return: none
        '''
        with open(csv_out_txt, 'a') as csvfile:
            datadumper = csv.writer(csvfile, lineterminator='\n')
            for row in d:
                datadumper.writerow(row)
        return

    # header names for pandas dataframe and CSV data dump file
    headers = ['Index', 'CycleLength', 'CycleFingerprint', 'FirstState', 'LastState', str(nodes_for_averages[0]),\
               str(nodes_for_averages[1]), str(nodes_for_averages[2]), str(nodes_for_averages[3])]

    # Init a CSV to contain the data generated over the course of the run
    with open(csv_out_txt, 'wb') as out:
        csv_out = csv.writer(out)
        csv_out.writerow(headers)

    Bool1 = open(str_file).read()

    # Generate a new version of the model that allows the genes to be fixed on or off by removing updating rules for
    # them. This will not override the initialized value however, and that must be updated to be assigned on or off
    # separately.
    on = geneon
    off = geneoff
    Bool2 = tokenizer.modify_states(Bool1, on, off)
    model = Model(text=Bool2, mode=str_mode)
    initializer = state.all_initial_states(model.nodes, limit=None)

    # Utilized in the for loops for the loading bar
    load_status_divisor = len(model.nodes) - len(geneoff) - len(geneon)

    # The BooleanNet Data Collector. Here it is implemented to gather data on the states of the nodes in the model.
    coll = util.Collector()

    # Wiley Stoeber. 5/8/18. Create a modified version of the initializer that will pass over initial states that
    # contradict the gene set mode
    initializer_new = []

    if geneoff != [] or geneon != []:
        d = []
        p = 0
        n = 0
        for data_init in initializer:
            data = data_init[0]
            for i in range(len(geneoff)):
                # keep only states where the forced-off gene already starts False
                if not data[str(geneoff[i])]:
                    initializer_new.append(data_init)
            for i in range(len(geneon)):
                # keep only states where the forced-on gene already starts True
                if data[str(geneon[i])]:
                    initializer_new.append(data_init)

        for data, initfunc in initializer_new:
            # Fixes genes on or off (True or False) at their starting state.
            for i in range(len(geneoff)):
                data.update({str(geneoff[i]): False})
            for i in range(len(geneon)):
                data.update({str(geneon[i]): True})

            # Initialize the model with the given pre-computed initial conditions stored in the data variable.
            # for a given model with Z nodes, there are 2 to the power of Z starting states.
            model.initialize(defaults=data)
            model.iterate(steps=timecourse)
            e = model.detect_cycles()
            nodes = nodes_for_averages

            d.append(list(model.detect_cycles()))
            d[p].append(model.fp())
            d[p].append(model.first)
            d[p].append(model.last)
            detect_states = 0 - e[1]
            coll.collect(states=model.states[detect_states:], nodes=nodes)
            # Console output for debugging and progress tracking
            print 'The fingerprint is', d[p][2]
            print 'The cycle length is', d[p][1]

            prc = ((n) / float(pow(2, load_status_divisor))) * 100
            print '%.2f' % prc + "% done."
            print '\n'

            # Advance the counters (n is used for the progress output)
            n += 1
            p += 1

            if p == dumpevery:
                dumper()
                p = 0
                d = []

        dumper()
        d = []
    else:
        d = []
        p = 0
        n = 0
        for data, initfunc in initializer:
            # Fixes genes on or off (True or False) at their starting state.
            for i in range(len(geneoff)):
                data.update({str(geneoff[i]): False})
            for i in range(len(geneon)):
                data.update({str(geneon[i]): True})

            # Initialize the model with the given pre-computed initial conditions stored in the data variable.
            # for a given model with Z nodes, there are 2 to the power of Z starting states.
            model.initialize(defaults=data)
            model.iterate(steps=timecourse)
            e = model.detect_cycles()
            nodes = nodes_for_averages

            d.append(list(model.detect_cycles()))
            d[p].append(model.fp())
            d[p].append(model.first)
            d[p].append(model.last)
            detect_states = 0 - e[1]
            coll.collect(states=model.states[detect_states:], nodes=nodes)

            # Console output for debugging and progress tracking
            print 'The fingerprint is', d[p][2]
            print 'The cycle length is', d[p][1]

            prc = ((n) / float(pow(2, load_status_divisor))) * 100
            print '%.2f' % prc + "% done."
            print '\n'

            # Advance the counters (n is used for the progress output)
            n += 1
            p += 1

            if p == dumpevery:
                dumper()
                p = 0
                d = []

        dumper()
        d = []

    # Generate a pandas dataframe of the dump file

    df = pd.read_csv(csv_out_txt)

    # Add the average ON state of the nodes specified by the user; they are written to new columns in the first row of the data
    if nodes_for_averages != []:
        avgs = coll.get_averages(normalize=True)
        a1 = avgs[nodes_for_averages[0]]
        b1 = avgs[nodes_for_averages[1]]
        c1 = avgs[nodes_for_averages[2]]
        d1 = avgs[nodes_for_averages[3]]

        # Set the averages values in the dataframe.

        df.set_value(0, 'Apoptosis', a1[0])
        df.set_value(0, 'Proliferation', b1[0])
        df.set_value(0, 'Differentiation', d1[0])
        df.set_value(0, 'Angiogenesis', c1[0])

        df.to_csv(csv_out_txt)

    return df
Example #10
import boolean2
from boolean2 import util, state, network

# updating rules

rules = """
    CIS* = Ca
    CaATPase* = Ca
    Ca* = CIS and (not CaATPase)
"""

# create the model
model = boolean2.Model( text=rules, mode='async')

# generate all initial states; when limit is a number only the first that
# many initial states are kept, None means use all of them
initializer = state.all_initial_states( model.nodes, limit=None )

# data holds the initial values, initfunc is the initializer function
for data, initfunc in initializer:
    # shows the initial values
    print(data)
    model.initialize(missing=initfunc)
    model.iterate(5)