Example #1
def t_NUMBER(self, t):
    r"[+-]*\d+\.?\d*"
    try:
        t.value = float(t.value)
    except ValueError:
        util.error("value too large", t.value)
    return t
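
For context, a minimal sketch of how a token rule like t_NUMBER plugs into a PLY lexer. The NumberLexer class, its token list, and the sample input are illustrative assumptions, not the library's actual Lexer class.

import ply.lex as lex

class NumberLexer(object):
    # hypothetical minimal token set, just enough to exercise t_NUMBER
    tokens = ('NUMBER',)
    t_ignore = ' \t'

    def t_NUMBER(self, t):
        r"[+-]*\d+\.?\d*"
        t.value = float(t.value)
        return t

    def t_error(self, t):
        # skip characters the rules above do not cover
        t.lexer.skip(1)

    def build(self):
        self.lexer = lex.lex(module=self)
        return self.lexer

# usage: tokenize a small input string
lexer = NumberLexer().build()
lexer.input("12 -3.5")
print([tok.value for tok in lexer])  # [12.0, -3.5]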
Example #2
def p_expression_binop(p):
    """expression : expression AND expression
                  | expression OR expression 
    """
    if p[2] == 'and':
        p[0] = p.parser.RULE_AND(p[1], p[3], p)
    elif p[2] == 'or':
        p[0] = p.parser.RULE_OR(p[1], p[3], p)
    else:
        util.error("unknown operator '%s'" % p[2])
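
The p.parser.RULE_AND and p.parser.RULE_OR callbacks used here are plain attributes on the yacc parser (their defaults are set in Example #18). A hedged sketch of overriding one of them on a built model; the model variable and the noise probability are assumptions for illustration.

import random

# 'model' is assumed to be an instance returned by the Model factory
model.parser.RULE_AND = lambda a, b, p: a and b   # default behaviour
model.parser.RULE_OR = lambda a, b, p: a or b     # default behaviour

def noisy_and(a, b, p):
    # hypothetical override: AND evaluates to False 10% of the time
    return (a and b) and random.random() > 0.1

model.parser.RULE_AND = noisy_and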
Example #3
def initialize(self, missing=None, defaults={}):
    "Initializes the TimeModel"
    self.mode = ruleparser.TIME
    BoolModel.initialize(self, missing=missing, defaults=defaults)

    if not self.label_tokens:
        util.error('this mode of operation requires time labels for rules')

    self.gcd = util.list_gcd(self.ranks)
    self.step = 0
    self.times = [0]
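
util.list_gcd itself does not appear in these examples; a sketch of what it is assumed to compute, the greatest common divisor of all time-label ranks, which serves as the base time step.

from functools import reduce
from math import gcd

def list_gcd_sketch(values):
    # gcd of a list of integer ranks, e.g. [2, 4, 6] -> 2
    return reduce(gcd, values)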
Example #4
    def iterate(self,
                fullt,
                steps,
                autogen_fname=None,
                localdefs=None,
                autogen='autogen'):
        """
        Iterates over the system of equations 
        """
        if autogen_fname is not None:
            autogen = autogen_fname
            del autogen_fname
            util.warn(
                "parameter 'autogen_fname' is deprecated. Use 'autogen' instead."
            )

        # setting up the timesteps
        dt = fullt / float(steps)
        self.t = [dt * i for i in range(steps)]

        # generates the initializator and adds the timestep
        self.init_text = 'import os\n'
        self.init_text += self.generate_init(localdefs=localdefs)
        self.init_text += '\ndt = %s' % dt

        # print init_text

        # generates the derivatives
        self.func_text = self.generate_function()
        # print func_text

        self.dynamic_code = self.init_text + '\n' + self.func_text
        try:
            with open('%s.py' % autogen, 'wt') as f:
                f.write('%s\n' % self.init_text)
                f.write('%s\n' % self.func_text)
            autogen_mod = __import__(autogen)
            try:
                os.remove('%s.pyc' % autogen)
            except OSError:
                pass  # must be a read only filesystem
            importlib.reload(autogen_mod)  # Python 3: requires 'import importlib'
        except Exception as exc:
            msg = "'%s' in:\n%s\n*** dynamic code error ***\n%s" % (
                exc, self.dynamic_code, exc)
            util.error(msg)

        # x0 has been auto generated in the initialization

        self.alldata = rk4(autogen_mod.derivs, autogen_mod.x0, self.t)

        for index, node in enumerate(self.nodes):
            self.lazy_data[node] = [row[index] for row in self.alldata]
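
A hedged usage sketch of the method above. The factory call mirrors Examples #7 and #9, initialize() mirrors Example #10, and rules_text is a placeholder for an actual rule definition string.

import boolean2

model = boolean2.Model(text=rules_text, mode='plde')
model.initialize()

# integrate over 10 time units split into 100 steps; this writes and
# imports the generated 'autogen.py' module as shown above
model.iterate(fullt=10, steps=100)

# per-node time series, collected at the end of iterate()
series = model.lazy_data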
Example #5
def save_states(self, fname):
    """
    Saves the states into a file
    """
    if self.states:
        fp = open(fname, 'wt')
        cols = ['STATE'] + list(self.first.keys())
        hdrs = util.join(cols)
        fp.write(hdrs)
        for state in self.states:
            cols = [state.fp()] + list(state.values())
            line = util.join(cols)
            fp.write(line)
        fp.close()
    else:
        util.error('no states have been created yet')
Example #6
    def iterate( self, fullt, steps, autogen_fname=None, localdefs=None, autogen='autogen'  ):
        """
        Iterates over the system of equations 
        """
        if autogen_fname is not None:
            autogen = autogen_fname
            del autogen_fname
            util.warn("parameter 'autogen_fname' is deprecated. Use 'autogen' instead." )
        
        # setting up the timesteps
        dt = fullt/float(steps)
        self.t  = [ dt * i for i in range(steps) ]

        # generates the initializator and adds the timestep
        self.init_text  = self.generate_init( localdefs=localdefs )
        self.init_text += '\ndt = %s' % dt

        #print init_text
        
        # generates the derivatives
        self.func_text = self.generate_function()
        #print func_text
       
        self.dynamic_code = self.init_text + '\n' + self.func_text             
        
        try:
            fp = open( '%s.py' % autogen, 'wt')
            fp.write( '%s\n' % self.init_text )
            fp.write( '%s\n' % self.func_text )
            fp.close()
            autogen_mod = __import__( autogen )
            try:
                os.remove( '%s.pyc' % autogen )
            except OSError:
                pass # must be a read only filesystem
            imp.reload( autogen_mod )
        except Exception as exc:
            msg = "'%s' in:\n%s\n*** dynamic code error ***\n%s" % ( exc, self.dynamic_code, exc )
            util.error(msg)

        # x0 has been auto generated in the initialization
        self.alldata = rk4(autogen_mod.derivs, autogen_mod.x0, self.t) 
        
        for index, node in enumerate( self.nodes ):
            self.lazy_data[node] = [ row[index] for row in self.alldata ]
Example #7
def Model(text, mode):
    "Factory function that returns the proper class based on the mode"

    # the text parameter may be a file that contains the rules
    if os.path.isfile(text):
        text = open(text, 'rt').read()

    # check the validity of modes
    if mode not in boolean2.ruleparser.VALID_MODES:
        util.error('mode parameter must be one of %s' % boolean2.ruleparser.VALID_MODES)

    # setup mode of operation
    if mode == boolean2.ruleparser.TIME:
        return timemodel.TimeModel(mode='time', text=text)
    elif mode == boolean2.ruleparser.PLDE:
        # matplotlib may not be installed,
        # so defer the import to allow other modes to be used
        from boolean2.plde import model
        return model.PldeModel(mode='plde', text=text)
    else:
        return boolmodel.BoolModel(mode=mode, text=text)
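
A short, hedged usage sketch of the factory. The two-node rule text is an assumption about the rule syntax (initial assignments plus starred update rules), not taken from these examples.

rules = """
A = True
B = False

B* = A or B
A* = not B
"""

# mode may be any entry of ruleparser.VALID_MODES, e.g. 'sync', 'async',
# 'time' or 'plde'
model = Model(text=rules, mode='sync')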
Example #8
    def iterate( self, fullt, steps, autogen_fname=None, localdefs=None, autogen='autogen'  ):
        """
        Iterates over the system of equations 
        """
        if autogen_fname is not None:
            autogen = autogen_fname
            del autogen_fname
            util.warn("parameter 'autogen_fname' is deprecated. Use 'autogen' instead." )
        
        # setting up the timesteps
        dt = fullt/float(steps)
        self.t  = [ dt * i for i in range(steps) ]

        # generates the initializator and adds the timestep
        self.init_text  = self.generate_init( localdefs=localdefs )
        self.init_text += '\ndt = %s' % dt

        #print init_text
        
        # generates the derivatives
        self.func_text = self.generate_function()
        #print func_text
       
        self.dynamic_code = self.init_text + '\n' + self.func_text             
        
        try:
            fp = open( '%s.py' % autogen, 'wt')
            fp.write( '%s\n' % self.init_text )
            fp.write( '%s\n' % self.func_text )
            fp.close()
            autogen_mod = __import__( autogen )
            try:
                os.remove( '%s.pyc' % autogen )
            except OSError:
                pass # must be a read only filesystem
            reload( autogen_mod )
        except Exception as exc:
            msg = "'%s' in:\n%s\n*** dynamic code error ***\n%s" % ( exc, self.dynamic_code, exc )
            util.error(msg)
Example #9
def Model(text, mode):
    "Factory function that returns the proper class based on the mode"

    # the text parameter may be a file that contains the rules
    if os.path.isfile( text ):
        #text = file(text, 'rt').read()
        text = open(text, 'rt').read()

    # check the validity of modes
    if mode not in ruleparser.VALID_MODES:
        util.error('mode parameter must be one of %s' % ruleparser.VALID_MODES)

    # setup mode of operation
    if mode == ruleparser.TIME:
        return timemodel.TimeModel(mode='time', text=text)
    elif mode == ruleparser.PLDE:
        # matplotlib may not be installed 
        # so defer import to allow other modes to be used
        from .plde import model
        return model.PldeModel( mode='plde', text=text)
    else:
        return boolmodel.BoolModel( mode=mode, text=text )
Example #10
    def initialize(self, missing=None, defaults={}):
        """
        Initializes the model, needs to be called to reset the simulation 
        """

        # create a new lexer
        self.lexer = tokenizer.Lexer().lexer

        self.parser.old = state.State()
        self.parser.new = state.State()

        # references must be attached to the parser class
        # to be visible during parsing
        self.states = self.parser.states = [self.parser.old]

        # parse the initial data
        list(map(self.local_parse, self.init_lines))

        # deal with uninitialized nodes
        if self.uninit_nodes:
            if missing:
                for node in self.uninit_nodes:
                    value = missing(node)

                    self.parser.RULE_SETVALUE(self.parser.old, node, value,
                                              None)
                    self.parser.RULE_SETVALUE(self.parser.new, node, value,
                                              None)
            else:
                util.error('uninitialized nodes: %s' % list(self.uninit_nodes))

        # override any initialization with defaults
        for node, value in list(defaults.items()):
            self.parser.RULE_SETVALUE(self.parser.old, node, value, None)
            self.parser.RULE_SETVALUE(self.parser.new, node, value, None)

        # will be populated upon the first call
        self.lazy_data = {}
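
A hedged sketch of how the missing and defaults parameters are used, following the two branches above; model and the node name 'A' are assumptions for illustration.

# every uninitialized node receives the value returned by the callable
model.initialize(missing=lambda node: False)

# explicit per-node overrides, applied after the init lines are parsed
model.initialize(missing=lambda node: False, defaults={'A': True})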
Example #11
def t_error(self, t):
    "Error message"
    msg = "lexer error in '%s' at '%s'" % (self.last, t.value)
    util.error(msg)
Example #12
import sys, os
from itertools import *

from boolean2.boolmodel import BoolModel
from boolean2 import util, odict, tokenizer
from boolean2.plde import helper

try:
    import pylab
    from pylab import arange, rk4
except ImportError:
    util.error( "matplotlib is missing, install it from: http://matplotlib.sourceforge.net/")

def default_override( node, indexer, tokens ):
    """
    Gets called before generating each equation.
    If this function returns anything other than None,
    it will override the equation.
    """
    return None

def default_equation( tokens, indexer ):
    """
    Default equation generator, override this to generate
    other equations without explicit overrides
    """
    node = tokens[1].value
    text = helper.change(node, indexer) + ' = ' + helper.piecewise(tokens, indexer)
    return text
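
A hedged sketch of a custom override following the contract described in default_override: return None to keep the generated equation, or a string to replace it. The replacement text is purely illustrative, since the exact format produced by helper.change and helper.piecewise is not shown here.

def my_override(node, indexer, tokens):
    # keep the generated piecewise equation for every node except 'A'
    if node != 'A':
        return None
    # hypothetical replacement equation for node 'A'
    return helper.change(node, indexer) + ' = 0.5'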
Example #13
def p_error(p):
    if hasattr(p, 'value'):
        util.warn('at %s' % p.value)
    msg = "Syntax error in -> '%s'" % LAST_LINE
    util.error(msg)
Example #14
def p_label_init(p):
    'stmt : LABEL '

    # this silences unused-token warnings; labels are used only in the
    # tokenizing phase, not in the grammar
    util.error('invalid construct')
Example #15
"""
Boolean Network Library

"""
import sys, re, os

__VERSION__ = '1.2.0-beta'

from boolean2 import util

# require python 2.5 or higher
if sys.version_info[:2] < (2, 5):
    util.error("this program requires python 2.5 or higher" )

from . import ruleparser, boolmodel, timemodel, tokenizer

from .tokenizer import modify_states

def Model(text, mode):
    "Factory function that returns the proper class based on the mode"

    # the text parameter may be a file that contains the rules
    if os.path.isfile( text ):
        #text = file(text, 'rt').read()
        text = open(text, 'rt').read()

    # check the validity of modes
    if mode not in ruleparser.VALID_MODES:
        util.error('mode parameter must be one of %s' % ruleparser.VALID_MODES)
Example #16
"""
Boolean Network Library

"""
import sys, re, os

__VERSION__ = '1.2.0-beta'

from boolean2 import util

# require python 2.5 or higher
if sys.version_info[:2] < (2, 5):
    util.error("this program requires python 2.5 or higher")
import boolean2.ruleparser
from boolean2.ruleparser import *
import boolean2.boolmodel as boolmodel
from boolean2.plde import model
import boolean2.timemodel as timemodel
import boolean2.tokenizer as tokenizer
from boolean2.tokenizer import modify_states


def Model(text, mode):
    "Factory function that returns the proper class based on the mode"

    # the text parameter may be a file that contains the rules
    if os.path.isfile(text):
        text = open(text, 'rt').read()

    # check the validity of modes
    if mode not in boolean2.ruleparser.VALID_MODES:
Example #17
from boolean2 import util
import random
from itertools import count

try:
    import networkx
    from networkx import components as component
except ImportError as exc:
    util.error(
        f"networkx import error : {exc}. Install newest version from https://networkx.lanl.gov/"
    )

# color constants
BLUE, RED, GREEN = "#0000DD", "#DD0000", "#00DD00"
WHITE, PURPLE, ORANGE = "#FFFFFF", "#990066", "#FF3300"
TEAL, CRIMSON, GOLD, NAVY, SIENNA = "#009999", "#DC143C", "#FFD700", "#000080", "#A0522D"
LIGHT_GREEN, SPRING_GREEN, YELLOW_GREEN = "#33FF00", "#00FF7F", "#9ACD32"


def component_colormap(graph):
    """
    Colormap by strong components
    """
    # automatically color by components

    # a list of colors in hexadecimal Red/Green/Blue notation
    colors = [
        ORANGE,
        SPRING_GREEN,
        GOLD,
        TEAL,
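
The function is truncated above; a hedged sketch of the grouping such a colormap is assumed to be built from, with nodes in the same strongly connected component sharing a color (networkx.strongly_connected_components is a standard networkx call).

import networkx

def colormap_sketch(graph, colors):
    # assign one color per strongly connected component of a directed graph
    colormap = {}
    components = networkx.strongly_connected_components(graph)
    for color, comp in zip(colors, components):
        for node in comp:
            colormap[node] = color
    return colormap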
Example #18
    def __init__(self, mode, text):
        """
        Main parser baseclass for all models
        """

        # check the validity of modes
        if mode not in VALID_MODES:
            util.error('mode parameter must be one of %s' % VALID_MODES)

        # initialize the parsers
        self.parser = yacc.yacc(write_tables=0, debug=0)

        # set the mode
        self.parser.mode = mode

        # optimization: this check is used very often
        self.parser.sync = (self.parser.mode == SYNC
                            or self.parser.mode == TIME)

        # define default functions
        def get_value(state, name, p):
            return getattr(state, name)

        def set_value(state, name, value, p):
            setattr(state, name, value)
            return value

        #
        # setting the default rules
        #
        self.parser.RULE_AND = lambda a, b, p: a and b
        self.parser.RULE_OR = lambda a, b, p: a or b
        self.parser.RULE_NOT = lambda a, p: not a
        self.parser.RULE_SETVALUE = set_value
        self.parser.RULE_GETVALUE = get_value
        self.parser.RULE_START_ITERATION = lambda index, model: index

        #
        # internally we'll maintain a full list of tokens
        #
        self.tokens = tokenizer.tokenize(text)
        self.nodes = tokenizer.get_nodes(self.tokens)

        # isolate various types of tokens
        self.init_tokens = tokenizer.init_tokens(self.tokens)
        self.update_tokens = tokenizer.update_tokens(self.tokens)
        self.label_tokens = tokenizer.label_tokens(self.update_tokens)
        self.async_tokens = tokenizer.async_tokens(self.update_tokens)

        # finding the initial and update nodes
        self.init_nodes = tokenizer.get_nodes(self.init_tokens)
        self.update_nodes = tokenizer.get_nodes(self.update_tokens)

        # find uninitialized nodes
        self.uninit_nodes = self.update_nodes - self.init_nodes

        # populate the initializer lines
        self.init_lines = list(map(tokenizer.tok2line, self.init_tokens))

        # populate the body by the ranks
        labelmap = {}

        for tokens in self.async_tokens:
            labelmap.setdefault(1, []).append(tokens)
        # overwrite the label token's value in nolabel modes
        if self.parser.mode in NOLABEL_MODE:
            for token in self.label_tokens:
                token[0].value = 1

        # for all PLDE, SYNC and ASYNC modes all ranks will be set to 1
        for tokens in self.label_tokens:
            rank = tokens[0].value
            short = tokens[1:]
            labelmap.setdefault(rank, []).append(short)

        # will iterate over the ranks in order
        self.ranks = list(sorted(labelmap.keys()))

        # build another parseable text, as lines stored for rank keys
        # by shuffling, sorting or reorganizing this body we can
        # implement various updating rule selection strategies
        self.update_lines = {}
        for key, values in labelmap.items():
            self.update_lines.setdefault(key, []).extend(
                list(map(tokenizer.tok2line, values)))
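
For reference, a hedged illustration of the rank labels that the code above collects; the rule text follows the labelled-update syntax implied by label_tokens (an integer rank in front of each update rule).

rules = """
A = True
B = False

1: B* = A or B
2: A* = not B
"""
# for this text, labelmap groups the rank-1 and rank-2 rules separately,
# and self.ranks ends up as [1, 2]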