Example #1
    def getPetriNetData(self):
        """ Construct list of tokens from SPN, returns. """
        import parse_tokens as pat

        TLIST = []
        for i in self.vdict.keys():
            TLIST.append(pat.Token(i, self.vdict[i]))
        for j in self.mdict.keys():
            TLIST.append(pat.Token(j, self.mdict[j]))

        return TLIST
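
These examples never show the parse_tokens module itself; every use simply constructs Token(label, value) and later reads the .label and .value attributes (see the docstrings in the lexer examples below). A minimal stand-in consistent with that usage, offered only as an assumption about the real class, might be:

    class Token(object):
        """Minimal stand-in for parse_tokens.Token, assuming only the two
        attributes these examples read and write."""
        def __init__(self, label, value):
            self.label = label   # e.g. 'p', 't', 'r', 'm', 'pre', 'post'
            self.value = value   # numpy array or matrix holding the data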
Example #2
    def check(tlist):
        """
        Error checking prior to returning token list after lexing.
        Tests validity and existence of the vital parameters, raises
        errors if the petri net isn't fully specified.
        """
        import parse_tokens as pat
        import numpy as np

        def check_uniqueness(ref):
            """
            Checks a list of given identifier to ensure unique
            identifiers have been assigned. If there are duplicates,
            appends an index to the end, e.g. is p = [ gA , gA , pA ]
            this method would ceonvert it to [gA , gA_1 , pA ]
            """
            for token in tlist:
                if token.label == ref:
                    test_list = token.value

            u_out = []
            co = 1
            if len(test_list) > len(set(test_list)):
                for p in test_list:
                    if p not in u_out:
                        u_out.append(p)
                    else:
                        u_out.append(p + '_' + str(co))
                        co += 1
                for item in tlist:
                    if item.label == ref:
                        item.value = u_out
                #raise AttributeError

        ## ERROR handling ##
        count = {}
        rates = []
        flags = []
        for token in tlist:
            count[token.label] = len(token.value)
            if token.label == 'r':
                rates = token.value
            if token.label == 'c':
                caps = token.value
            if token.label == 'pre':
                pre = token.value
            if token.label == 'post':
                post = token.value
            if token.label == 'test':
                test = token.value
                flags.append('t')
            if token.label == 'inhib':
                inhib = token.value
                flags.append('i')

        # Required parameters for Petri Net
        req = ['p', 't', 'r', 'm', 'pre', 'post']
        ## Current setup: ##
        # If rates are missing: set all to 0
        # If initial markings are missing: set all to 0
        # If p, t, pre or post are missing: Error
        for item in req:
            if item not in count:
                if item == 'r':
                    tlist.append(pat.Token('r', np.zeros(count['t'])))
                    count['r'] = count['t']
                elif item == 'm':
                    tlist.append(pat.Token('m', np.zeros(count['p'])))
                    count['m'] = count['p']
                else:
                    # Net not fully specified: a required parameter is
                    # missing, so pass the offending item to the error
                    raise AttributeError(item)

        check_uniqueness('p')
        check_uniqueness('t')

        missingno = 0
        if count['r'] < count['t']:
            # MISSING RATES!
            missingno = count['t'] - count['r']
            nrate = np.append(rates, np.zeros(missingno))
            tlist.append(pat.Token('r', nrate))

        try:
            miss = 0
            if count['c'] != 0:
                if count['c'] < count['p']:
                    miss = count['p'] - count['c']
                    ncap = np.append(caps, np.zeros(miss, dtype=int))
                    tlist.append(pat.Token('c', ncap))
        except KeyError:
            # if no capacities, don't worry
            pass

        # Check dimensions of pre and post matrices
        matdim = count['t'] * count['p']
        if matdim != pre.size or matdim != post.size:
            raise AttributeError

        if 't' in flags:
            if matdim != test.size:
                raise AttributeError
        if 'i' in flags:
            if matdim != inhib.size:
                raise AttributeError

        if count['pre'] != count['post'] or count['pre'] != count['t']:
            # Matrices error
            raise AttributeError

        # Test for negative rates
        if np.any(np.asarray(rates) < 0):
            raise AttributeError

        # Test for negative pre/post entries
        if np.any(np.asarray(pre) < 0) or np.any(np.asarray(post) < 0):
            raise AttributeError
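
A minimal sketch of how check() fills in defaults, assuming the Token stand-in above and that check() is reachable as a plain function (the lexer examples call it as self.check(...) on a parser class). Rates and markings are deliberately omitted so the zero-filling branch runs:

    import numpy as np
    import parse_tokens as pat

    toks = [pat.Token('p', np.array(['P1', 'P2'])),
            pat.Token('t', np.array(['t1'])),
            pat.Token('pre', np.matrix([[1, 0]])),
            pat.Token('post', np.matrix([[0, 1]]))]
    check(toks)   # in the real code this is called as self.check(toks)
    # toks now also holds an 'r' token (one zero rate per transition)
    # and an 'm' token (one zero marking per place)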
Example #3
    def lex(self, filename):
        """
        Takes a (correctly) formatted SBML Level 3 Version 1 (.xml) file as
        input, and returns an unordered list of token objects, with
        properties 'label' and 'value'.
        import libsbml as lb
        import numpy as np
        import parse_tokens as pat

        # Initialise vars
        rates = []
        reactlist = []
        prodlist = []
        rdict = {}
        pdict = {}
        TOK = []

        # Retrieve SBML model
        self.input = filename
        reader = lb.SBMLReader()
        document = reader.readSBML(filename.name)
        sbmlModel = document.getModel()

        # Get places, transitions, initial markings:
        pnum = sbmlModel.getNumSpecies()
        tnum = sbmlModel.getNumReactions()
        Plist = [sbmlModel.getSpecies(i).getId() for i in range(pnum)]
        Mlist = [int(sbmlModel.getSpecies(r).getInitialAmount()) \
                     for r in range(pnum)]
        Tlist = [sbmlModel.getReaction(q).getId() for q in range(tnum)]

        # Get reactions:
        for r in range(sbmlModel.getNumReactions()):
            reaction = sbmlModel.getReaction(r)
            for rt in range(reaction.getNumReactants()):
                # for the pre matrix
                rdict[reaction.getId(),rt] = \
                    reaction.getReactant(rt).getSpecies(),\
                    reaction.getReactant(rt).getStoichiometry()
                reactlist.append([reaction.getId(), \
                                  reaction.getReactant(rt).getSpecies(), \
                                  reaction.getReactant(rt).getStoichiometry()])
            for rp in range(reaction.getNumProducts()):
                # for post matrix
                pdict[reaction.getId(), rp] = \
                    reaction.getProduct(rp).getSpecies(),\
                    reaction.getProduct(rp).getStoichiometry()
                prodlist.append([reaction.getId(), \
                                   reaction.getProduct(rp).getSpecies(),\
                                   reaction.getProduct(rp).getStoichiometry()])
            # Get rates:
            # NOTE: requires reaction rates encoded as local parameters
            #       of the KineticLaw
            for qr in range(reaction.getKineticLaw().getNumLocalParameters()):
                rates.append(
                    reaction.getKineticLaw().getLocalParameter(qr).getValue())

        # Pre matrix is t x p
        # each row: a transition from p1 ... px
        # initialise blank matrices of correct size:
        pre = np.zeros(shape=(tnum, pnum), dtype=int)
        post = pre.copy()

        # Set up labeling of pre and post matrices for conversion
        cols = {}
        for i in range(pnum):
            cols[Plist[i]] = i

        rows = {}
        for j in range(tnum):
            rows[Tlist[j]] = j

        try:
            for j in reactlist:
                # row co-ords: rows[j[0]]
                # column co-ords:  cols[j[1]]
                # new val: j[2] - note INTEGER!
                pre[rows[j[0]], cols[j[1]]] = int(j[2])
        except KeyError:
            # Missing place or transition
            print "Error with place or transition identifiers"
            # Malformed input file
            raise AttributeError

        for p in prodlist:
            post[rows[p[0]], cols[p[1]]] = int(p[2])

        ## Build token list
        vectok = {'p': Plist, 't': Tlist, 'r': rates, 'm': Mlist}
        mattok = {'pre': pre, 'post': post}

        for key in vectok.keys():
            if len(vectok[key]) != 0:
                TOK.append(pat.Token(key, np.array(vectok[key])))

        for ky in mattok.keys():
            TOK.append(pat.Token(ky, np.matrix(mattok[ky])))

        # Error handling via parent class
        self.check(TOK)

        # return unordered list of token objects
        return TOK
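
A hedged usage sketch: lex() passes filename.name to libsbml's SBMLReader, so it expects an already-open file object rather than a path string. SBMLLexer and 'petri_net.xml' are placeholder names, not taken from the original project:

    parser = SBMLLexer()                 # hypothetical class owning lex()
    with open('petri_net.xml') as f:     # SBML Level 3 model of the net
        tokens = parser.lex(f)
    for tok in tokens:
        print(tok.label, tok.value)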
Example #4
    def lex(self, filename):
        """
        Takes a (correctly) formatted PNML file as input, and returns
        an unordered list of token objects, with properties 'label'
        and 'value'. This list should then be passed to an RParser
        object.
        """
        from xml.dom.minidom import parseString
        import parse_tokens as pat
        import numpy as np
        import re

        self.input = filename
        data = self.input.read()
        self.input.close()
        getpid = re.compile(r'(?i)place(?:.*)id\w?=\w?\"?([a-zA-Z_0-9]*)\"?')
        gettid = re.compile(
            r'(?i)transition(?:.*)id\w?=\w?\"?([a-zA-Z_0-9]*)\"?')
        getaid = re.compile(r'(?i)arc(?:.*)id\w?=\w?\"?([a-zA-Z_0-9]*)\"?')
        getatype = re.compile(r'(?i)type\w?=\w?\"?([a-zA-Z_0-9]*)\"?')
        getnid = re.compile(r'(?i)net(?:.*)id\w?=\w?\"?([a-zA-Z_0-9 ]*)\"?')
        getcapac = re.compile(r'(?i)capacity\w?=\w?\"?([0-9]*)\"?')
        getsource = re.compile(r'(?i)source\w?=\w?\"?([a-zA-Z_0-9]*)\"?')
        gettarg = re.compile(r'(?i)target\w?=\w?\"?([a-zA-Z_0-9]*)\"?')
        dom = parseString(data)

        plist = []
        mlist = []
        clist = []
        pcount = 0
        for place in dom.getElementsByTagName('place'):
            tag = place.toxml()
            m = None
            try:
                # For places where the place name is encoded in the
                # <place> .. <name> .. <text>
                data = str(place.getElementsByTagName('name')[0]\
                              .getElementsByTagName('text')[0]\
                               .firstChild.data).rstrip()
            except IndexError:
                pass
            try:
                # For places where the place id tag is the name of the place
                data = str(re.search(getpid, tag).group(1).rstrip())
            except AttributeError:
                pass
            try:
                # Try different ways of retrieving the marking, allows
                # flexibility in input file. #1 = <marking><value>X</.. </..
                m = int(place.getElementsByTagName('marking')[0]\
                            .getElementsByTagName('value')[0].firstChild.data)
            except IndexError:
                pass
            try:
                # #2 = <initialMarking><valueX</.. </..
                # Takes precedence over the above
                m = int(place.getElementsByTagName('initialMarking')[0]\
                            .getElementsByTagName('value')[0].firstChild.data)
            except IndexError:
                pass
            try:
                # #3 = <initialMarking><text>X</.. </..
                # Takes precedence over the above
                m = int(place.getElementsByTagName('initialMarking')[0]\
                            .getElementsByTagName('text')[0].firstChild.data)
            except IndexError:
                pass
            if m is None:
                print("Note: No initial marking detected, set to 0")
                ### WARNING NO INITIAL MARKING SET FOR PLACE ###
                mlist.append(0)
            else:
                mlist.append(m)
            try:
                cap = int(re.search(getcapac, tag).group(1).rstrip())
                if cap != 0:
                    clist.append(cap)
            except (AttributeError, ValueError):
                # no capacity attribute for this place
                pass
            plist.append(data)
            pcount += 1

        tlist = []
        rates = []
        tcount = 0
        for trans in dom.getElementsByTagName('transition'):
            # Initialise to blank each time
            tid = None
            tag = trans.toxml()

            # ALT methods of retrieving transition names:
            # NOTE: arc source/target (should) use transition id as a reference
            # try:
            #     #preferably get the name from <name>..<value> OR <text>
            #     tid = str(trans.getElementsByTagName('name')[0]\
            #                .getElementsByTagName('value')[0].firstChild.data)\
            #                .rstrip()
            # except:
            #     pass
            # try:
            #     tid = str(trans.getElementsByTagName('name')[0]\
            #                 .getElementsByTagName('text')[0].firstChild.data)\
            #                 .rstrip()
            # except:
            #     pass

            try:
                # Is transition name encoded in id?
                tid = str(re.search(gettid, tag).group(1))
            except AttributeError:
                # else assign our own IDs
                tid = 't_' + str(tcount)

            tlist.append(tid)
            tcount += 1

            ## RATES
            try:
                rate = str(trans.getElementsByTagName('rate')[0]\
                    .getElementsByTagName('value')[0].firstChild.data)
                rates.append(float(rate))
            except:
                pass

        # Get arcs
        arcd = {}
        stoich = None
        acount = 0
        for arc in dom.getElementsByTagName('arc'):
            # reset per arc so a previous arc's stoichiometry doesn't leak
            stoich = None
            tag = arc.toxml()
            try:
                atype = arc.getElementsByTagName('type')[0].firstChild.data
                print(atype)
            except IndexError:
                pass
            try:
                stoich = arc.getElementsByTagName('text')[0].firstChild.data
            except IndexError:
                pass
            try:
                stoich = arc.getElementsByTagName('value')[0]\
                    .firstChild.data
                stoich = arc.getElementsByTagName('inscription')[0]\
                    .getElementsByTagName('value')[0].firstChild.data
            except IndexError:
                pass

            if stoich is None:
                ### WARNING: MISSING STOICHIOMETRY ###
                ### ASSUMED TO BE ONE ###
                print("Note: Missing stoichiometry, set to 1")
                stoich = 1

            # arc ID
            try:
                aid = str(re.search(getaid, tag).group(1))
            except AttributeError:
                aid = acount

            afrom = str(re.search(getsource, tag).group(1))

            # find test and inhibitory arcs:
            try:
                atype = str(re.search(getatype, tag).group(1))
            except AttributeError:
                atype = None

            if atype == 'test':
                mat = 'test'
            elif atype == 'inhibitory':
                mat = 'inhib'
            else:
                if afrom in plist:
                    mat = 'pre'
                else:
                    mat = 'post'

            ato = str(re.search(gettarg, tag).group(1))
            # ARC d
            arcd[aid] = [int(stoich), afrom, ato, mat]
            acount += 1

        # if the arc source is in plist >> arc belongs to pre
        # otherwise (source is a transition) >> arc belongs to post
        net = re.search(getnid, str(dom.getElementsByTagName('net')[0]\
                                        .toxml())).group(1)
        # arcd format: { arc_id: [stoichiometry, source (place or trans),
        #                         target, 'pre'/'post'/'test'/'inhib'] }
        ## Pre and Post ##
        pre = np.zeros(shape=(len(tlist), len(plist)), dtype=int)
        post = pre.copy()

        # test and inhib may not be needed
        test = pre.copy()
        inhib = pre.copy()
        zeros = pre.copy().tolist()

        cols = {}
        for i in range(len(plist)):
            # build dict for populating the matrices:
            # columns of pre/post relate to places
            cols[plist[i]] = i

        rows = {}
        for j in range(len(tlist)):
            # rows relate to transitions
            rows[tlist[j]] = j

        try:
            for arc in arcd.values():
                if arc[3] == 'pre':
                    pre[rows[arc[2]], cols[arc[1]]] = int(arc[0])
                elif arc[3] == 'post':
                    post[rows[arc[1]], cols[arc[2]]] = int(arc[0])
                elif arc[3] == 'test':
                    test[rows[arc[2]], cols[arc[1]]] = int(arc[0])
                elif arc[3] == 'inhib':
                    inhib[rows[arc[2]], cols[arc[1]]] = int(arc[0])
        except KeyError:
            print "Error with place or transition identifiers"
            raise AttributeError

        ## Build token list
        TOK = []
        vectok = {'p': plist, 't': tlist, 'r': rates, 'm': mlist, 'c': clist}
        mattok = {'pre': pre, 'post': post, 'inhib': inhib, 'test': test}

        for key in vectok.keys():
            if len(vectok[key]) != 0:
                TOK.append(pat.Token(key, np.array(vectok[key])))

        for ky in mattok.keys():
            if mattok[ky].tolist() != zeros:
                TOK.append(pat.Token(ky, np.matrix(mattok[ky])))

        # run error handling
        self.check(TOK)

        # return unordered list of token objects
        return TOK
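
A hedged usage sketch with a hand-written PNML fragment of the shape this lexer's regexes and DOM lookups appear to expect (place/transition ids carry the names, markings under <initialMarking><value>). PNMLLexer and the fragment are illustrative assumptions, and the snippet assumes Python 3:

    from io import StringIO

    pnml = '''<pnml>
      <net id="net1">
        <place id="P1">
          <initialMarking><value>1</value></initialMarking>
        </place>
        <place id="P2"/>
        <transition id="t1"/>
        <arc id="a1" source="P1" target="t1"/>
        <arc id="a2" source="t1" target="P2"/>
      </net>
    </pnml>'''

    # lex() only calls .read() and .close() on its argument, so a StringIO
    # works in place of an open file handle
    tokens = PNMLLexer().lex(StringIO(pnml))   # pre and post come out 1 x 2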
Example #5
    def lex(self, infile):
        """
        Takes a (correctly) formatted .txt file as input, and returns
        an unordered list of token objects, with properties 'label'
        and 'value'; for example, a .txt file with the line:
        p = ['X1', 'X2']
        will be processed by lex to return a list containing a token object
        with label 'p' and value numpy.array(['X1', 'X2'])
        """

        import re
        import numpy as np
        import parse_tokens as pat

        TOKLIST = []
        # REs used to pick out SPN variables
        ptok = re.compile(r"(?ism)^\W*p(?:laces)?\W*=\W*(\[.*?\])")
        ttok = re.compile(r"(?ism)^\W*t(?:ransitions)?\W*=\W*(\[.*?\])")
        rtok = re.compile(r"(?ism)^\W*r(?:ates?)?\W*=\W*(\[.*?\])")
        # optional rates?
        mtok = re.compile(r"(?ism)^\W*m(?:arkings?)?\W*=\W*(\[.*?\])")
        ctok = re.compile(r"(?ism)^\W*c(?:apacities)?\W*=\W*(\[.*?\])")
        pretok = re.compile(
            r"(?ism)^\W*pre(?:[_| ]arcs?)?\W*=\W*(\[(?:\W)*\[.*?\]{2})")
        posttok = re.compile(
            r"(?ism)^\W*post(?:[_| ]arcs?)?\W*=\W*(\[(?:\W)*\[.*?\]{2})")
        testtok = re.compile(
            r"(?ism)^\W*test(?:[_| ]arcs?)?\W*=\W*(\[(?:\W)*\[.*?\]{2})")
        inhibtok = re.compile(
            r"(?ism)^\W*I(?:nhib(?:itory[_| ]arcs?)?)?\W*=\W*(\[(?:\W)*\[.*?\]{2})")
        # optional i?
        # s = DOTALL, m = MULTILINE, i = IGNORECASE
        # Relate above REs to their standard labels used for simulation
        v_tok = {ptok: 'p', ttok: 't', rtok: 'r', mtok: 'm', ctok: 'c'}
        m_tok = {pretok : 'pre', posttok : 'post', \
                     testtok :'test', inhibtok :'inhib'}

        infile.seek(0)
        # set max size of input file
        finput = infile.read(4096)

        # Only adds what it can find, parent class handles defaults and errors
        ## Build token list
        for i in v_tok.keys():
            if i.search(finput):
                try:
                    vval = np.array(eval(i.findall(finput)[0]), ndmin=1)
                    if i == ctok:
                        # don't add zero capacities
                        nonzero = False
                        for num in vval:
                            if num != 0:
                                nonzero = True
                        if nonzero:
                            TOKLIST.append(pat.Token(v_tok[i], vval))
                    else:
                        TOKLIST.append(pat.Token(v_tok[i], vval))
                except:  # NameError, SyntaxError:
                    # Fallback: entries have no quotes, or a stray single
                    # quote (e.g. 'place1, place2), so scan with a bare regex
                    abc = i.findall(finput)[0].strip()
                    it = re.findall('[a-zA-Z0-9_]+', abc)
                    TOKLIST.append(pat.Token(v_tok[i], np.array(it, ndmin=1)))
        for j in m_tok.keys():
            if j.search(finput):
                # Same for matrices
                mval = np.matrix(eval(j.findall(finput)[0].replace('\n', '')),
                                 dtype=int)
                TOKLIST.append(pat.Token(m_tok[j], mval))

        # error handling using parent method
        self.check(TOKLIST)

        # return unordered list of token objects
        return TOKLIST
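
A hedged usage sketch of the .txt dialect these regexes accept, fed through a StringIO instead of a real file (TxtLexer is a placeholder class name; Python 3 assumed):

    from io import StringIO

    spn = StringIO(
        "p = ['X1', 'X2']\n"
        "t = ['t1']\n"
        "r = [0.5]\n"
        "m = [1, 0]\n"
        "pre = [[1, 0]]\n"
        "post = [[0, 1]]\n"
    )
    tokens = TxtLexer().lex(spn)   # yields p, t, r, m, pre and post tokens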