Beispiel #1
0
class Game:
    """Battleship-style game driven by a StateMachine.

    Registers the state handlers and immediately runs the machine on
    construction, so creating a Game plays one full game.
    """

    def __init__(self, context):
        self.ctx = context
        self.m = StateMachine()
        self.m.add_state("init", self.sm_init)
        self.m.add_state("start", self.sm_start)
        self.m.add_state("end", self.sm_end, end_state=1)
        self.m.set_start("init")
        self.m.run(self.ctx)

    def sm_init(self, ctx):
        # Initialization state: nothing to set up yet, transition
        # straight to "start".
        return "start", ctx

    def sm_start(self, ctx):
        """Main game-loop state.

        Picks two players, then alternates moves until one side has no
        ships left. Returns the next state name and the cargo.
        """
        p1, p2 = self.get_random_players()
        p1.init(10)
        p2.init(10)

        p1.print_status()
        p2.print_status()

        res = None
        player = [p1, p2]
        next_move = 0
        while p1.has_ships() and p2.has_ships():
            x, y = player[next_move].get_move(res)
            res = player[next_move ^ 1].set_move(x, y)
            # A hit ("inj") or a sunk ship ("sink") grants another
            # turn; otherwise control passes to the opponent.
            if res not in ["inj", "sink"]:
                next_move ^= 1

        # Fixed: Python 2 `print` statements -> print() calls, matching
        # the rest of the file.
        if p1.has_ships():
            print("Player1 won")
        else:
            print("Player2 won")

        # TODO: should be moved to the "end" state
        p1.finalize()
        p2.finalize()

        return "end", ctx

    def sm_end(self, ctx):
        # Terminal state: nothing to do.
        pass

    def get_random_players(self):
        # ugly: reaches through the context to pick two distinct players
        players = self.ctx.get_players()
        player1 = players.get_player()
        player2 = players.get_player(not_this_one=player1)
        return player1, player2

    def start_game(self):
        pass
def startthegoddamnedgame():
    """Wire up the game's state machine and run it over allTiles."""
    machine = StateMachine()
    machine.add_state("GameStarts", game_started)
    machine.add_state("p1TurnStart", p1_turn_start)
    machine.add_state("p2TurnStart", p2_turn_start)
    machine.add_state("p1TurnEnd", p1_turn_end)
    machine.add_state("p2TurnEnd", p2_turn_end)
    machine.add_state("p1Win", p1_win)
    machine.add_state("p2Win", p2_win)
    machine.add_state("Game_Over", None, end_state=1)
    machine.set_start("GameStarts")
    machine.run(allTiles)
def startthegoddamnedgame():
    """Register every game state on a fresh StateMachine and run it."""
    sm = StateMachine()
    handlers = [
        ("GameStarts", game_started),
        ("p1TurnStart", p1_turn_start),
        ("p2TurnStart", p2_turn_start),
        ("p1TurnEnd", p1_turn_end),
        ("p2TurnEnd", p2_turn_end),
        ("p1Win", p1_win),
        ("p2Win", p2_win),
    ]
    for state_name, handler in handlers:
        sm.add_state(state_name, handler)
    sm.add_state("Game_Over", None, end_state=1)
    sm.set_start("GameStarts")
    sm.run(allTiles)
Beispiel #4
0
def initSearchStateMachine():
    """Create and return the state machine used for document searching.

    Returns the configured (but not yet running) StateMachine.
    """
    # Fixed typo in the status message ("STATA" -> "STATE").
    print("Creating STATE MACHINE for searching!")
    m = StateMachine()
    m.add_state("START_Search", StartSearch_state)
    m.add_state("DocSearcher_LoadDocumentsCluster",
                DocSearcher_LoadDocumentsCluster_state)
    m.add_state("DocSearcher_waitInput", DocSearcher_waitInput_state)
    m.add_state("DocSearcher_search", DocSearcher_search_state)
    m.add_state("DocSearcher_showResults", DocSearcher_showResults_state)
    m.add_state("End", EndSearch_state, end_state=1)
    m.set_start("START_Search")
    return m
Beispiel #5
0
def run(fpin, fpout):
	"""Drive the parser state machine over *fpin*; *fpout* receives index output."""

	global fpindex

	# fpout is stashed in a module-level global rather than passed via
	# the machine's cargo — presumably so the state handlers can reach
	# it; confirm against the handler definitions (not visible here).
	fpindex = fpout
	# NOTE(review): this StateMachine variant registers handler
	# functions directly, with no state-name string, unlike the other
	# examples in this file — different StateMachine implementation?
	m = StateMachine();
	m.add_state(parse)
	m.add_state(NOTES)
	m.add_state(QUOTES)
	m.add_state(segment)
	m.add_state(error, end_state=1)
	m.add_state(eof, end_state=1)
	m.set_start(parse)
	m.run((fpin, ''))
Beispiel #6
0
def parseBMRB(f):
    """Run the BMRB-file parsing state machine over *f*.

    Returns the third element of the machine's result.
    """
    machine = StateMachine()
    wiring = (
        ("Start", start),
        ("open_file", open_file),
        ("read_lines", read_lines),
        ("get_sequence", get_sequence),
        ("get_CS", get_CS),
        ("get_headers", get_headers),
        ("get_uniProt", get_uniProt),
    )
    for label, handler in wiring:
        machine.add_state(label, handler)
    machine.add_state("end_reading", end_reading, end_state=1)
    machine.add_state("empty_file", empty_file)
    machine.add_state("no_file", no_file)
    machine.set_start("Start")
    return machine.run(f)[2]
Beispiel #7
0
def initScanStateMachine():
    """Create and return the state machine used for document scanning.

    Returns the configured (but not yet running) StateMachine.
    """
    # Fixed typo in the status message ("STATA" -> "STATE").
    print("Creating STATE MACHINE for document scanning!")
    m = StateMachine()
    m.add_state("START_NewDocument", StartNewDocument_state)
    m.add_state("ScannerIO_Scan", ScannerIO_Scan_state)
    m.add_state("ScannerIO_SaveImage", ScannerIO_SaveImage_state)
    m.add_state("ScannerIO_CreateTXT", ScannerIO_CreateTXT_state)
    m.add_state("ScannerIO_SaveHTML", ScannerIO_SaveHTML_state)
    m.add_state("ScannerIO_LoadDocuments", ScannerIO_LoadDocuments_state)
    m.add_state("ScannerIO_AddDocuments", ScannerIO_AddDocuments_state)
    m.add_state("ScannerIO_SaveDocuments", ScannerIO_SaveDocuments_state)
    m.add_state("DocCluster_LoadDocumentsCluster",
                DocCluster_LoadDocumentsCluster_state)
    m.add_state("DocCluster_LoadTXT", DocCluster_LoadTXT_state)
    m.add_state("ProcessCluster", ProcessCluster_state)
    m.add_state("SaveCluster", SaveCluster_state)
    m.add_state("End", EndNewDocument_state, end_state=1)
    # set start state
    m.set_start("START_NewDocument")
    return m
    def handle(self):
        """Run the SMTP-style conversation state machine for this connection."""
        machine = StateMachine()
        try:
            for state_name, state_fn in (
                ('greeting', greeting),
                ('helo', helo),
                ('mail', mail),
                ('rcpt', rcpt),
                ('data', data),
                ('process', process),
            ):
                machine.add_state(state_name, state_fn)
            machine.add_state('done', None, end_state=1)
            machine.set_start('greeting')

            machine.run((self, {}))

        except Exception as e:
            # On failure, attach the machine's current state (and the
            # cargo dict, if any) to the exception before re-raising so
            # the caller can report where the conversation broke.
            exception_data = {'state': machine.current_state}
            if machine.current_cargo:
                exception_data['data'] = machine.current_cargo[1]
            e.args = (exception_data, )
            raise
    def handle(self):
        """Drive the connection through the protocol state machine."""
        sm = StateMachine()
        try:
            sm.add_state('greeting', greeting)
            sm.add_state('helo', helo)
            sm.add_state('mail', mail)
            sm.add_state('rcpt', rcpt)
            sm.add_state('data', data)
            sm.add_state('process', process)
            sm.add_state('done', None, end_state=1)
            sm.set_start('greeting')

            sm.run((self, {}))

        except Exception as e:
            # Capture where the machine was (state name plus the cargo
            # dict, when present) and fold it into the exception's args
            # so the failure report includes the protocol position.
            failure_info = {'state': sm.current_state}
            if sm.current_cargo:
                failure_info['data'] = sm.current_cargo[1]
            e.args = (failure_info,)
            raise
Beispiel #10
0
class SamParaParser:
    """State-machine-driven parser for a single SAM paragraph.

    Walks a Para character by character, dispatching on markup
    characters ({, [, *, _, `, ", >, &, \\) to build up a Flow of plain
    strings and annotation/citation/insert objects.
    """

    def __init__(self):
        # These attributes are set by the parse method
        self.doc = None
        self.para = None
        self.current_string = None
        self.flow = None

        self.stateMachine = StateMachine()
        self.stateMachine.add_state("PARA", self._para)
        self.stateMachine.add_state("ESCAPE", self._escape)
        self.stateMachine.add_state("END", None, end_state=1)
        self.stateMachine.add_state("ANNOTATION-START", self._annotation_start)
        self.stateMachine.add_state("CITATION-START", self._citation_start)
        self.stateMachine.add_state("BOLD-START", self._bold_start)
        self.stateMachine.add_state("ITALIC-START", self._italic_start)
        self.stateMachine.add_state("CODE-START", self._code_start)
        self.stateMachine.add_state("QUOTES-START", self._quotes_start)
        self.stateMachine.add_state("INLINE-INSERT", self._inline_insert)
        self.stateMachine.add_state("CHARACTER-ENTITY", self._character_entity)
        self.stateMachine.set_start("PARA")
        # Compiled inline-markup patterns, matched against the rest of
        # the paragraph from the current position.
        self.patterns = {
            'escape': re.compile(r'\\', re.U),
            'escaped-chars': re.compile(r'[\\\(\{\}\[\]_\*,\.\*`"&]', re.U),
            'annotation': re.compile(
                r'(?<!\\)\{(?P<text>.*?)(?<!\\)\}(\(\s*(?P<type>\S*?\s*[^\\"\']?)(["\'](?P<specifically>.*?)["\'])??\s*(\((?P<namespace>\w+)\))?\s*(~(?P<language>[\w-]+))?\))?', re.U),
            'bold': re.compile(r'\*(?P<text>((?<=\\)\*|[^\*])*)(?<!\\)\*', re.U),
            'italic': re.compile(r'_(?P<text>((?<=\\)_|[^_])*)(?<!\\)_', re.U),
            'code': re.compile(r'`(?P<text>(``|[^`])*)`', re.U),
            'quotes': re.compile(r'"(?P<text>((?<=\\)"|[^"])*)(?<!\\)"', re.U),
            'inline-insert': re.compile(r'>\((?P<attributes>.*?)\)', re.U),
            'character-entity': re.compile(r'&(\#[0-9]+|#[xX][0-9a-fA-F]+|[\w]+);'),
            # NOTE(review): this pattern defines groups 'id', 'id_extra',
            # 'name_name', 'extra' and 'citation', but _citation_start
            # looks up 'name' and 'name_extra' — see the note there.
            'citation': re.compile(r'(\[\s*\*(?P<id>\S+)(\s+(?P<id_extra>.+?))?\])|(\[\s*\#(?P<name_name>\S+)(\s+(?P<extra>.+?))?\])|(\[\s*(?P<citation>.*?)\])', re.U)
        }

    def parse(self, para, doc, strip=True):
        """Parse paragraph text *para* in the context of *doc*.

        Returns the resulting Flow, or None when *para* is None.
        """
        if para is None:
            return None
        self.doc = doc
        self.para = Para(para, strip)
        self.current_string = ''
        self.flow = Flow()
        self.stateMachine.run(self.para)
        return self.flow

    def _para(self, para):
        """Main dispatch state: route on the next character of *para*."""
        try:
            char = para.next_char
        except IndexError:
            # End of paragraph: flush the pending text and terminate.
            self.flow.append(self.current_string)
            self.current_string = ''
            return "END", para
        if char == '\\':
            return "ESCAPE", para
        elif char == '{':
            return "ANNOTATION-START", para
        elif char == '[':
            return "CITATION-START", para
        elif char == "*":
            return "BOLD-START", para
        elif char == "_":
            return "ITALIC-START", para
        elif char == "`":
            return "CODE-START", para
        elif char == '"':
            return "QUOTES-START", para
        elif char == ">":
            return "INLINE-INSERT", para
        elif char == "&":
            return "CHARACTER-ENTITY", para
        else:
            # Ordinary character: accumulate and stay in PARA.
            self.current_string += char
            return "PARA", para

    def _annotation_start(self, para):
        """Handle '{': parse an {annotated phrase}(type) if one matches here."""
        match = self.patterns['annotation'].match(para.rest_of_para)
        if match:
            self.flow.append(self.current_string)
            self.current_string = ''
            annotation_type = match.group('type')
            language = match.group('language')
            text = self._unescape(match.group("text"))

            # If there is an annotated phrase with no annotation, look back
            # to see if it has been annotated already, and if so, copy the
            # closest preceding annotation.
            if annotation_type is None and not language:
                # First look back in the current flow
                # (which is not part of the doc structure yet).
                previous = self.flow.find_last_annotation(text)
                if previous is not None:
                    self.flow.append(previous)
                else:
                    # Then look back in the document.
                    previous = self.doc.find_last_annotation(text)
                    if previous is not None:
                        self.flow.append(previous)

                    # Else output a warning.
                    else:
                        self.current_string += text
                        SAM_parser_warning(
                                "Blank annotation found: {" +
                                text + "} " +
                                "If you are trying to insert curly braces " +
                                "into the document, use \{" + text +
                                "]. Otherwise, make sure annotated text matches "
                                "previous annotation exactly."
                        )
            else:
                #Check for link shortcut
                # NOTE(review): urlparse returns scheme='' (not None)
                # when no scheme is present, so this test looks like it
                # is always true for non-None types; also reached with
                # annotation_type None when only a language is given —
                # confirm intended behavior.
                if urlparse(annotation_type,None).scheme is not None:
                    specifically = annotation_type
                    annotation_type='link'
                else:
                    specifically = match.group('specifically') if match.group('specifically') is not None else None
                namespace = match.group('namespace').strip() if match.group('namespace') is not None else None
                self.flow.append(Annotation(annotation_type, text, specifically, namespace, language))
            # Skip past the consumed markup (next_char already took one).
            para.advance(len(match.group(0)) - 1)
            return "PARA", para
        else:
            # Not an annotation: treat the brace as literal text.
            self.current_string += '{'
            return "PARA", para

    def _citation_start(self, para):
        """Handle '[': parse an [id]/[#name]/[citation] reference if one matches."""
        match = self.patterns['citation'].match(para.rest_of_para)
        if match:
            self.flow.append(self.current_string)
            self.current_string = ''

            # Group lookups are probed with try/except because only one
            # alternative of the citation pattern participates.
            try:
                idref = match.group('id')
            except IndexError:
                idref=None
            # NOTE(review): the compiled pattern names this group
            # 'name_name', not 'name', so this lookup always raises
            # IndexError and nameref is always None — the nameref
            # branch below appears dead; likewise 'name_extra' is not a
            # defined group ('extra' is). TODO confirm intended names.
            try:
                nameref = match.group('name')
            except IndexError:
                nameref = None
            try:
                citation = match.group('citation')
            except IndexError:
                citation=None

            if idref:
                citation_type = 'idref'
                citation_value = idref.strip()
                extra = match.group('id_extra')
            elif nameref:
                citation_type = 'nameref'
                citation_value = nameref.strip()
                extra = match.group('name_extra')
            else:
                citation_type = 'citation'
                citation_value = citation.strip()
                extra = None

            self.flow.append(Citation(citation_type, citation_value, extra))
            para.advance(len(match.group(0)) - 1)
            return "PARA", para
        else:
            self.current_string += '['
            return "PARA", para

    def _bold_start(self, para):
        """Handle '*': parse *bold* text, else emit a literal asterisk."""
        match = self.patterns['bold'].match(para.rest_of_para)
        if match:
            self.flow.append(self.current_string)
            self.current_string = ''
            self.flow.append(Annotation('bold', self._unescape(match.group("text"))))
            para.advance(len(match.group(0)) - 1)
        else:
            self.current_string += '*'
        return "PARA", para

    def _italic_start(self, para):
        """Handle '_': parse _italic_ text, else emit a literal underscore."""
        match = self.patterns['italic'].match(para.rest_of_para)
        if match:
            self.flow.append(self.current_string)
            self.current_string = ''
            self.flow.append(Annotation('italic', self._unescape(match.group("text"))))
            para.advance(len(match.group(0)) - 1)
        else:
            self.current_string += '_'
        return "PARA", para

    def _code_start(self, para):
        """Handle '`': parse `code` text (`` is an escaped backtick)."""
        match = self.patterns['code'].match(para.rest_of_para)
        if match:
            self.flow.append(self.current_string)
            self.current_string = ''
            self.flow.append(Annotation('code', (match.group("text")).replace("``", "`")))
            para.advance(len(match.group(0)) - 1)
        else:
            self.current_string += '`'
        return "PARA", para

    def _quotes_start(self, para):
        """Handle '"': parse a "quoted" span, else emit a literal quote."""
        match = self.patterns['quotes'].match(para.rest_of_para)
        if match:
            self.flow.append(self.current_string)
            self.current_string = ''
            self.flow.append(Annotation('quotes', self._unescape(match.group("text"))))
            para.advance(len(match.group(0)) - 1)
        else:
            self.current_string += '"'
        return "PARA", para

    def _inline_insert(self, para):
        """Handle '>': parse a >(attributes) inline insert."""
        match = self.patterns['inline-insert'].match(para.rest_of_para)
        if match:
            self.flow.append(self.current_string)
            self.current_string = ''
            self.flow.append(InlineInsert(parse_insert(match.group("attributes"))))
            para.advance(len(match.group(0)) - 1)
        else:
            self.current_string += '>'
        return "PARA", para

    def _inline_insert_id(self, para):
        """Parse an inline insert by reference id.

        NOTE(review): 'inline-insert_id' is never added to
        self.patterns in __init__, so this would raise KeyError if
        invoked; no state maps to it — apparently dead code.
        """
        match = self.patterns['inline-insert_id'].match(para.rest_of_para)
        if match:
            self.flow.append(self.current_string)
            self.current_string = ''
            self.flow.append(InlineInsert('reference', match.group("id")))
            para.advance(len(match.group(0)) - 1)
        else:
            self.current_string += '>'
        return "PARA", para

    def _character_entity(self, para):
        """Handle '&': decode an HTML character entity, else emit literal '&'."""
        match = self.patterns['character-entity'].match(para.rest_of_para)
        if match:
            self.current_string += self.patterns['character-entity'].sub(self._replace_charref, match.group(0))
            para.advance(len(match.group(0)) - 1)
        else:
            self.current_string += '&'
        return "PARA", para

    def _replace_charref(self, match):
        """Resolve a character reference (match object or raw string) to its character.

        Raises SAMParserError when the entity is not recognized.
        """
        try:
            charref = match.group(0)
        except AttributeError:
           charref = match
        character = html.unescape(charref)
        if character == charref:  # Escape not recognized
            raise SAMParserError("Unrecognized character entity found: " + charref)
        return character

    def _escape(self, para):
        """Handle '\\': keep the next char if escapable, else keep the backslash too."""
        char = para.next_char
        if self.patterns['escaped-chars'].match(char):
            self.current_string += char
        else:
            self.current_string += '\\' + char
        return "PARA", para

    def _unescape(self, string):
        """Return *string* with backslash escapes and character entities resolved."""
        result = ''
        e = enumerate(string)
        for pos, char in e:
            try:
                if char == '\\' and self.patterns['escaped-chars'].match(string[pos+1]):
                    # Emit the escaped character and skip it in the iterator.
                    result += string[pos+1]
                    next(e, None)
                elif char == '&':
                    match = self.patterns['character-entity'].match(string[pos:])
                    if match:
                        result += self.patterns['character-entity'].sub(self._replace_charref, match.group(0))
                        # Skip the characters consumed by the entity.
                        for i in range(0, len(match.group(0))):
                            next(e, None)
                    else:
                        result += char
                else:
                    result += char
            except IndexError:
                # Trailing backslash at end of string: keep it literally.
                result += char
        return result
Beispiel #11
0
    with open('变量字典.txt', 'r') as f:
        data = f.read()
    m = pblock.split(data)

    Dict_Dim = []

    i, j = 1, 1
    ndm = 0
    for rawmatch in m:
        match = re.sub('\n{1,}', '\n', rawmatch)
        if i == 1:  # 变量字典的说明
            pass
        elif i == 2:  # 数组尺寸声明
            lines = pline.findall(match)
            sm.set_start("Dedent")
            sm.handler = sm.handlers[sm.startState]
            for i, line in enumerate(lines):

                if i != 0:
                    if description_pos >= 20:
                        if description_pos == last_pos:
                            newState = "Nodent"
                        elif description_pos < last_pos:
                            newState = "Dedent"
                        else:
                            newState = "Indent"
                        num_dent = int((description_pos - last_pos) / 3)
                    else:
                        newState = "Endent"
                else:
Beispiel #12
0
class SamParser:
    def __init__(self):
        """Build the block-level SAM state machine and its line patterns."""

        self.stateMachine = StateMachine()
        self.stateMachine.add_state("NEW", self._new_file)
        self.stateMachine.add_state("SAM", self._sam)
        self.stateMachine.add_state("BLOCK", self._block)
        self.stateMachine.add_state("CODEBLOCK-START", self._codeblock_start)
        self.stateMachine.add_state("CODEBLOCK", self._codeblock)
        self.stateMachine.add_state("BLOCKQUOTE-START", self._blockquote_start)
        self.stateMachine.add_state("FRAGMENT-START", self._fragment_start)
        self.stateMachine.add_state("PARAGRAPH-START", self._paragraph_start)
        self.stateMachine.add_state("PARAGRAPH", self._paragraph)
        self.stateMachine.add_state("RECORD-START", self._record_start)
        self.stateMachine.add_state("RECORD", self._record)
        self.stateMachine.add_state("LIST-ITEM", self._list_item)
        self.stateMachine.add_state("NUM-LIST-ITEM", self._num_list_item)
        self.stateMachine.add_state("LABELED-LIST-ITEM", self._labeled_list_item)
        self.stateMachine.add_state("BLOCK-INSERT", self._block_insert)
        self.stateMachine.add_state("STRING-DEF", self._string_def)
        self.stateMachine.add_state("LINE-START", self._line_start)
        self.stateMachine.add_state("EMBEDDED-XML", self._embedded_xml)
        self.stateMachine.add_state("END", None, end_state=1)
        self.stateMachine.set_start("NEW")
        # Text of the paragraph currently being accumulated (None until
        # the first paragraph starts).
        self.current_paragraph = None
        self.doc = DocStructure()
        self.source = None
        # Line-classification patterns used by _sam to dispatch each
        # source line to the matching -START state.
        self.patterns = {
            "sam-declaration": re.compile(r"sam:\s*(?:(?:\{(?P<namespace>\S+?)\})|(?P<schema>\S+))?"),
            "comment": re.compile(r"\s*#.*"),
            "block-start": re.compile(
                r"(?P<indent>\s*)(?P<element>[\w_\.-]+?):(\((?P<attributes>.*?(?<!\\))\))?(?P<content>.+)?"
            ),
            "codeblock-start": re.compile(
                r'(?P<indent>\s*)(?P<flag>```[^\s\(]*)(\((?P<language>\w*)\s*(["\'](?P<source>.+?)["\'])?\s*(\((?P<namespace>\S+?)\))?(?P<other>.+?)?\))?'
            ),
            "blockquote-start": re.compile(
                r'(?P<indent>\s*)("""|\'\'\'|blockquote:)(\((?P<attributes>.*?(?<!\\))\))?((\[\s*\*(?P<id>\S+)(?P<id_extra>.+?)\])|(\[\s*\#(?P<name>\S+)(?P<name_extra>.+?)\])|(\[\s*(?P<citation>.*?)\]))?'
            ),
            "fragment-start": re.compile(r"(?P<indent>\s*)~~~(\((?P<attributes>.*?)\))?"),
            "paragraph-start": re.compile(r"\w*"),
            "line-start": re.compile(r"(?P<indent>\s*)\|(\((?P<attributes>.*?)\))?\s(?P<text>.*)"),
            "blank-line": re.compile(r"^\s*$"),
            "record-start": re.compile(r"(?P<indent>\s*)(?P<record_name>[a-zA-Z0-9-_]+)::(?P<field_names>.*)"),
            "list-item": re.compile(r"(?P<indent>\s*)(?P<marker>\*\s+)(?P<content>.*)"),
            "num-list-item": re.compile(r"(?P<indent>\s*)(?P<marker>[0-9]+\.\s+)(?P<content>.*)"),
            "labeled-list-item": re.compile(r"(?P<indent>\s*)\|(?P<label>\S.*?)(?<!\\)\|\s+(?P<content>.*)"),
            "block-insert": re.compile(r"(?P<indent>\s*)>>\((?P<attributes>.*?)\)\w*"),
            "string-def": re.compile(r"(?P<indent>\s*)\$(?P<name>\w*?)=(?P<value>.+)"),
            "embedded-xml": re.compile(r"(?P<indent>\s*)(?P<xmltag>\<\?xml.+)"),
        }

    def parse(self, source):
        """Parse *source* by running the block-level state machine.

        Raises SAMParserError if the document ends while a structure is
        still incomplete.
        """
        self.source = StringSource(source)
        try:
            self.stateMachine.run(self.source)
        except EOFError:
            # Bug fix: current_paragraph may still be None here (EOF
            # before any paragraph started); concatenating None raised
            # TypeError and masked the real parse error. str() keeps
            # the message printable in every case.
            raise SAMParserError(
                "Document ended before structure was complete. At:\n\n"
                + str(self.current_paragraph)
            )

    def paragraph_start(self, line):
        self.current_paragraph = line.strip()

    def paragraph_append(self, line):
        self.current_paragraph += " " + line.strip()

    def pre_start(self, line):
        self.current_paragraph = line

    def pre_append(self, line):
        self.current_paragraph += line

    def _new_file(self, source):
        """Consume the first line and require a valid SAM declaration."""
        declaration = self.patterns["sam-declaration"].match(source.next_line)
        if not declaration:
            raise SAMParserError("Not a SAM file!")
        self.doc.new_root(declaration)
        return "SAM", (source, None)

    def _block(self, context):
        """Create a named block from the current line match, then return to SAM."""
        source, match = context
        block_indent = len(match.group("indent"))
        element_name = match.group("element").strip()
        block_attrs = self.parse_block_attributes(match.group("attributes"))
        content_flow = para_parser.parse(match.group("content"), self.doc)
        self.doc.new_block(element_name, block_attrs, content_flow, block_indent)
        return "SAM", context

    def _codeblock_start(self, context):
        """Open a codeblock: compile its end marker and collect its attributes.

        Returns the CODEBLOCK state, which accumulates lines until the
        end marker is seen.
        """
        source, match = context
        indent = len(match.group("indent"))
        codeblock_flag = match.group("flag")
        # Bug fix: the flag is user text and may contain regex
        # metacharacters (the codeblock-start pattern allows any
        # non-space, non-"(" characters after ```), so it must be
        # escaped before being embedded in the end-marker pattern.
        # Also use a raw string for the trailing \s*$.
        self.patterns["codeblock-end"] = re.compile(
            r"(\s*)" + re.escape(codeblock_flag) + r"\s*$"
        )

        attributes = {}

        language = match.group("language")
        if language is not None:
            attributes["language"] = language

        # NOTE: deliberately shadows the `source` parameter, as in the
        # original code; `source` is not used again in this method.
        source = match.group("source")
        if source is not None:
            attributes["source"] = source

        namespace = match.group("namespace")
        if namespace is not None:
            attributes["namespace"] = namespace

        other = match.group("other")
        if other is not None:
            attributes.update(self.parse_block_attributes(other))

        self.doc.new_block("codeblock", attributes, None, indent)
        self.pre_start("")
        return "CODEBLOCK", context

    def _codeblock(self, context):
        """Accumulate codeblock lines until the end marker closes the block."""
        source, match = context
        current = source.next_line
        if not self.patterns["codeblock-end"].match(current):
            self.pre_append(current)
            return "CODEBLOCK", context
        self.doc.new_flow(Pre(self.current_paragraph))
        return "SAM", context

    def _blockquote_start(self, context):
        """Open a blockquote block, attaching an optional citation child."""
        source, match = context
        indent = len(match.group("indent"))

        # TODO: Refactor this with the paraparser version

        # Anything after the matched blockquote marker is an error.
        extra = source.current_line.rstrip()[len(match.group(0)) :]
        if extra:
            raise SAMParserError("Extra text found after blockquote start: " + extra)

        attributes = self.parse_block_attributes(match.group("attributes"))

        b = self.doc.new_block("blockquote", attributes, None, indent)

        # see if there is a citation
        # Group lookups are probed with try/except since only one
        # citation alternative of the pattern can participate.
        try:
            idref = match.group("id")
        except IndexError:
            idref = None
        try:
            nameref = match.group("name")
        except IndexError:
            nameref = None
        try:
            citation = match.group("citation")
        except IndexError:
            citation = None

        if idref:
            citation_type = "idref"
            citation_value = idref.strip()
            extra = match.group("id_extra")
        elif nameref:
            citation_type = "nameref"
            citation_value = nameref.strip()
            extra = match.group("name_extra")
        elif citation:
            citation_type = "citation"
            citation_value = citation.strip()
            # NOTE(review): `extra` is not reassigned in this branch, so
            # the Citation below receives the leftover value from the
            # extra-text check above (always "" when we reach here) —
            # presumably intended to be None; confirm.
        else:
            citation_type = None

        if citation_type:
            cit = Citation(citation_type, citation_value, extra)
            b.add_child(cit)

        return "SAM", context

    def _fragment_start(self, context):
        """Open a fragment block ('~~~'), with optional attributes."""
        source, match = context
        fragment_indent = len(match.group("indent"))

        fragment_attrs = {}
        raw_attrs = match.group("attributes")
        if raw_attrs is not None:
            fragment_attrs.update(self.parse_block_attributes(raw_attrs))

        self.doc.new_block("fragment", fragment_attrs, None, fragment_indent)
        return "SAM", context

    def _paragraph_start(self, context):
        """Open a paragraph at the current line's indent and start buffering it."""
        source, match = context
        line = source.current_line
        self.doc.new_paragraph(None, "", len(line) - len(line.lstrip()))
        self.paragraph_start(line)
        return "PARAGRAPH", context

    def _paragraph(self, context):
        """Accumulate paragraph lines until EOF, a blank line, or a context pop."""
        source, match = context
        try:
            line = source.next_line
        except EOFError:
            # End of input: flush the buffered paragraph and finish.
            f = para_parser.parse(self.current_paragraph, self.doc)
            self.doc.new_flow(f)
            return "END", context

        if self.patterns["blank-line"].match(line):
            # Blank line terminates the paragraph.
            f = para_parser.parse(self.current_paragraph, self.doc)
            self.doc.new_flow(f)
            return "SAM", context

        if self.doc.in_context(["p", "li"]):
            # A new block starts here: flush the paragraph and push the
            # line back so the SAM state re-reads it.
            f = para_parser.parse(self.current_paragraph, self.doc)
            self.doc.new_flow(f)
            source.return_line()
            return "SAM", context

        self.paragraph_append(line)
        return "PARAGRAPH", context

    def _list_item(self, context):
        """Open an unordered list item and start buffering its content."""
        source, match = context
        item_indent = len(match.group("indent"))
        body_indent = item_indent + len(match.group("marker"))
        self.doc.new_unordered_list_item(item_indent, body_indent)
        self.paragraph_start(str(match.group("content")).strip())
        return "PARAGRAPH", context

    def _num_list_item(self, context):
        """Open an ordered (numbered) list item and start buffering its content."""
        source, match = context
        item_indent = len(match.group("indent"))
        body_indent = item_indent + len(match.group("marker"))
        self.doc.new_ordered_list_item(item_indent, body_indent)
        self.paragraph_start(str(match.group("content")).strip())
        return "PARAGRAPH", context

    def _labeled_list_item(self, context):
        """Open a labeled list item ('|label| content') and buffer its content."""
        source, match = context
        item_indent = len(match.group("indent"))
        item_label = match.group("label")
        self.doc.new_labeled_list_item(item_indent, item_label)
        self.paragraph_start(str(match.group("content")).strip())
        return "PARAGRAPH", context

    def _block_insert(self, context):
        """Insert a block-level '>>(...)' insert element at the matched indent."""
        source, match = context
        self.doc.new_block(
            "insert",
            attributes=parse_insert(match.group("attributes")),
            text=None,
            indent=len(match.group("indent")),
        )
        return "SAM", context

    def _string_def(self, context):
        """Record a '$name=value' string definition, parsing the value as a flow."""
        source, match = context
        parsed_value = para_parser.parse(match.group("value"), self.doc)
        self.doc.new_string_def(
            match.group("name"), parsed_value, indent=len(match.group("indent"))
        )
        return "SAM", context

    def _line_start(self, context):
        """Open a single-line element ('| text'), preserving its whitespace."""
        source, match = context
        line_indent = len(match.group("indent"))
        line_attrs = self.parse_block_attributes(match.group("attributes"))
        line_flow = para_parser.parse(match.group("text"), self.doc, strip=False)
        self.doc.new_block("line", line_attrs, line_flow, indent=line_indent)
        return "SAM", context

    def _record_start(self, context):
        """Begin a record set: read its name and comma-separated field names."""
        source, match = context
        set_name = match.group("record_name").strip()
        fields = [name.strip() for name in match.group("field_names").split(",")]
        self.doc.new_record_set(set_name, fields, len(match.group("indent")))
        return "RECORD", context

    def _record(self, context):
        """Consume one record line, or close the record set on blank/dedent/EOF."""
        source, match = context
        try:
            line = source.next_line
        except EOFError:
            return "END", context
        indent = len(line) - len(line.lstrip())
        if self.patterns["blank-line"].match(line):
            # Blank line ends the record set.
            return "SAM", context
        elif indent < self.doc.current_block.indent:
            # Dedent ends the record set; push the line back for SAM.
            source.return_line()
            return "SAM", context
        else:
            # Split on unescaped commas; field count must match the header.
            field_values = [x.strip() for x in re.split(r"(?<!\\),", line)]
            if len(field_values) != len(self.doc.fields):
                raise SAMParserError("Record length does not match record set header. At:\n\n " + line)
            record = list(zip(self.doc.fields, field_values))
            self.doc.new_record(record)
            return "RECORD", context

    def _embedded_xml(self, context):
        """Consume an embedded XML fragment by feeding lines to an expat parser.

        Lines are parsed until expat reports "junk after document
        element" (error code 9), which signals that the fragment ended
        on the previous line. Any other expat error propagates.
        """
        source, match = context
        indent = len(match.group("indent"))
        embedded_xml_parser = xml.parsers.expat.ParserCreate()
        embedded_xml_parser.XmlDeclHandler = self._embedded_xml_declaration_check
        embedded_xml_parser.Parse(source.current_line.strip())
        xml_lines = []
        try:
            while True:
                line = source.next_line
                xml_lines.append(line)
                embedded_xml_parser.Parse(line)
        except xml.parsers.expat.ExpatError as err:
            if err.code == 9:  # junk after document element
                # The offending line belongs to the enclosing document:
                # push it back and keep everything before it.
                source.return_line()
                xml_text = "".join(xml_lines[:-1])
                self.doc.new_embedded_xml(xml_text, indent)
                return "SAM", context
            else:
                raise

    def _embedded_xml_declaration_check(self, version, encoding, standalone):
        if version != "1.0":
            raise SAMParserError("The version of an embedded XML fragment must be 1.0.")
        if encoding.upper() != "UTF-8":
            raise SAMParserError("The encoding of an embedded XML fragment must be UTF-8.")

    def _sam(self, context):
        """Classify the next line and return the matching parser state.

        Patterns are tried in a fixed order; the first match wins. Comments
        are consumed here directly instead of via a dedicated state.
        """
        source, _ = context
        try:
            line = source.next_line
        except EOFError:
            return "END", context

        comment_match = self.patterns["comment"].match(line)
        if comment_match is not None:
            self.doc.new_comment(Comment(line.strip()[1:]))
            return "SAM", (source, comment_match)

        # Ordered dispatch: first pattern that matches decides the next state.
        for pattern_name, next_state in (
            ("record-start", "RECORD-START"),
            ("blank-line", "SAM"),
            ("codeblock-start", "CODEBLOCK-START"),
            ("blockquote-start", "BLOCKQUOTE-START"),
            ("fragment-start", "FRAGMENT-START"),
            ("list-item", "LIST-ITEM"),
            ("num-list-item", "NUM-LIST-ITEM"),
            ("labeled-list-item", "LABELED-LIST-ITEM"),
            ("block-insert", "BLOCK-INSERT"),
            ("string-def", "STRING-DEF"),
            ("line-start", "LINE-START"),
            ("embedded-xml", "EMBEDDED-XML"),
            ("block-start", "BLOCK"),
            ("paragraph-start", "PARAGRAPH-START"),
        ):
            found = self.patterns[pattern_name].match(line)
            if found is not None:
                return next_state, (source, found)

        raise SAMParserError("I'm confused")

    def serialize(self, serialize_format):
        """Delegate serialization of the parsed document to the DocStructure."""
        return self.doc.serialize(serialize_format)

    def parse_block_attributes(self, attributes_string):
        """Parse "*id #name ?condition" tokens into a dict; None if nothing given.

        Raises SAMParserError on unknown prefixes, multiple ids/names, or a
        duplicate id. Any id is registered on self.doc.ids as a side effect.
        """
        try:
            tokens = attributes_string.split()
        except AttributeError:
            # attributes_string was None: the block carried no attributes.
            return None
        unknown = [t for t in tokens if t[0] not in "?#*"]
        if unknown:
            raise SAMParserError("Unexpected attribute(s): {0}".format(", ".join(unknown)))
        ids = [t[1:] for t in tokens if t[0] == "*"]
        names = [t[1:] for t in tokens if t[0] == "#"]
        conditions = [t[1:] for t in tokens if t[0] == "?"]
        if len(ids) > 1:
            raise SAMParserError("More than one ID specified: " + ", ".join(ids))
        if len(names) > 1:
            raise SAMParserError("More than one name specified: " + ", ".join(names))
        result = {}
        if ids:
            if ids[0] in self.doc.ids:
                raise SAMParserError("Duplicate ID found: " + ids[0])
            self.doc.ids.extend(ids)
            result["id"] = "".join(ids)
        if names:
            result["name"] = "".join(names)
        if conditions:
            result["conditions"] = " ".join(conditions)
        return result
Beispiel #13
0
# create the state machine
# Poker-hand parser FSM: one handler per section of a hand history.
m2 = StateMachine()
for _state_name, _handler in (
    ("FindState_state", findState_transitions),
    ("Header_state", header_transitions),
    ("Seats_state", seats_transitions),
    ("Blinds_state", blinds_transitions),
    ("Preflop_state", preflop_transitions),
    ("Flop_state", flop_transitions),
    ("Turn_state", turn_transitions),
    ("River_state", river_transitions),
    ("Showdown_state", showdown_transitions),
    ("Summary_state", summary_transitions),
):
    m2.add_state(_state_name, _handler)
m2.add_state("End_state", None, end_state=1)

m2.set_start("Header_state")

# READ THE INPUT FILE AND RUN THE STATE MACHINE FOR EACH VALID INPUT
inputText = []
with open(sys.argv[1], 'r') as f:
    for line in f:
        # Keep only lines that carry more than a bare newline.
        if len(line) > 1:
            inputText.append(line)
Beispiel #14
0
        # NOTE(review): fragment -- the enclosing function's def line and the
        # definitions of j, L1, L2, L2_button, floor, tot_floors, and the
        # idle/moving_up/moving_down handlers are missing from this paste.
        j=j+1
    if floor in L2:
        print("Lift is already at "+str(floor))
        L2.remove(floor)

    # Ask a direction for every outstanding hall call (Python 2 raw_input).
    if(len(L2)>0):
        print("Enter UP or DOWN for each floor")
        for i in range(len(L2)):
            L2_button.append(raw_input(str(L2[i])+" = "))

    # Derive the travel envelope (min/max requested floor) from both queues.
    top_floor=0
    bottom_floor=tot_floors
    for i in range(len(L1)):
        if top_floor<L1[i]:
            top_floor=L1[i]
        if bottom_floor>L1[i]:
            bottom_floor=L1[i]
    for i in range(len(L2)):
        if top_floor<L2[i]:
            top_floor=L2[i]
        if bottom_floor>L2[i]:
            bottom_floor=L2[i]
    print("--------------------------------------")
    # Lift FSM: Idle/Up/Down with Stop as the terminal state.
    lift = StateMachine()
    lift.add_state("Idle_state", idle)
    lift.add_state("Up_state", moving_up)
    lift.add_state("Down_state", moving_down)
    lift.add_state("Stop_state", None, end_state=1)
    currentState="Idle"
    lift.set_start("Idle_state")
    lift.run(floor)
Beispiel #15
0
class SamParser:
    """State-machine-driven parser for SAM markup (simplified variant).

    Each ``_<state>`` handler consumes lines from a source object (via
    ``next_line`` / ``currentLine``) and returns ``(next_state, source)``;
    the StateMachine dispatches on the returned state name until "END".
    """

    def __init__(self):

        # Register one handler per parser state; "END" is the terminal state.
        self.stateMachine = StateMachine()
        self.stateMachine.add_state("NEW", self._new_file)
        self.stateMachine.add_state("SAM", self._sam)
        self.stateMachine.add_state("BLOCK", self._block)
        self.stateMachine.add_state("CODEBLOCK-START", self._codeblock_start)
        self.stateMachine.add_state("CODEBLOCK", self._codeblock)
        self.stateMachine.add_state("PARAGRAPH-START", self._paragraph_start)
        self.stateMachine.add_state("PARAGRAPH", self._paragraph)
        self.stateMachine.add_state("RECORD-START", self._record_start)
        self.stateMachine.add_state("RECORD", self._record)
        self.stateMachine.add_state("LIST-ITEM", self._list_item)
        self.stateMachine.add_state("NUM-LIST-ITEM", self._num_list_item)
        self.stateMachine.add_state("BLOCK-INSERT", self._block_insert)
        self.stateMachine.add_state("END", None, end_state=1)
        self.stateMachine.set_start("NEW")
        # Text accumulator for the paragraph/codeblock currently being read.
        self.current_paragraph = None
        self.doc = DocStructure()
        self.source = None
        # Line classifiers; _sam tries these in a fixed order.
        self.patterns = {
            'comment': re.compile(r'\s*#.*'),
            'block-start':
            re.compile(r'(\s*)([a-zA-Z0-9-_]+):(?:\((.*?)\))?(.*)'),
            'codeblock-start': re.compile(r'(\s*)```(.*)'),
            'codeblock-end': re.compile(r'(\s*)```\s*$'),
            'paragraph-start': re.compile(r'\w*'),
            'blank-line': re.compile(r'^\s*$'),
            'record-start': re.compile(r'\s*[a-zA-Z0-9-_]+::(.*)'),
            'list-item': re.compile(r'(\s*)(\*\s+)(.*)'),
            'num-list-item': re.compile(r'(\s*)([0-9]+\.\s+)(.*)'),
            'block-insert': re.compile(r'(\s*)>>\(.*?\)\w*')
        }

    def parse(self, source):
        """Run the state machine over *source* until the document ends."""
        self.source = source
        try:
            self.stateMachine.run(self.source)
        except EOFError:
            # NOTE(review): current_paragraph may still be None here, which
            # would make this concatenation raise TypeError -- confirm.
            raise Exception(
                "Document ended before structure was complete. At:\n\n" +
                self.current_paragraph)

    def paragraph_start(self, line):
        """Begin accumulating a new paragraph from *line* (stripped)."""
        self.current_paragraph = line.strip()

    def paragraph_append(self, line):
        """Append *line* to the current paragraph, joined with a space."""
        self.current_paragraph += " " + line.strip()

    def pre_start(self, line):
        """Begin accumulating preformatted text (whitespace preserved)."""
        self.current_paragraph = line

    def pre_append(self, line):
        """Append raw *line* to the preformatted accumulator."""
        self.current_paragraph += line

    def _new_file(self, source):
        """Expect the 'sam:' header on the first line; otherwise refuse the file."""
        line = source.next_line
        if line[:4] == 'sam:':
            # NOTE(review): [5:] skips the character right after 'sam:'
            # (presumably a space) -- confirm the expected header format.
            self.doc.new_root('sam', line[5:])
            return "SAM", source
        else:
            raise Exception("Not a SAM file!")

    def _block(self, source):
        """Open a named block; content beginning with ':' starts a record set."""
        line = source.currentLine
        match = self.patterns['block-start'].match(line)
        indent = len(match.group(1))
        element = match.group(2).strip()
        attributes = match.group(3)
        content = match.group(4).strip()

        if content[:1] == ':':
            return "RECORD-START", source
        else:
            self.doc.new_block(element, attributes, content, indent)
            return "SAM", source

    def _codeblock_start(self, source):
        """Open a ``` codeblock, taking the language from a "(lang)" suffix.

        NOTE(review): if the ``` line has no "(...)" suffix, attributes is
        None and .group(1) raises AttributeError -- confirm input guarantees.
        """
        line = source.currentLine
        local_indent = len(line) - len(line.lstrip())
        match = self.patterns['codeblock-start'].match(line)
        attributes = re.compile(r'\((.*?)\)').match(match.group(2).strip())
        language = attributes.group(1)
        self.doc.new_block('codeblock', language, None, local_indent)
        self.pre_start('')
        return "CODEBLOCK", source

    def _codeblock(self, source):
        """Accumulate raw lines until the closing ``` fence."""
        line = source.next_line
        if self.patterns['codeblock-end'].match(line):
            self.doc.new_flow(Pre(self.current_paragraph))
            return "SAM", source
        else:
            self.pre_append(line)
            return "CODEBLOCK", source

    def _paragraph_start(self, source):
        """Open a 'p' block and start accumulating its first line."""
        line = source.currentLine
        local_indent = len(line) - len(line.lstrip())
        self.doc.new_block('p', None, '', local_indent)
        self.paragraph_start(line)
        return "PARAGRAPH", source

    def _paragraph(self, source):
        """Accumulate paragraph lines; a blank line ends and parses the flow."""
        line = source.next_line
        if self.patterns['blank-line'].match(line):
            para_parser.parse(self.current_paragraph, self.doc)
            return "SAM", source
        else:
            self.paragraph_append(line)
            return "PARAGRAPH", source

    def _list_item(self, source):
        """Open an unordered list item; its text continues as a paragraph."""
        line = source.currentLine
        match = self.patterns['list-item'].match(line)
        local_indent = len(match.group(1))
        content_indent = local_indent + len(match.group(2))
        self.doc.new_unordered_list_item(local_indent, content_indent)
        self.paragraph_start(str(match.group(3)).strip())
        return "PARAGRAPH", source

    def _num_list_item(self, source):
        """Open an ordered list item; its text continues as a paragraph."""
        line = source.currentLine
        match = self.patterns['num-list-item'].match(line)
        local_indent = len(match.group(1))
        content_indent = local_indent + len(match.group(2))
        self.doc.new_ordered_list_item(local_indent, content_indent)
        self.paragraph_start(str(match.group(3)).strip())
        return "PARAGRAPH", source

    def _block_insert(self, source):
        """Record a '>>(...)' insert block with its parsed attributes."""
        line = source.currentLine
        indent = len(source.currentLine) - len(source.currentLine.lstrip())
        attribute_pattern = re.compile(r'\s*>>\((.*?)\)')
        match = attribute_pattern.match(line)
        self.doc.new_block('insert',
                           text='',
                           attributes=parse_insert(match.group(1)),
                           indent=indent)
        return "SAM", source

    def _record_start(self, source):
        """Open a record set whose field names follow the 'name::' marker."""
        line = source.currentLine
        match = self.patterns['block-start'].match(line)
        local_indent = len(match.group(1))
        local_element = match.group(2).strip()
        field_names = [
            x.strip() for x in self.patterns['record-start'].match(line).group(
                1).split(',')
        ]
        self.doc.new_record_set(local_element, field_names, local_indent)
        return "RECORD", source

    def _record(self, source):
        """Consume one comma-separated record line; a blank line ends the set.

        NOTE(review): unlike the sibling parser earlier in this file, this
        variant does not validate the field count or honour escaped commas.
        """
        line = source.next_line
        if self.patterns['blank-line'].match(line):
            return "SAM", source
        else:
            field_values = [x.strip() for x in line.split(',')]
            record = list(zip(self.doc.fields, field_values))
            self.doc.new_record(record)
            return "RECORD", source

    def _sam(self, source):
        """Classify the next line and return the matching parser state."""
        try:
            line = source.next_line
        except EOFError:
            return "END", source
        if self.patterns['comment'].match(line):
            self.doc.new_comment(Comment(line.strip()[1:]))
            return "SAM", source
        elif self.patterns['block-start'].match(line):
            return "BLOCK", source
        elif self.patterns['blank-line'].match(line):
            return "SAM", source
        elif self.patterns['codeblock-start'].match(line):
            return "CODEBLOCK-START", source
        elif self.patterns['list-item'].match(line):
            return "LIST-ITEM", source
        elif self.patterns['num-list-item'].match(line):
            return "NUM-LIST-ITEM", source
        elif self.patterns['block-insert'].match(line):
            return "BLOCK-INSERT", source
        elif self.patterns['paragraph-start'].match(line):
            return "PARAGRAPH-START", source
        else:
            raise Exception("I'm confused")

    def serialize(self, serialize_format):
        """Delegate serialization of the parsed document to the DocStructure."""
        return self.doc.serialize(serialize_format)
    # NOTE(review): fragment of a GUI state handler whose def line is missing
    # from this paste; servos_enable, txt and msgbox are defined elsewhere.
    global proceso_servos

    # Servo-motor control window: launch the camera controller once. #
    # NOTE(review): servos_enable is assigned below without a `global`
    # declaration, so unless it is a parameter of the missing def, reading it
    # here raises UnboundLocalError -- confirm against the full source.
    if servos_enable == False:
        servos_enable = True
        proceso_servos = subprocess.Popen(['python','control_camera_position.py'])
    else:
        msgbox("El programa de control de la cámara ya está abierto.", title="Atención")

    return("STATE_Buttons",txt)
    

    
    

# MAIN function
if __name__ == "__main__":

    # Build the GUI FSM: every window/handler pair plus the terminal state.
    m = StateMachine()
    _ui_states = (
        ("STATE_Welcome", start_window),
        ("STATE_Buttons", buttons_window),
        ("STATE_Camera", camera_handler),
        ("STATE_Graficos", graficos_hanlder),
        ("STATE_Exit", exit_function),
        ("STATE_R_U_SURE", are_you_sure_function),
        ("STATE_cameracontrol", cameracontrol_function),
    )
    for _name, _handler in _ui_states:
        m.add_state(_name, _handler)
    m.add_state("Bye_state", None, end_state=1)
    m.set_start("STATE_Welcome")
    m.run("Exec")
        # NOTE(review): tail of a transition function cut off in this paste;
        # newState, command and nparray are bound in the missing part.
        return (newState, command, nparray)
    else:
        return (newState, command)


def process_transition(data):
    """Scale and transpose the shared nparray, then prompt for the next command.

    Mutates the module-level `nparray` in place (scaled by 5, then
    transposed) and returns ("Stopped"|"Collecting", command) based on
    whether the user typed "Stop".
    """
    global nparray
    nparray = 5 * np.array(nparray)
    print("printing escalar")
    print(nparray)
    nparray = nparray.transpose()
    print("printing transpose ")
    print(nparray)
    command = input(" Type the next Command: ")
    newState = "Stopped" if command == "Stop" else "Collecting"
    print(newState)
    return (newState, command)


if __name__ == "__main__":
    # Data-collection FSM; no end state is registered, so run() keeps
    # cycling until the StateMachine itself stops.
    m = StateMachine()
    for _label, _fn in (
        ("Stopped", stop_transition),
        ("Started", start_transition),
        ("Collecting", collect_transition),
        ("Processing", process_transition),
    ):
        m.add_state(_label, _fn)
    m.set_start("Stopped")
    m.run("")
Beispiel #18
0
class ParserCmdLine:
	"""Python 2 CLI front-end for installing/cleaning Indexer DB structures.

	Parses argv into a command ('i' install, 'c' clean, 'u' update) plus
	Firebird connection parameters, opens the database via kdb, and drives
	a StateMachine whose state names are the remaining command characters.
	"""

	def __init__(self,argv):
		#self.argv=argv
		# Connection defaults; overridden by -h/-d/-s/-u/-p from argv.
		self.params={'-h':'localhost','-d':'','-s':'','-u':'SYSDBA','-p':'masterkey'}
		self.ch='UTF8'
		self.ch=self.detect_codecs()
		self.mydb=None
		self.cmd=None
		self.sm = StateMachine()
		self.create_state(self.sm)
		self.parse_cmd(argv)


	def bad_parameters(self):
		"""Report a parameter error (Russian message) and print usage."""
		# NOTE(review): unlike help()/no_connect(), this print is not encoded
		# with self.ch -- may garble on Windows consoles; confirm.
		print u'Ошибка при вводе параметров'
		self.help()

	def help(self):
		"""Print the (Russian) usage text, encoded for the local console."""
		print u'''Использование: 
setup.py i [-h host] [-d database] [-s dsn] [-u user] [-p password] 
setup.py c [-h host] [-d database] [-s dsn] [-u user] [-p password] 
Комманды:
	i	- создать в БД структуры Индексатора, 
			требует файл indexator.sql
	c	- удалить из БД структуры Индексатора
	u	- обновить в БД структуры Индексатора, 
			требует файл update_xxxx.sql
Параметры:
	-h host 	- имя сервера БД
	-d database - алиас или путь к базе данных на сервере
	-s dsn 		- источник данных в формате host:database
	-u user 	- имя пользователя (по умолчанию SYSDBA)
	-p password - пароль (по умолчанию masterkey)
		'''.encode(self.ch)

	def no_connect(self):
		"""Report a connection failure and clear the pending command."""
		print u"Ошибка подключения! Проверте параметры подключения и доступность сервера.".encode(self.ch)   
		self.cmd=''


	def connect_db(self):
		"""Open the Firebird DB from -s dsn or -h/-d params; None on failure."""
		try:
			if (self.params['-s']==''):# and self.params['-d']!=''):
				print self.params
				self.mydb=kdb.connect(host=self.params['-h'],database=self.params['-d'],user=self.params['-u'], password=self.params['-p'],dialect=3, charset='UTF8' )
			else:
				self.mydb=kdb.connect(dsn=self.params['-s'],user=self.params['-u'], password=self.params['-p'],dialect=3, charset='UTF8' )
			return self.mydb
		except:
			# NOTE(review): bare except hides the real connection error.
			print 'Except'
			self.no_connect()
			return None

	def detect_codecs(self):
		"""Console codec: CP866 on Windows ('nt'), UTF8 elsewhere."""
		if sys_name=='nt':
			return 'CP866'
		else:
			return 'UTF8'

	def parse_cmd(self,argv):
		"""Scan argv for the command letter and -X value pairs, then connect."""
		is_parse=1
		i=0
		cmd=''
		print len(argv)
		if len(argv)>1:
			try:
				while i<len(argv):
					print argv[i],i,argv[i] in ['i','c','u'] 
					if argv[i] in ['i','c','u']:
						cmd=argv[i]
						print cmd
						i+=1
					elif self.params.has_key(argv[i]):
						self.params[argv[i]]=argv[i+1]
						print argv[i+1]
						i+=2
					else:
						print i
						self.bad_parameters()
						is_parse=0
						break
				print self.params
			except:
				self.bad_parameters()
				is_parse=0
			else: 
				if is_parse: 
					self.cmd=cmd
					self.mydb=self.connect_db()
		else:
			self.help()


	def create_state(self,m):
			# State names are the remaining command characters; each handler
			# strips its own letter and returns the rest as the next state,
			# so 'cid' runs clean -> install -> dictionaries, then ''.
			m.add_state('c', self.clean_db) #, 'TERM','EXT_TERM','COMMIT','SKIP'
			m.add_state('cid', self.clean_db) #clean after install
			m.add_state('id', self.create_db_structures)
			m.add_state('d', self.create_dictionares)
			m.add_state('u',self.update_db)
			m.add_state('ERROR',self.error)
			m.add_state('',None,end_state=1)

	def run_command(self):
		"""Translate 'i' into the full install chain 'cid' and run the FSM."""
		if self.cmd:
			if self.cmd=='i': 
				cmd='cid'
			else:
				cmd=self.cmd
			self.sm.set_start(cmd)
			self.sm.run(cmd)


	def clean_db(self,val):
		"""Drop Indexer structures; on success continue with the rest of val."""
		import clean_db
		CC=clean_db.IndexerClean()
		if CC.clean(self.mydb)==1:
			newState=val[1:]
		else:
			newState='ERROR'
		return (newState, newState)

	def create_db_structures(self,val):
		"""Run indexer.sql to create structures; 'ERROR' unless COMPLIT."""
		from parse_script import ParserScript
		input_file=os.path.dirname(__file__)+'\\indexer.sql'
		#output_file=os.path.dirname(__file__)+'\\out.sql'
		parser=ParserScript(self.mydb,input_file)
		parser.run()
		if parser.State=='COMPLIT':
			newState=val[1:]
		else:
			newState='ERROR'
			print "Error in create structures"
		return (newState, newState)

	def create_dictionares(self,val):
		"""Populate dictionaries via dict_encode; 'ERROR' on failure."""
		import dict_encode
		if dict_encode.run(self.mydb)==1:
			newState=val[1:]
		else:
			newState='ERROR'
			print "Error in create dictionaries"
		return (newState, newState)

	def update_db(self,val): 
		"""Update command: not implemented -- jump straight to the end state."""
		return('','')

	def error(self,val):
		"""Report the failed state and terminate the machine."""
		print 'ERROR in state '+val
		newState=''
		return (newState, newState)
Beispiel #19
0
    # NOTE(review): fragment of a main routine; its def line, the
    # DoorServoState class and StateMachine live outside this paste.
    ymlfile = open('/home/pi/github/DoorKeyPi/config.yml')
    cfg = yaml.load(ymlfile, Loader=yaml.SafeLoader)
    ymlfile.close()

    # FSM setup
    service = DoorServoState(cfg)
    m = StateMachine()

    m.add_state('start', service.start_state)
    m.add_state('locking', service.locking_state)
    m.add_state('locked', service.locked_state)
    m.add_state('unlocking', service.unlocking_state)
    m.add_state('unlocked', service.unlocked_state)
    m.add_state('end', None, end_state=1)

    m.set_start('start')
    m.setup_run()

    # FSM loop
    handler = m.handlers[m.startState]
    lastState = 'start'
    currentState = 'start'

    try:
        while True:
            # update sensor
            service.update_sensor()

            # change state
            # NOTE(review): lastState is never advanced to newState in the
            # visible lines, and the parentheses around newState are
            # redundant -- confirm against the full source (cut off here).
            (newState) = handler(lastState)
            handler = m.handlers[newState.upper()]
Beispiel #20
0
class StateMachineSM:
    '''
    Implementation of the specific StateMachine following the need of our project
    '''
    # Class-level scratch storage; self.m is replaced per instance in __init__.
    memory = []
    m = {}

    def __init__(self):
        """Register every state handler and start from Still_state.

        Bug fix: "Bumping_state" was registered against
        self.bumping_state_transitions, but that method only existed as a
        commented-out draft, so constructing this class raised
        AttributeError. The draft is restored as a real method below.
        """
        self.m = StateMachine()
        #Declaration of all the states
        self.m.add_state("Still_state", self.still_state_transitions)
        self.m.add_state("Moving_state", self.moving_state_transitions)
        self.m.add_state("Bumping_state", self.bumping_state_transitions)
        self.m.add_state("Holding_state", self.holding_state_transitions)
        self.m.add_state("Crash_state", self.crash_state_transitions)
        self.m.set_start("Still_state")

    #method for evaluating the OR of a list of variables
    @staticmethod
    def conditionOr(op, data, names, values):
        """True if any |data[name]| compares (op '>' or '<') against its threshold."""
        result = False
        for i, key in enumerate(names):
            if (op == ">"):
                result = result or abs(data[key]) > values[i]
            else:
                result = result or abs(data[key]) < values[i]
        return result

    #method for evaluating the AND of a list of variables
    @staticmethod
    def conditionAnd(op, data, names, values):
        """True only if every |data[name]| compares (op) against its threshold."""
        result = True
        for i, key in enumerate(names):
            if (op == ">"):
                result = result and abs(data[key]) > values[i]
            else:
                result = result and abs(data[key]) < values[i]
        return result

    #Declaration of the transition from the Still state to the other possible states
    def still_state_transitions(self, data):
        """Still -> Moving once norm/current activity exceeds every threshold."""
        if self.conditionAnd(">", data,
                             ["norm", "current", "Std_norm", "Std_current"],
                             [0.9, 5, 0.005, 0.8]):
            newState = "Moving_state"
        else:
            newState = "Still_state"
        return newState

    #Declaration of the transition from the Moving state to the other possible states
    def moving_state_transitions(self, data):
        """Moving -> Crash/Holding/Still depending on power and variance cues."""
        if (data["normOut"] * data["currentOut"]) < 40:
            newState = "Crash_state"
        elif self.conditionAnd(">", data, ["Std_current", "Der_current"],
                               [15, 70]):
            newState = "Holding_state"
        elif self.conditionAnd("<", data,
                               ["norm", "current", "Std_norm", "Std_current"],
                               [0.9, 5, 0.005, 0.8]):
            newState = "Still_state"
        else:
            newState = "Moving_state"
        return newState

    #Transition from the Bumping state, restored from the commented-out draft
    #that __init__ already referenced. The original author noted it was never
    #tested; thresholds are kept exactly as drafted.
    def bumping_state_transitions(self, data):
        """Bumping -> Moving/Still as variance and derivative cues settle."""
        if self.conditionOr("<", data, ["Std_norm", "Der_norm", "Std_current", "Der_current"], [0.1, 0.5, 4, 20]):
            newState = "Moving_state"
        elif self.conditionOr("<", data, ["Std_norm", "Der_norm", "Std_current", "Der_current"], [0.01, 0.04, 1, 10]):
            newState = "Still_state"
        else:
            newState = "Bumping_state"
        return newState

    #Declaration of the transition from the Holding state to the other possible states
    def holding_state_transitions(self, data):
        """Holding -> Moving/Still when current variance and derivative drop."""
        if self.conditionAnd("<", data, ["Std_current", "Der_current"],
                             [15, 70]):
            newState = "Moving_state"
        elif self.conditionAnd("<", data,
                               ["norm", "current", "Std_norm", "Std_current"],
                               [0.9, 5, 0.005, 0.8]):
            newState = "Still_state"
        else:
            newState = "Holding_state"
        return newState

    #Declaration of the transition from the Crash state to the other possible states
    def crash_state_transitions(self, data):
        """Crash -> Still once every activity signal falls below threshold."""
        if self.conditionAnd("<", data,
                             ["norm", "current", "Std_norm", "Std_current"],
                             [0.9, 5, 0.005, 0.8]):
            newState = "Still_state"
        else:
            newState = "Crash_state"
        return newState

    def runOneStep(self, data):
        """Advance the wrapped StateMachine by a single step with *data*."""
        return self.m.runOneStep(data)
    # NOTE(review): tail of s_111_transitions (its def line is missing from
    # this paste). Consumes one throw character and picks the next
    # juggling-siteswap state; anything unrecognised resets to "000".
    throw = throws[0]
    throws = throws[1:]
    newState = "000"
    if throw == "3":
        newState = "111"
    elif throw == "4":
        newState = "1101"
    elif throw == "5":
        newState = "11001"

    return (newState, throws)


def s_1101_transitions(throws):
    """Consume one throw from state 1101: a '2' completes the pattern ("111"),
    any other throw resets to "000". Returns (next_state, remaining_throws)."""
    head, rest = throws[0], throws[1:]
    next_state = "111" if head == "2" else "000"
    return (next_state, rest)


if __name__ == "__main__":
    # NOTE(review): this paste fuses two unrelated snippets: the juggling FSM
    # below, then (after the blank lines) the tail of an XMPP transition
    # function -- its module-level `return` cannot execute here.
    m = StateMachine()
    m.add_state("111", s_111_transitions)
    m.add_state("1101", s_1101_transitions)
    m.set_start("111")
    m.run("42", 2)
    m.run("423", 3)
    currentPos = m.currentpos
    msg = json.dumps(currentPos)
    msg = m.tractor.encrypt(msg)
    xmpp.send_message(mto="*****@*****.**", mbody=msg)

    #time.sleep(1)

    newState = "AT_REST"
    print "///////////////////////////////////////////"
    return (newState, cargo)

if __name__ == '__main__':
	# NOTE(review): if EchoBot() or connect() raises before xmpp is bound,
	# the finally clause raises NameError and masks the original error --
	# confirm against the full source.
	try:
	    q = Queue(1)     
	    xmpp = EchoBot('*****@*****.**', 'Q9MTZx14we',q)
	    xmpp.connect()
	    xmpp.process(block=False)
	    m = StateMachine(xmpp, q)

	    # Robot-control FSM: "OFF" is the only terminal state.
	    m.add_state("AT_REST", at_rest)
	    m.add_state("DOWNLOAD_PATH", download_path)
	    m.add_state("EXECUTE_PATH", execute_path)
	    m.add_state("UPLOAD_DATA", upload_data)
	    m.add_state("SHUTTING_DOWN", shutdown)
	    m.add_state("OFF", None, end_state = 1)

	    m.set_start("AT_REST")
	    m.run(1)
	finally:
	    xmpp.disconnect()
Beispiel #23
0
                                FORWARD_VISION_SERVER_ID)
    # NOTE(review): fragment -- the call whose argument list ends above and
    # the enclosing function's def line are missing from this paste.
    client.registerRemoteBuffer(TARGET_LOCATION, DOWNWARD_VISION_SERVER_IP,
                                DOWNWARD_VISION_SERVER_ID)
    client.registerRemoteBuffer(TARGET_LOCATION, SONAR_SERVER_IP,
                                SONAR_SERVER_ID)
    client.registerRemoteBuffer(MOTOR_KILL, MOTOR_SERVER_IP, MOTOR_SERVER_ID)
    # Give the buffer registrations a moment to settle before starting.
    time.sleep(1)

    print("Creating State Machine")
    m = StateMachine()

    # AUV mission FSM: Error and EndOfRun are the terminal states.
    m.add_state("Start", start_transitions)
    m.add_state("Kill", kill_transitions)
    m.add_state("IsKilled", iskilled_transitions)
    m.add_state("GateDeadReckon", gatedr_transitions)
    m.add_state("GateVisionFeedback", gatevisionfeed_transitions)
    m.add_state("GateVision", gatevision_transitions)
    m.add_state("PathFinder", pathfinder_transitions)
    m.add_state("PathOrientation", pathorient_transitions)
    m.add_state("SetDepth", set_depth_transitions)
    m.add_state("BuoyDeadReckon", buoydr_transitions)
    m.add_state("CheckBuoy", checkbuoy_transitions)
    m.add_state("BuoyVision", buoyvision_transitions)
    m.add_state("SonarFinder", sonarfinder_transitions)
    m.add_state("SonarOrientation", sonarorient_transitions)
    m.add_state("OctoDeadReckon", sonardr_transitions)
    m.add_state("Error", None, end_state=1)
    m.add_state("EndOfRun", None, end_state=1)

    # The mission begins at the kill-switch check, not at "Start".
    m.set_start("Kill")
    m.run("PLACEHOLDER")
Beispiel #24
0
    # NOTE(review): fragment of a curses entry point; its def line and the
    # screen/handler definitions live outside this paste.
    curses.cbreak()

    # map arrow keys to special values
    screen.keypad(True)

    mode = "awake"

    try:
        # Menu FSM: "TurnOff" is the only terminal state.
        menu = StateMachine()
        menu.add_state("MeasureScreen", measure_screen)
        menu.add_state("MeasureSleep", measure_sleep)
        menu.add_state("MenuMeasure", menu_measure)
        menu.add_state("MenuScreenOff", menu_screen_off)
        menu.add_state("ScreenOff", screen_off)
        menu.add_state("MenuCalibrate", menu_calibrate)
        menu.add_state("Calibrate", calibrate)
        menu.add_state("MenuTurnOff", menu_turn_off)

        #menu.add_state("")

        menu.add_state("TurnOff", None, end_state=1)

        menu.set_start("MeasureScreen")
        menu.run("None")

    finally:
        # Always restore the terminal, even if the FSM raises.
        curses.nocbreak()
        screen.keypad(0)
        curses.echo()
        curses.endwin()
	# NOTE(review): closing line of a transition function whose def line is
	# missing from this paste.
	return (newState,restStr,detailList)



if __name__ == "__main__":
	# Chinese-address parsing FSM: states mirror the administrative
	# hierarchy (province -> city -> region/state -> town/street ->
	# doorplate); the three deepest states are terminal.
	m = StateMachine()
	m.add_state("start_state",start_transitions)
	m.add_state("province_state",province_transitions)
	m.add_state("city_state",city_transitions)
	m.add_state("region_state",region_transitions)
	m.add_state("state_state",state_transitions)
	m.add_state("town_state",town_transitions,end_state=1)
	m.add_state("street_state",street_transitions,end_state = 1)
	m.add_state("doorplate_state",None,end_state = 1)

	m.set_start("start_state")

	#m.process("浙江杭州市西湖区城区文三路黄龙国际G座18层")
	#m.process("浙江省杭州市西湖区城区文三路黄龙国际G座18层")
	#m.process("北京市北三环东路8号静安中心大厦")
	#m.process("黑龙江省哈尔滨市呼兰区南京路美兰家园5栋2单元303")
	#m.process("广东省深圳市罗湖区金稻田路1228号理想新城9栋A单元301室")
	#m.process("新疆维吾尔自治区昌吉回族自治州昌吉市昌吉市建国西路甜蜜家园9-1-301")
	#m.process("北京市北京市大兴区黄村镇海子角海悦公馆41号楼4单元602")
	#m.process("陕西省宝鸡市千阳县南关路粮食小区")
	#m.process("黑龙江省鸡西市虎林市黑龙江省虎林市公安南街276号")
	#m.process("辽宁省大连市金州区站前街道生辉第一城物业")
	#m.process("安徽省芜湖市无为县高沟镇龙庵街道")
	#m.process("广东省深圳市南山区科兴科学园A3单元12楼")
	#m.process("湖北省黄冈市浠水县散花镇涂墩村七组")
	# NOTE(review): the body of this loop is missing from this paste.
	for x in open("sample_address.txt"):
class StateMachineImplementation:
    """Project-specific wrapper around StateMachine with three states (A/B/C).

    Transition handlers receive a dict of measurements and return the name
    of the next state; runOneStep delegates to the wrapped machine.
    """
    # Class-level scratch storage; self.m is replaced per instance.
    memory = []
    m = {}

    def __init__(self):
        """Register the three state handlers and start from StateA."""
        self.m = StateMachine()
        for label, handler in (
            ("StateA", self.StateA_transitions),
            ("StateB", self.StateB_transitions),
            ("StateC", self.StateC_transitions),
        ):
            self.m.add_state(label, handler)
        self.m.set_start("StateA")

    @staticmethod
    def conditionOr(op, data, names, values):
        """True if ANY |data[name]| is > (or <, per *op*) its paired threshold."""
        if op == ">":
            return any(abs(data[key]) > limit for key, limit in zip(names, values))
        return any(abs(data[key]) < limit for key, limit in zip(names, values))

    @staticmethod
    def conditionAnd(op, data, names, values):
        """True only if ALL |data[name]| are > (or <, per *op*) their thresholds."""
        if op == ">":
            return all(abs(data[key]) > limit for key, limit in zip(names, values))
        return all(abs(data[key]) < limit for key, limit in zip(names, values))

    def StateA_transitions(self, data):
        """Go to StateB when a/v/c/d all exceed their thresholds, else StateC."""
        if self.conditionAnd(">", data, ["a", "v", "c", "d"],
                             [0.9, 5, 0.005, 0.8]):
            return "StateB"
        return "StateC"

    def StateB_transitions(self, data):
        """Pick the next state from the a*b product and threshold tests."""
        if (data["a"] * data["b"]) < 40:
            return "StateA"
        if self.conditionAnd(">", data, ["c", "d"], [15, 70]):
            return "StateB"
        if self.conditionAnd("<", data, ["a", "b", "c", "d"],
                             [0.9, 5, 0.005, 0.8]):
            return "StateC"
        return "StateA"

    def StateC_transitions(self, data):
        """StateC always falls back to StateA."""
        return "StateA"

    def runOneStep(self, data):
        """Advance the wrapped StateMachine by a single step with *data*."""
        return self.m.runOneStep(data)
Beispiel #27
0
__time__ = '2016/11/1'
from statemachine import StateMachine


def ones_counter(val):
    # Python 2 snippet: report while val sits in the ONES band.
    # NOTE(review): val is never updated inside the loop, so any in-range
    # value prints forever; sibling snippets apply math_func(val) at this
    # point -- confirm against the original source.
    print "ONES State:    ",
    while 1:
        if val <= 0 or val >= 30:
            newState = "Out_of_Range"
            break
        else:
            print "  @ %2.1f+" % val,
    print "  >>"
    return (newState, val)


def test(val):
    # Debug helper: echo the value (Python 2 print statement).
    print val


def test1(val):
    # Second debug helper, identical to test(); kept for the "test1" state.
    print val


if __name__ == '__main__':
    # Demo FSM wiring; OUT_OF_RANGE is the terminal state.
    stat = StateMachine()
    stat.add_state("test", test)
    # Bug fix: state "test1" was registered with handler `test`;
    # bind it to the matching handler `test1`.
    stat.add_state("test1", test1)
    stat.set_start("test")
    stat.add_state("OUT_OF_RANGE", None, end_state=1)
    stat.run(1)
Beispiel #28
0
    # NOTE(review): closing line of a transition function cut off in this paste.
    return (newState, txt)


def not_state_transitions(txt):
    """After 'not', polarity flips: a positive adjective ends in neg_state,
    a negative one in pos_state, anything else in error_state."""
    pieces = txt.split(None, 1)
    if len(pieces) > 1:
        word, remainder = pieces
    else:
        word, remainder = txt, ""
    if word in positive_adjectives:
        return ("neg_state", remainder)
    if word in negative_adjectives:
        return ("pos_state", remainder)
    return ("error_state", remainder)


if __name__ == "__main__":
    # Sentiment FSM over toy sentences; terminal states carry no handler.
    m = StateMachine()
    m.add_state("Start", start_transitions)  # add the initial state
    m.add_state("Python_state", python_state_transitions)
    m.add_state("is_state", is_state_transitions)
    m.add_state("not_state", not_state_transitions)
    for terminal in ("neg_state", "pos_state", "error_state"):
        m.add_state(terminal, None, end_state=1)  # add the final states

    m.set_start("Start")  # set the start state
    for sentence in ("Python is great", "Python is not fun",
                     "Perl is ugly", "Pythoniseasy"):
        m.run(sentence)
def not_state_transitions(txt):
    """After a "not", polarity flips: positive adjectives route to
    neg_state, negative ones to pos_state, unknown words to error_state.
    Returns (next_state, rest_of_text)."""
    pieces = txt.split(None, 1)
    word, rest = (pieces[0], pieces[1]) if len(pieces) > 1 else (txt, "")
    chosen = (
        "neg_state" if word in positive_adjectives
        else "pos_state" if word in negative_adjectives
        else "error_state"
    )
    return (chosen, rest)


def neg_state(txt):
    """Negative terminal state: announce it and report no remaining text."""
    greeting = "Hallo"
    print(greeting)
    return ("neg_state", "")


if __name__ == "__main__":
    # Wire the classifier: transition handlers for the live states,
    # handler-less terminal states, then classify three sentences.
    m = StateMachine()
    m.add_state("Start", start_transitions)
    m.add_state("Python_state", python_state_transitions)
    m.add_state("is_state", is_state_transitions)
    m.add_state("not_state", not_state_transitions)
    m.add_state("neg_state", None, end_state=1)
    m.add_state("pos_state", None, end_state=1)
    m.add_state("error_state", None, end_state=1)
    m.set_start("Start")
    m.run("Python is great")
    m.run("Python is difficult")
    m.run("Perl is ugly")
        else:
            val = math_func(val)
    return (newState, val)

def twenties_counter(val):
    """Handler for the TWENTIES state.

    Re-randomizes val via math_func until it leaves the [20, 30) band, then
    returns (next_state, val) naming the counter that should take over.
    """
    while True:
        if val <= 0 or val >= 30:
            return (STATE_OUT_OF_RANGE, val)
        if 1 <= val < 10:
            return (STATE_ONES, val)
        if 10 <= val < 20:
            return (STATE_TENS, val)
        val = math_func(val)

def math_func(n):
    """Fold n into the interval [0, 31) via the absolute sine of n."""
    return 31 * abs(math.sin(n))

if __name__== '__main__':
    # Register the three counter states (explicitly non-terminal) plus the
    # terminal out-of-range state, then start counting from 1.
    m = StateMachine()
    m.add_state(STATE_ONES, ones_counter, end_state=False)
    m.add_state(STATE_TENS, tens_counter, end_state=False)
    m.add_state(STATE_TWENTIES, twenties_counter, end_state=False)
    m.add_state(STATE_OUT_OF_RANGE, None, end_state=True)
    m.set_start(STATE_ONES)
    m.run(1)
            val = math_func(val)
        print " >>"
    return (newState, val)

def twenties_counter(val):
    # Python 2 fragment (print statements).
    # TWENTIES handler: echoes progress while val stays in [20, 30);
    # math_func re-randomizes val each pass until it leaves the band.
    print "TWENTIES State:",
    while 1:
        if val <= 0  or  val >= 30:
            newState =  "Out_of_Range"; break
        elif 1 <= val < 10:
            newState =  "ONES"; break
        elif 10 <= val < 20:
            newState =  "TENS"; break
        else:
            print " *%2.1f+" % val,
            val = math_func(val)
        print " >>"
    return (newState, val)

def math_func(n):
    """Map n into [0, 31) using the magnitude of sin(n) scaled by 31."""
    from math import sin
    return abs(sin(n)) * 31

if __name__ == "__main__":
    # Wire the three counter states plus the terminal out-of-range state,
    # then drive the machine starting from ONES with an initial value of 1.
    machine = StateMachine()
    machine.add_state("ONES", ones_counter)
    machine.add_state("TENS", tens_counter)
    machine.add_state("TWENTIES", twenties_counter)
    machine.add_state("OUT_OF_RANGE", None, end_state=1)
    machine.set_start("ONES")
    machine.run(1)
Beispiel #32
0
    print 'player 2 wins!'
    newState = 'game_end'

    return(newState, inpList)


def game_end(inpList):
    # Terminal handler: announce the end of the game (Python 2 print).
    # NOTE(review): returns None rather than a (state, cargo) tuple, so it
    # must only ever run as a terminal state -- confirm StateMachine
    # semantics.
    print 'End of game!'
    return None


##########################################################################
# Setting up game:
# Pool every tile from both players into a single list; the state machine
# below threads this list through each handler as its cargo.
allTiles = []
allTiles.extend(User1.tiles)
allTiles.extend(User2.tiles)

if __name__ == "__main__":
    # Assemble the turn-based game machine and run it over the tile pool.
    m = StateMachine()
    m.add_state("GameStarts", game_started)
    m.add_state("p1TurnStart", p1_turn_start)
    m.add_state("p2TurnStart", p2_turn_start)
    m.add_state("p1TurnEnd", p1_turn_end)
    m.add_state("p2TurnEnd", p2_turn_end)
    m.add_state("p1Win", p1_win)
    m.add_state("p2Win", p2_win)
    # BUG FIX: the win handlers return 'game_end' as the next state (see
    # p2_win above), but this state was registered as "GameOver", so the
    # machine could never find it. Register it under the returned name and
    # mark it terminal, since game_end returns None instead of a
    # (state, cargo) tuple.
    m.add_state("game_end", game_end, end_state=1)
    m.add_state("Out of range", None, end_state=1)
    m.set_start("GameStarts")
    m.run(allTiles)

def twenties_counter(val):
    """Handler for the TWENTIES state.

    Keeps re-randomizing val through math_func until it leaves the [20, 30)
    band, then returns (next_state, val) for the counter that takes over.
    """
    while True:
        if val <= 0 or val >= 30:
            break_state = STATE_OUT_OF_RANGE
        elif 1 <= val < 10:
            break_state = STATE_ONES
        elif 10 <= val < 20:
            break_state = STATE_TENS
        else:
            val = math_func(val)
            continue
        return (break_state, val)


def math_func(n):
    """Remap n into [0, 31) using the absolute value of sin(n)."""
    return 31 * abs(math.sin(n))


if __name__ == '__main__':
    # Register the three counter states (explicitly non-terminal) plus the
    # terminal out-of-range state, then start counting from 1.
    m = StateMachine()
    m.add_state(STATE_ONES, ones_counter, end_state=False)
    m.add_state(STATE_TENS, tens_counter, end_state=False)
    m.add_state(STATE_TWENTIES, twenties_counter, end_state=False)
    m.add_state(STATE_OUT_OF_RANGE, None, end_state=True)
    m.set_start(STATE_ONES)
    m.run(1)
Beispiel #34
0
class SamParaParser:
    """Parses the text of a single SAM paragraph into a Flow of plain
    strings and inline structures (annotations, citations, decorations,
    inline inserts) by stepping a character-driven state machine over the
    paragraph one character at a time."""

    def __init__(self):
        # These attributes are set by the parse method
        self.doc = None
        self.para = None
        self.current_string = None
        self.flow = None

        # One state per inline-markup start character; PARA is the hub
        # state and END terminates the machine.
        self.stateMachine = StateMachine()
        self.stateMachine.add_state("PARA", self._para)
        self.stateMachine.add_state("ESCAPE", self._escape)
        self.stateMachine.add_state("END", None, end_state=1)
        self.stateMachine.add_state("ANNOTATION-START", self._annotation_start)
        self.stateMachine.add_state("CITATION-START", self._citation_start)
        self.stateMachine.add_state("BOLD-START", self._bold_start)
        self.stateMachine.add_state("ITALIC-START", self._italic_start)
        self.stateMachine.add_state("MONO-START", self._mono_start)
        self.stateMachine.add_state("QUOTES-START", self._quotes_start)
        self.stateMachine.add_state("INLINE-INSERT", self._inline_insert)
        self.stateMachine.set_start("PARA")
        # Compiled inline-markup patterns, matched against the remainder of
        # the paragraph from the current position.
        self.patterns = {
            "escape": re.compile(r"\\"),
            "escaped-chars": re.compile(r"[\\\(\{\}\[\]_\*,`]"),
            "annotation": re.compile(
                r'(?<!\\)\{(?P<text>.*?)(?<!\\)\}(\(\s*(?P<type>\S*?\s*[^\\"\']?)(["\'](?P<specifically>.*?)["\'])??\s*(\((?P<namespace>\w+)\))?\))?'
            ),
            "bold": re.compile(r"\*(?P<text>\S.+?\S)\*"),
            "italic": re.compile(r"_(?P<text>\S.*?\S)_"),
            "mono": re.compile(r"`(?P<text>\S.*?\S)`"),
            "quotes": re.compile(r'"(?P<text>\S.*?\S)"'),
            "inline-insert": re.compile(r">>\((?P<attributes>.*?)\)"),
            # NOTE(review): this alternation defines groups 'name_name' and
            # 'extra', while _citation_start queries 'name' and 'name_extra'
            # -- see the note there.
            "citation": re.compile(
                r"(\[\s*\*(?P<id>\S+)(\s+(?P<id_extra>.+?))?\])|(\[\s*\#(?P<name_name>\S+)(\s+(?P<extra>.+?))?\])|(\[\s*(?P<citation>.*?)\])"
            ),
        }

    def parse(self, para, doc, strip=True):
        """Parse paragraph text *para* in the context of document *doc*.

        Returns the resulting Flow, or None when para is None.
        """
        if para is None:
            return None
        self.doc = doc
        self.para = Para(para, strip)
        self.current_string = ""
        self.flow = Flow()
        self.stateMachine.run(self.para)
        return self.flow

    def _para(self, para):
        """Hub state: dispatch on the next character to the matching
        markup-start state, or accumulate it as plain text."""
        try:
            char = para.next_char
        except IndexError:
            # End of paragraph: flush accumulated text and stop the machine.
            self.flow.append(self.current_string)
            self.current_string = ""
            return "END", para
        if char == "\\":
            return "ESCAPE", para
        elif char == "{":
            return "ANNOTATION-START", para
        elif char == "[":
            return "CITATION-START", para
        elif char == "*":
            return "BOLD-START", para
        elif char == "_":
            return "ITALIC-START", para
        elif char == "`":
            return "MONO-START", para
        elif char == '"':
            return "QUOTES-START", para
        elif char == ">":
            return "INLINE-INSERT", para
        else:
            self.current_string += char
            return "PARA", para

    def _annotation_start(self, para):
        """Handle '{': parse an {text}(type ...) annotation, falling back to
        a literal '{' when the pattern does not match."""
        match = self.patterns["annotation"].match(para.rest_of_para)
        if match:
            self.flow.append(self.current_string)
            self.current_string = ""
            annotation_type = match.group("type")
            text = match.group("text")

            # If there is an annotated phrase with no annotation, look back
            # to see if it has been annotated already, and if so, copy the
            # closest preceding annotation.
            if annotation_type is None:
                # First look back in the current flow
                # (which is not part of the doc structure yet).
                previous = self.flow.find_last_annotation(text)
                if previous is not None:
                    self.flow.append(previous)
                else:
                    # Then look back in the document.
                    previous = self.doc.find_last_annotation(text)
                    if previous is not None:
                        self.flow.append(previous)

                    # Else raise an exception.
                    else:
                        raise SAMParserError(
                            "Blank annotation found: {"
                            + text
                            + "} "
                            + "If you are trying to insert curly braces "
                            + "into the document, use \{"
                            + text
                            + "]. Otherwise, make sure annotated text matches "
                            "previous annotation exactly."
                        )
            elif annotation_type.strip() == "":
                raise SAMParserError("Annotation type cannot be blank: " + match.group(0))
            else:
                # Check for link shortcut
                if urlparse(annotation_type, None).scheme is not None:
                    specifically = annotation_type
                    annotation_type = "link"
                else:
                    specifically = match.group("specifically") if match.group("specifically") is not None else None
                namespace = match.group("namespace").strip() if match.group("namespace") is not None else None
                self.flow.append(Annotation(annotation_type.strip(), text, specifically, namespace))
            para.advance(len(match.group(0)) - 1)
            return "PARA", para
        else:
            self.current_string += "{"
            return "PARA", para

    def _citation_start(self, para):
        """Handle '[': parse a citation by id, name, or free text, falling
        back to a literal '[' when the pattern does not match."""
        match = self.patterns["citation"].match(para.rest_of_para)
        if match:
            self.flow.append(self.current_string)
            self.current_string = ""

            # NOTE(review): the citation pattern defines groups 'name_name'
            # and 'extra', not 'name'/'name_extra'. match.group("name")
            # therefore always raises IndexError (caught below, leaving
            # nameref None), making the nameref branch dead; and
            # match.group("name_extra") would itself raise if that branch
            # were ever reached. Confirm the intended group names.
            try:
                idref = match.group("id")
            except IndexError:
                idref = None
            try:
                nameref = match.group("name")
            except IndexError:
                nameref = None
            try:
                citation = match.group("citation")
            except IndexError:
                citation = None

            if idref:
                citation_type = "idref"
                citation_value = idref.strip()
                extra = match.group("id_extra")
            elif nameref:
                citation_type = "nameref"
                citation_value = nameref.strip()
                extra = match.group("name_extra")
            else:
                citation_type = "citation"
                citation_value = citation.strip()
                extra = None

            self.flow.append(Citation(citation_type, citation_value, extra))
            para.advance(len(match.group(0)) - 1)
            return "PARA", para
        else:
            self.current_string += "["
            return "PARA", para

    def _bold_start(self, para):
        """Handle '*': emit a bold Decoration, or a literal '*' on no match."""
        match = self.patterns["bold"].match(para.rest_of_para)
        if match:
            self.flow.append(self.current_string)
            self.current_string = ""
            self.flow.append(Decoration("bold", match.group("text")))
            para.advance(len(match.group(0)) - 1)
        else:
            self.current_string += "*"
        return "PARA", para

    def _italic_start(self, para):
        """Handle '_': emit an italic Decoration, or a literal '_' on no match."""
        match = self.patterns["italic"].match(para.rest_of_para)
        if match:
            self.flow.append(self.current_string)
            self.current_string = ""
            self.flow.append(Decoration("italic", match.group("text")))
            para.advance(len(match.group(0)) - 1)
        else:
            self.current_string += "_"
        return "PARA", para

    def _mono_start(self, para):
        """Handle '`': emit a mono Decoration, or a literal '`' on no match."""
        match = self.patterns["mono"].match(para.rest_of_para)
        if match:
            self.flow.append(self.current_string)
            self.current_string = ""
            self.flow.append(Decoration("mono", match.group("text")))
            para.advance(len(match.group(0)) - 1)
        else:
            self.current_string += "`"
        return "PARA", para

    def _quotes_start(self, para):
        """Handle '"': emit a quotes Decoration, or a literal '"' on no match."""
        match = self.patterns["quotes"].match(para.rest_of_para)
        if match:
            self.flow.append(self.current_string)
            self.current_string = ""
            self.flow.append(Decoration("quotes", match.group("text")))
            para.advance(len(match.group(0)) - 1)
        else:
            self.current_string += '"'
        return "PARA", para

    def _inline_insert(self, para):
        """Handle '>': emit an InlineInsert from '>>(attrs)', or a literal
        '>' on no match."""
        match = self.patterns["inline-insert"].match(para.rest_of_para)
        if match:
            self.flow.append(self.current_string)
            self.current_string = ""
            self.flow.append(InlineInsert(parse_insert(match.group("attributes"))))
            para.advance(len(match.group(0)) - 1)
        else:
            self.current_string += ">"
        return "PARA", para

    def _inline_insert_id(self, para):
        """Handle an id-based inline insert.

        NOTE(review): dead code as written -- this method is never
        registered as a state, and 'inline-insert_id' is not a key in
        self.patterns, so calling it would raise KeyError.
        """
        match = self.patterns["inline-insert_id"].match(para.rest_of_para)
        if match:
            self.flow.append(self.current_string)
            self.current_string = ""
            self.flow.append(InlineInsert("reference", match.group("id")))
            para.advance(len(match.group(0)) - 1)
        else:
            self.current_string += ">"
        return "PARA", para

    def _escape(self, para):
        """Handle '\\': keep the next character literally if it is escapable,
        otherwise keep the backslash itself as text."""
        char = para.next_char
        if self.patterns["escaped-chars"].match(char):
            self.current_string += char
        else:
            self.current_string += "\\" + char
        return "PARA", para
Beispiel #35
0
            newState = "pos_state"
        elif word in negative_adjectives:
            newState = "neg_state"
        else:
            newState = "error_state"
        return (newState, txt)
    elif state == "not_state":
        if word in positive_adjectives:
            newState = "neg_state"
        elif word in negative_adjectives:
            newState = "pos_state"
        else:
            newState = "error_state"
        return (newState, txt)


if __name__ == "__main__":
    # All four live states share the single `transitions` handler; the
    # three terminal states have no handler and end_state=1.
    m = StateMachine()
    m.add_state("Start", transitions)
    m.add_state("Python_state", transitions)
    m.add_state("is_state", transitions)
    m.add_state("not_state", transitions)
    m.add_state("neg_state", None, end_state=1)
    m.add_state("pos_state", None, end_state=1)
    m.add_state("error_state", None, end_state=1)
    m.set_start("Start")
    m.run("Python is great")
    # The triple-quoted string below is a bare expression statement used to
    # comment out two extra runs; it has no runtime effect.
    """
    m.run("Python is difficult")
    m.run("Perl is ugly")
    """
Beispiel #36
0
class SamParser:
    """Line-driven parser for SAM documents: a state machine classifies each
    source line (block, codeblock, paragraph, record, list item, insert) and
    builds the resulting DocStructure."""

    def __init__(self):

        # One state per line-level construct; NEW validates the file header,
        # SAM is the hub state, END terminates.
        self.stateMachine = StateMachine()
        self.stateMachine.add_state("NEW", self._new_file)
        self.stateMachine.add_state("SAM", self._sam)
        self.stateMachine.add_state("BLOCK", self._block)
        self.stateMachine.add_state("CODEBLOCK-START", self._codeblock_start)
        self.stateMachine.add_state("CODEBLOCK", self._codeblock)
        self.stateMachine.add_state("PARAGRAPH-START", self._paragraph_start)
        self.stateMachine.add_state("PARAGRAPH", self._paragraph)
        self.stateMachine.add_state("RECORD-START", self._record_start)
        self.stateMachine.add_state("RECORD", self._record)
        self.stateMachine.add_state("LIST-ITEM", self._list_item)
        self.stateMachine.add_state("NUM-LIST-ITEM", self._num_list_item)
        self.stateMachine.add_state("BLOCK-INSERT", self._block_insert)
        self.stateMachine.add_state("END", None, end_state=1)
        self.stateMachine.set_start("NEW")
        self.current_paragraph = None
        self.doc = DocStructure()
        self.source = None
        # Line-classification patterns; _sam tries them in a fixed order.
        self.patterns = {
            'comment': re.compile(r'\s*#.*'),
            'block-start': re.compile(r'(\s*)([a-zA-Z0-9-_]+):(?:\((.*?)\))?(.*)'),
            'codeblock-start': re.compile(r'(\s*)```(.*)'),
            'codeblock-end': re.compile(r'(\s*)```\s*$'),
            'paragraph-start': re.compile(r'\w*'),
            'blank-line': re.compile(r'^\s*$'),
            'record-start': re.compile(r'\s*[a-zA-Z0-9-_]+::(.*)'),
            'list-item': re.compile(r'(\s*)(\*\s+)(.*)'),
            'num-list-item': re.compile(r'(\s*)([0-9]+\.\s+)(.*)'),
            'block-insert': re.compile(r'(\s*)>>\(.*?\)\w*')
        }

    def parse(self, source):
        """Run the state machine over *source*; raises if the document ends
        before the structure is complete."""
        self.source = source
        try:
            self.stateMachine.run(self.source)
        except EOFError:
            raise Exception("Document ended before structure was complete. At:\n\n"
                            + self.current_paragraph)

    def paragraph_start(self, line):
        """Begin accumulating a paragraph from a stripped line."""
        self.current_paragraph = line.strip()

    def paragraph_append(self, line):
        """Append a stripped continuation line, space-separated."""
        self.current_paragraph += " " + line.strip()

    def pre_start(self, line):
        """Begin accumulating preformatted text verbatim (no stripping)."""
        self.current_paragraph = line

    def pre_append(self, line):
        """Append a preformatted line verbatim."""
        self.current_paragraph += line

    def _new_file(self, source):
        """Validate the 'sam:' header on the first line and open the root.

        NOTE(review): checks line[:4] == 'sam:' but passes line[5:], which
        drops the character at index 4 -- presumably assumes 'sam: ' with a
        space; confirm.
        """
        line = source.next_line
        if line[:4] == 'sam:':
            self.doc.new_root('sam', line[5:])
            return "SAM", source
        else:
            raise Exception("Not a SAM file!")

    def _block(self, source):
        """Handle a 'name:(attrs) content' block line; a second ':' at the
        start of content signals record syntax instead."""
        line = source.currentLine
        match = self.patterns['block-start'].match(line)
        indent = len(match.group(1))
        element = match.group(2).strip()
        attributes = match.group(3)
        content = match.group(4).strip()

        if content[:1] == ':':
            return "RECORD-START", source
        else:
            self.doc.new_block(element, attributes, content, indent)
            return "SAM", source

    def _codeblock_start(self, source):
        """Open a codeblock from a '```(language)' line."""
        line = source.currentLine
        local_indent = len(line) - len(line.lstrip())
        match = self.patterns['codeblock-start'].match(line)
        attributes = re.compile(r'\((.*?)\)').match(match.group(2).strip())
        language = attributes.group(1)
        self.doc.new_block('codeblock', language, None, local_indent)
        self.pre_start('')
        return "CODEBLOCK", source

    def _codeblock(self, source):
        """Accumulate codeblock lines verbatim until the closing '```'."""
        line = source.next_line
        if self.patterns['codeblock-end'].match(line):
            self.doc.new_flow(Pre(self.current_paragraph))
            return "SAM", source
        else:
            self.pre_append(line)
            return "CODEBLOCK", source

    def _paragraph_start(self, source):
        """Open a 'p' block and begin accumulating the paragraph text."""
        line = source.currentLine
        local_indent = len(line) - len(line.lstrip())
        self.doc.new_block('p', None, '', local_indent)
        self.paragraph_start(line)
        return "PARAGRAPH", source

    def _paragraph(self, source):
        """Accumulate paragraph lines until a blank line, then hand the
        accumulated text to the paragraph parser."""
        line = source.next_line
        if self.patterns['blank-line'].match(line):
            para_parser.parse(self.current_paragraph, self.doc)
            return "SAM", source
        else:
            self.paragraph_append(line)
            return "PARAGRAPH", source

    def _list_item(self, source):
        """Open an unordered list item ('* ') and parse its text as a
        paragraph."""
        line = source.currentLine
        match = self.patterns['list-item'].match(line)
        local_indent = len(match.group(1))
        content_indent = local_indent + len(match.group(2))
        self.doc.new_unordered_list_item(local_indent, content_indent)
        self.paragraph_start(str(match.group(3)).strip())
        return "PARAGRAPH", source


    def _num_list_item(self, source):
        """Open an ordered list item ('1. ') and parse its text as a
        paragraph."""
        line = source.currentLine
        match = self.patterns['num-list-item'].match(line)
        local_indent = len(match.group(1))
        content_indent = local_indent + len(match.group(2))
        self.doc.new_ordered_list_item(local_indent, content_indent)
        self.paragraph_start(str(match.group(3)).strip())
        return "PARAGRAPH", source

    def _block_insert(self, source):
        """Handle a '>>(attrs)' block-insert line."""
        line = source.currentLine
        indent = len(source.currentLine) - len(source.currentLine.lstrip())
        attribute_pattern = re.compile(r'\s*>>\((.*?)\)')
        match = attribute_pattern.match(line)
        self.doc.new_block('insert', text='', attributes=parse_insert(match.group(1)), indent=indent)
        return "SAM", source

    def _record_start(self, source):
        """Open a record set from an 'element:: field, field, ...' line."""
        line = source.currentLine
        match = self.patterns['block-start'].match(line)
        local_indent = len(match.group(1))
        local_element = match.group(2).strip()
        field_names = [x.strip() for x in self.patterns['record-start'].match(line).group(1).split(',')]
        self.doc.new_record_set(local_element, field_names, local_indent)
        return "RECORD", source

    def _record(self, source):
        """Accumulate comma-separated record rows until a blank line."""
        line = source.next_line
        if self.patterns['blank-line'].match(line):
            return "SAM", source
        else:
            field_values = [x.strip() for x in line.split(',')]
            record = list(zip(self.doc.fields, field_values))
            self.doc.new_record(record)
            return "RECORD", source

    def _sam(self, source):
        """Hub state: classify the next line and dispatch; EOF ends the
        machine.

        NOTE(review): 'paragraph-start' is r'\w*', which matches any line
        (including a zero-width match), so the final elif always succeeds
        and the trailing else is unreachable.
        """
        try:
            line = source.next_line
        except EOFError:
            return "END", source
        if self.patterns['comment'].match(line):
            self.doc.new_comment(Comment(line.strip()[1:]))
            return "SAM", source
        elif self.patterns['block-start'].match(line):
            return "BLOCK", source
        elif self.patterns['blank-line'].match(line):
            return "SAM", source
        elif self.patterns['codeblock-start'].match(line):
            return "CODEBLOCK-START", source
        elif self.patterns['list-item'].match(line):
            return "LIST-ITEM", source
        elif self.patterns['num-list-item'].match(line):
            return "NUM-LIST-ITEM", source
        elif self.patterns['block-insert'].match(line):
            return "BLOCK-INSERT", source
        elif self.patterns['paragraph-start'].match(line):
            return "PARAGRAPH-START", source
        else:
            raise Exception("I'm confused")

    def serialize(self, serialize_format):
        """Serialize the parsed DocStructure in the requested format."""
        return self.doc.serialize(serialize_format)
Beispiel #37
0
class SamParaParser:
    """Parses the text of a single SAM paragraph into a Flow of plain
    strings and inline structures (annotations, citations, inline inserts,
    character entities) via a character-driven state machine. This variant
    supports code spans, language tags (~lang) on annotations, and HTML
    character entities."""

    def __init__(self):
        # These attributes are set by the parse method
        self.doc = None
        self.para = None
        self.current_string = None
        self.flow = None

        # One state per inline-markup start character; PARA is the hub
        # state and END terminates the machine.
        self.stateMachine = StateMachine()
        self.stateMachine.add_state("PARA", self._para)
        self.stateMachine.add_state("ESCAPE", self._escape)
        self.stateMachine.add_state("END", None, end_state=1)
        self.stateMachine.add_state("ANNOTATION-START", self._annotation_start)
        self.stateMachine.add_state("CITATION-START", self._citation_start)
        self.stateMachine.add_state("BOLD-START", self._bold_start)
        self.stateMachine.add_state("ITALIC-START", self._italic_start)
        self.stateMachine.add_state("CODE-START", self._code_start)
        self.stateMachine.add_state("QUOTES-START", self._quotes_start)
        self.stateMachine.add_state("INLINE-INSERT", self._inline_insert)
        self.stateMachine.add_state("CHARACTER-ENTITY", self._character_entity)
        self.stateMachine.set_start("PARA")
        # Compiled inline-markup patterns, matched against the remainder of
        # the paragraph from the current position.
        self.patterns = {
            'escape':
            re.compile(r'\\', re.U),
            'escaped-chars':
            re.compile(r'[\\\(\{\}\[\]_\*,\.\*`"&]', re.U),
            'annotation':
            re.compile(
                r'(?<!\\)\{(?P<text>.*?)(?<!\\)\}(\(\s*(?P<type>\S*?\s*[^\\"\']?)(["\'](?P<specifically>.*?)["\'])??\s*(\((?P<namespace>\w+)\))?\s*(~(?P<language>[\w-]+))?\))?',
                re.U),
            'bold':
            re.compile(r'\*(?P<text>((?<=\\)\*|[^\*])*)(?<!\\)\*', re.U),
            'italic':
            re.compile(r'_(?P<text>((?<=\\)_|[^_])*)(?<!\\)_', re.U),
            'code':
            re.compile(r'`(?P<text>(``|[^`])*)`', re.U),
            'quotes':
            re.compile(r'"(?P<text>((?<=\\)"|[^"])*)(?<!\\)"', re.U),
            'inline-insert':
            re.compile(r'>\((?P<attributes>.*?)\)', re.U),
            'character-entity':
            re.compile(r'&(\#[0-9]+|#[xX][0-9a-fA-F]+|[\w]+);'),
            # NOTE(review): this alternation defines groups 'name_name' and
            # 'extra', while _citation_start queries 'name' and 'name_extra'
            # -- see the note there.
            'citation':
            re.compile(
                r'(\[\s*\*(?P<id>\S+)(\s+(?P<id_extra>.+?))?\])|(\[\s*\#(?P<name_name>\S+)(\s+(?P<extra>.+?))?\])|(\[\s*(?P<citation>.*?)\])',
                re.U)
        }

    def parse(self, para, doc, strip=True):
        """Parse paragraph text *para* in the context of document *doc*.

        Returns the resulting Flow, or None when para is None.
        """
        if para is None:
            return None
        self.doc = doc
        self.para = Para(para, strip)
        self.current_string = ''
        self.flow = Flow()
        self.stateMachine.run(self.para)
        return self.flow

    def _para(self, para):
        """Hub state: dispatch on the next character to the matching
        markup-start state, or accumulate it as plain text."""
        try:
            char = para.next_char
        except IndexError:
            # End of paragraph: flush accumulated text and stop the machine.
            self.flow.append(self.current_string)
            self.current_string = ''
            return "END", para
        if char == '\\':
            return "ESCAPE", para
        elif char == '{':
            return "ANNOTATION-START", para
        elif char == '[':
            return "CITATION-START", para
        elif char == "*":
            return "BOLD-START", para
        elif char == "_":
            return "ITALIC-START", para
        elif char == "`":
            return "CODE-START", para
        elif char == '"':
            return "QUOTES-START", para
        elif char == ">":
            return "INLINE-INSERT", para
        elif char == "&":
            return "CHARACTER-ENTITY", para
        else:
            self.current_string += char
            return "PARA", para

    def _annotation_start(self, para):
        """Handle '{': parse an {text}(type ... ~lang) annotation, falling
        back to a literal '{' when the pattern does not match. A blank
        annotation is resolved from the nearest preceding annotation of the
        same text, or reported as a warning."""
        match = self.patterns['annotation'].match(para.rest_of_para)
        if match:
            self.flow.append(self.current_string)
            self.current_string = ''
            annotation_type = match.group('type')
            language = match.group('language')
            text = self._unescape(match.group("text"))

            # If there is an annotated phrase with no annotation, look back
            # to see if it has been annotated already, and if so, copy the
            # closest preceding annotation.
            if annotation_type is None and not language:
                # First look back in the current flow
                # (which is not part of the doc structure yet).
                previous = self.flow.find_last_annotation(text)
                if previous is not None:
                    self.flow.append(previous)
                else:
                    # Then look back in the document.
                    previous = self.doc.find_last_annotation(text)
                    if previous is not None:
                        self.flow.append(previous)

                    # Else output a warning.
                    else:
                        self.current_string += text
                        SAM_parser_warning(
                            "Blank annotation found: {" + text + "} " +
                            "If you are trying to insert curly braces " +
                            "into the document, use \{" + text +
                            "]. Otherwise, make sure annotated text matches "
                            "previous annotation exactly.")
            else:
                #Check for link shortcut
                if urlparse(annotation_type, None).scheme is not None:
                    specifically = annotation_type
                    annotation_type = 'link'
                else:
                    specifically = match.group('specifically') if match.group(
                        'specifically') is not None else None
                namespace = match.group('namespace').strip() if match.group(
                    'namespace') is not None else None
                self.flow.append(
                    Annotation(annotation_type, text, specifically, namespace,
                               language))
            para.advance(len(match.group(0)) - 1)
            return "PARA", para
        else:
            self.current_string += '{'
            return "PARA", para

    def _citation_start(self, para):
        """Handle '[': parse a citation by id, name, or free text, falling
        back to a literal '[' when the pattern does not match."""
        match = self.patterns['citation'].match(para.rest_of_para)
        if match:
            self.flow.append(self.current_string)
            self.current_string = ''

            # NOTE(review): the citation pattern defines groups 'name_name'
            # and 'extra', not 'name'/'name_extra'. match.group('name')
            # therefore always raises IndexError (caught below, leaving
            # nameref None), making the nameref branch dead; and
            # match.group('name_extra') would itself raise if that branch
            # were ever reached. Confirm the intended group names.
            try:
                idref = match.group('id')
            except IndexError:
                idref = None
            try:
                nameref = match.group('name')
            except IndexError:
                nameref = None
            try:
                citation = match.group('citation')
            except IndexError:
                citation = None

            if idref:
                citation_type = 'idref'
                citation_value = idref.strip()
                extra = match.group('id_extra')
            elif nameref:
                citation_type = 'nameref'
                citation_value = nameref.strip()
                extra = match.group('name_extra')
            else:
                citation_type = 'citation'
                citation_value = citation.strip()
                extra = None

            self.flow.append(Citation(citation_type, citation_value, extra))
            para.advance(len(match.group(0)) - 1)
            return "PARA", para
        else:
            self.current_string += '['
            return "PARA", para

    def _bold_start(self, para):
        """Handle '*': emit a bold Annotation, or a literal '*' on no match."""
        match = self.patterns['bold'].match(para.rest_of_para)
        if match:
            self.flow.append(self.current_string)
            self.current_string = ''
            self.flow.append(
                Annotation('bold', self._unescape(match.group("text"))))
            para.advance(len(match.group(0)) - 1)
        else:
            self.current_string += '*'
        return "PARA", para

    def _italic_start(self, para):
        """Handle '_': emit an italic Annotation, or a literal '_' on no
        match."""
        match = self.patterns['italic'].match(para.rest_of_para)
        if match:
            self.flow.append(self.current_string)
            self.current_string = ''
            self.flow.append(
                Annotation('italic', self._unescape(match.group("text"))))
            para.advance(len(match.group(0)) - 1)
        else:
            self.current_string += '_'
        return "PARA", para

    def _code_start(self, para):
        """Handle '`': emit a code Annotation with '``' unescaped to '`',
        or a literal '`' on no match."""
        match = self.patterns['code'].match(para.rest_of_para)
        if match:
            self.flow.append(self.current_string)
            self.current_string = ''
            self.flow.append(
                Annotation('code', (match.group("text")).replace("``", "`")))
            para.advance(len(match.group(0)) - 1)
        else:
            self.current_string += '`'
        return "PARA", para

    def _quotes_start(self, para):
        """Handle '"': emit a quotes Annotation, or a literal '"' on no
        match."""
        match = self.patterns['quotes'].match(para.rest_of_para)
        if match:
            self.flow.append(self.current_string)
            self.current_string = ''
            self.flow.append(
                Annotation('quotes', self._unescape(match.group("text"))))
            para.advance(len(match.group(0)) - 1)
        else:
            self.current_string += '"'
        return "PARA", para

    def _inline_insert(self, para):
        """Handle '>': emit an InlineInsert from '>(attrs)', or a literal
        '>' on no match."""
        match = self.patterns['inline-insert'].match(para.rest_of_para)
        if match:
            self.flow.append(self.current_string)
            self.current_string = ''
            self.flow.append(
                InlineInsert(parse_insert(match.group("attributes"))))
            para.advance(len(match.group(0)) - 1)
        else:
            self.current_string += '>'
        return "PARA", para

    def _inline_insert_id(self, para):
        """Handle an id-based inline insert.

        NOTE(review): dead code as written -- this method is never
        registered as a state, and 'inline-insert_id' is not a key in
        self.patterns, so calling it would raise KeyError.
        """
        match = self.patterns['inline-insert_id'].match(para.rest_of_para)
        if match:
            self.flow.append(self.current_string)
            self.current_string = ''
            self.flow.append(InlineInsert('reference', match.group("id")))
            para.advance(len(match.group(0)) - 1)
        else:
            self.current_string += '>'
        return "PARA", para

    def _character_entity(self, para):
        """Handle '&': decode an HTML character entity into the literal
        character, or keep a literal '&' on no match."""
        match = self.patterns['character-entity'].match(para.rest_of_para)
        if match:
            self.current_string += self.patterns['character-entity'].sub(
                self._replace_charref, match.group(0))
            para.advance(len(match.group(0)) - 1)
        else:
            self.current_string += '&'
        return "PARA", para

    def _replace_charref(self, match):
        """Decode a character reference (Match object or plain string) via
        html.unescape; raise if the entity is not recognized."""
        try:
            charref = match.group(0)
        except AttributeError:
            charref = match
        character = html.unescape(charref)
        if character == charref:  # Escape not recognized
            raise SAMParserError("Unrecognized character entity found: " +
                                 charref)
        return character

    def _escape(self, para):
        """Handle '\\': keep the next character literally if it is
        escapable, otherwise keep the backslash itself as text."""
        char = para.next_char
        if self.patterns['escaped-chars'].match(char):
            self.current_string += char
        else:
            self.current_string += '\\' + char
        return "PARA", para

    def _unescape(self, string):
        """Resolve backslash escapes and character entities in *string*,
        returning the plain text."""
        result = ''
        e = enumerate(string)
        for pos, char in e:
            try:
                if char == '\\' and self.patterns['escaped-chars'].match(
                        string[pos + 1]):
                    # Keep the escaped character, skip the backslash.
                    result += string[pos + 1]
                    next(e, None)
                elif char == '&':
                    match = self.patterns['character-entity'].match(
                        string[pos:])
                    if match:
                        result += self.patterns['character-entity'].sub(
                            self._replace_charref, match.group(0))
                        # Skip past the consumed entity characters.
                        for i in range(0, len(match.group(0))):
                            next(e, None)
                    else:
                        result += char
                else:
                    result += char
            except IndexError:
                # Trailing backslash at end of string: keep it literally.
                result += char
        return result
Beispiel #38
0
        reason = 'EHTML'
        newstate = 'error_state'
        print reason,i
    return (newstate,txt)
    

    
if __name__=='__main__':
    # Assemble the HTML-walking state machine: one state per expected tag,
    # plus the two terminal states, then feed it the contents of 1.txt.
    m = StateMachine()
    m.add_state('orig',doctype_way)
    m.add_state('doctype_state',doctype_state_way )
    m.add_state("dhtml_state",dhtml_state_way)
    m.add_state("shtml_state",shtml_state_way)
    m.add_state("shead_state",shead_state_way)
    m.add_state("stitle_state",stitle_state_way)
    m.add_state("etitle_state",etitle_state_way)
    m.add_state("ehead_state",ehead_state_way)
    m.add_state("sbody_state",sbody_state_way)
    m.add_state("sp_state",sp_state_way)
    m.add_state("ep_state",ep_state_way)
    m.add_state("sa_state",sa_state_way)
    m.add_state("ea_state",ea_state_way)
    m.add_state("ebody_state",ebody_state_way)
    m.add_state("finstate",None,end_state=1)
    m.add_state("error_state", None, end_state=1)
    m.set_start('orig')
    # BUG FIX: open('1.txt') leaked the file handle; use a context manager
    # so the file is closed deterministically.
    with open('1.txt') as fh:
        m.run(fh.read())
    
# Extract the href targets of all <A ...> anchor tags from the same file
# (Python 2 print statement).
# NOTE(review): the greedy '(.*)' captures up to the last '"' on each line,
# and the file handle from open() is never closed -- confirm both are
# acceptable for this throwaway script.
relink = re.compile(r'<A href="(.*)".*>')
print relink.findall(open('1.txt').read())
Beispiel #39
0
class SamParser:
    """Parser for SAM markup documents.

    Runs a StateMachine whose states mirror SAM's block-level constructs
    (paragraphs, code blocks, blockquotes, records, list items, embedded
    XML, ...).  Each ``_xxx`` state handler consumes lines from a
    StringSource, adds structure to ``self.doc`` (a DocStructure), and
    returns the next state name plus a ``(source, match)`` context tuple.
    """

    def __init__(self):
        """Set up the state machine, the regex table, and an empty document."""

        self.stateMachine = StateMachine()
        self.stateMachine.add_state("NEW", self._new_file)
        self.stateMachine.add_state("SAM", self._sam)
        self.stateMachine.add_state("BLOCK", self._block)
        self.stateMachine.add_state("CODEBLOCK-START", self._codeblock_start)
        self.stateMachine.add_state("CODEBLOCK", self._codeblock)
        self.stateMachine.add_state("BLOCKQUOTE-START", self._blockquote_start)
        self.stateMachine.add_state("FRAGMENT-START", self._fragment_start)
        self.stateMachine.add_state("PARAGRAPH-START", self._paragraph_start)
        self.stateMachine.add_state("PARAGRAPH", self._paragraph)
        self.stateMachine.add_state("RECORD-START", self._record_start)
        self.stateMachine.add_state("RECORD", self._record)
        self.stateMachine.add_state("LIST-ITEM", self._list_item)
        self.stateMachine.add_state("NUM-LIST-ITEM", self._num_list_item)
        self.stateMachine.add_state("LABELED-LIST-ITEM",
                                    self._labeled_list_item)
        self.stateMachine.add_state("BLOCK-INSERT", self._block_insert)
        self.stateMachine.add_state("STRING-DEF", self._string_def)
        self.stateMachine.add_state("LINE-START", self._line_start)
        self.stateMachine.add_state("EMBEDDED-XML", self._embedded_xml)
        self.stateMachine.add_state("END", None, end_state=1)
        self.stateMachine.set_start("NEW")
        # Text accumulator for the paragraph or code block currently being
        # read; None when no text block is open.
        self.current_text_block = None
        self.doc = DocStructure()
        self.source = None
        # One compiled regex per recognizable line form.  The re_* pieces
        # are module-level pattern fragments defined elsewhere in this
        # file.  A 'codeblock-end' entry is added dynamically by
        # _codeblock_start, keyed to the flag that opened the code block.
        self.patterns = {
            'sam-declaration':
            re.compile(
                r'sam:\s*(?:(?:\{(?P<namespace>\S+?)\})|(?P<schema>\S+))?',
                re.U),
            'comment':
            re.compile(re_indent + re_comment, re.U),
            'block-start':
            re.compile(
                re_indent + re_name + r':' + re_attributes + re_content + r'?',
                re.U),
            'codeblock-start':
            re.compile(
                re_indent +
                r'(?P<flag>```[^\s\(]*)(\((?P<language>\S*)\s*(["\'](?P<source>.+?)["\'])?\s*(\((?P<namespace>\S+?)\))?(?P<other>.+?)?\))?',
                re.U),
            'blockquote-start':
            re.compile(
                re_indent + r'("""|\'\'\'|blockquote:)' + re_attributes +
                r'((\[\s*\*(?P<id>\S+)(?P<id_extra>.+?)\])|(\[\s*\#(?P<name>\S+)(?P<name_extra>.+?)\])|(\[\s*(?P<citation>.*?)\]))?',
                re.U),
            'fragment-start':
            re.compile(re_indent + r'~~~' + re_attributes, re.U),
            'paragraph-start':
            re.compile(r'\w*', re.U),
            'line-start':
            re.compile(
                re_indent + r'\|' + re_attributes + re_one_space + re_content,
                re.U),
            'blank-line':
            re.compile(r'^\s*$'),
            'record-start':
            re.compile(re_indent + re_name + r'::(?P<field_names>.*)', re.U),
            'list-item':
            re.compile(
                re_indent + re_ul_marker + re_attributes + re_spaces +
                re_content, re.U),
            'num-list-item':
            re.compile(
                re_indent + re_ol_marker + re_attributes + re_spaces +
                re_content, re.U),
            'labeled-list-item':
            re.compile(
                re_indent + re_ll_marker + re_attributes + re_spaces +
                re_content, re.U),
            'block-insert':
            re.compile(re_indent + r'>>>' + re_attributes, re.U),
            'string-def':
            re.compile(re_indent + r'\$' + re_name + '=' + re_content, re.U),
            'embedded-xml':
            re.compile(re_indent + r'(?P<xmltag>\<\?xml.+)', re.U)
        }

    def parse(self, source):
        """Parse *source* (a string of SAM markup) into ``self.doc``.

        Raises SAMParserError if the input ends before the document
        structure is complete (the state machine hits EOF outside of a
        handler that knows how to finish).
        """
        self.source = StringSource(source)
        try:
            self.stateMachine.run(self.source)
        except EOFError:
            raise SAMParserError(
                "Document ended before structure was complete.")

    def _new_file(self, source):
        """Consume the first line; require a 'sam:' declaration.

        Creates the document root from the declaration match and hands
        control to the SAM dispatch state.
        """
        line = source.next_line
        match = self.patterns['sam-declaration'].match(line)
        if match:
            self.doc.new_root(match)
            return "SAM", (source, None)
        else:
            raise SAMParserError("Not a SAM file!")

    def _block(self, context):
        """Create a named block from a ``name: content`` line."""
        source, match = context
        indent = len(match.group("indent"))
        block_name = match.group("name").strip()
        attributes = self.parse_block_attributes(match.group("attributes"))
        content = match.group("content")
        # Empty content means the block's children follow on later lines.
        parsed_content = None if content == '' else para_parser.parse(
            content, self.doc)
        self.doc.new_block(block_name, attributes, parsed_content, indent)
        return "SAM", context

    def _codeblock_start(self, context):
        """Open a fenced code block (``` flag line) and collect its attributes."""
        source, match = context
        indent = len(match.group("indent"))
        codeblock_flag = match.group("flag")
        # The closing fence must repeat the exact opening flag.
        # NOTE(review): '\s*$' is a non-raw string (DeprecationWarning on
        # modern Python) and codeblock_flag is not re.escape()d — confirm
        # flags can never contain regex metacharacters.
        self.patterns['codeblock-end'] = re.compile(r'(\s*)' + codeblock_flag +
                                                    '\s*$')

        attributes = {}

        language = match.group("language")
        if language is not None:
            attributes['language'] = language

        source = match.group("source")
        if source is not None:
            attributes["source"] = source

        namespace = match.group("namespace")
        if namespace is not None:
            attributes["namespace"] = namespace

        other = match.group("other")
        if other is not None:
            attributes.update(self.parse_block_attributes(other))

        self.doc.new_block('codeblock', attributes, None, indent)
        self.current_text_block = TextBlock()
        return "CODEBLOCK", context

    def _codeblock(self, context):
        """Accumulate code block lines until the closing fence is seen."""
        source, match = context
        line = source.next_line
        if self.patterns['codeblock-end'].match(line):
            # Emit the collected text verbatim (preformatted flow).
            self.doc.new_flow(Pre(self.current_text_block))
            self.current_text_block = None
            return "SAM", context
        else:
            self.current_text_block.append(line)
            return "CODEBLOCK", context

    def _blockquote_start(self, context):
        """Open a blockquote and attach an optional citation.

        The citation may be an id reference (``[*id ...]``), a name
        reference (``[#name ...]``), or free-form citation text (``[...]``).
        """
        source, match = context
        indent = len(match.group('indent'))

        # TODO: Refactor this with the paraparser version

        # Anything on the line beyond the recognized start marker is an
        # error rather than silently ignored content.
        extra = source.current_line.rstrip()[len(match.group(0)):]
        if extra:
            raise SAMParserError("Extra text found after blockquote start: " +
                                 extra)

        attributes = self.parse_block_attributes(match.group("attributes"))

        b = self.doc.new_block('blockquote', attributes, None, indent)

        #see if there is a citation
        try:
            idref = match.group('id')
        except IndexError:
            idref = None
        try:
            nameref = match.group('name')
        except IndexError:
            nameref = None
        try:
            citation = match.group('citation')
        except IndexError:
            citation = None

        if idref:
            citation_type = 'idref'
            citation_value = idref.strip()
            extra = match.group('id_extra')
        elif nameref:
            citation_type = 'nameref'
            citation_value = nameref.strip()
            extra = match.group('name_extra')
        elif citation:
            citation_type = 'citation'
            citation_value = citation.strip()
        else:
            citation_type = None

        if citation_type:
            cit = (Citation(citation_type, citation_value, extra))
            b.add_child(cit)

        return "SAM", context

    def _fragment_start(self, context):
        """Open a fragment block (``~~~`` line) with optional attributes."""
        source, match = context
        indent = len(match.group('indent'))

        attributes = {}

        attributes_string = match.group("attributes")
        if attributes_string is not None:
            attributes.update(self.parse_block_attributes(attributes_string))

        self.doc.new_block('fragment', attributes, None, indent)
        return "SAM", context

    def _paragraph_start(self, context):
        """Open a paragraph and start accumulating its text."""
        source, match = context
        line = source.current_line
        # Indent is measured from the raw line, not the (permissive) match.
        local_indent = len(line) - len(line.lstrip())
        self.doc.new_paragraph(None, '', local_indent)
        self.current_text_block = TextBlock(line)
        return "PARAGRAPH", context

    def _paragraph(self, context):
        """Accumulate paragraph lines until a blank line, EOF, or list item.

        On EOF the paragraph is flushed and parsing ends; on a blank line
        it is flushed and dispatch resumes; inside a p/li context a list
        item marker also terminates the paragraph (the line is pushed back
        for re-dispatch).
        """
        source, match = context
        try:
            line = source.next_line
        except EOFError:
            f = para_parser.parse(self.current_text_block.text, self.doc)
            self.current_text_block = None
            self.doc.new_flow(f)
            return "END", context

        if self.patterns['blank-line'].match(line):
            f = para_parser.parse(self.current_text_block.text, self.doc)
            self.current_text_block = None
            self.doc.new_flow(f)
            return "SAM", context

        if self.doc.in_context(['p', 'li']):
            if self.patterns['list-item'].match(
                    line) or self.patterns['num-list-item'].match(
                        line) or self.patterns['labeled-list-item'].match(
                            line):
                f = para_parser.parse(self.current_text_block.text, self.doc)
                self.current_text_block = None
                self.doc.new_flow(f)
                source.return_line()
                return "SAM", context

        self.current_text_block.append(line)
        return "PARAGRAPH", context

    def _list_item(self, context):
        """Open an unordered list item; its text continues as a paragraph."""
        source, match = context
        indent = len(match.group("indent"))
        attributes = self.parse_block_attributes(match.group("attributes"))
        self.doc.new_unordered_list_item(attributes, indent)
        self.current_text_block = TextBlock(
            str(match.group("content")).strip())
        return "PARAGRAPH", context

    def _num_list_item(self, context):
        """Open an ordered list item; its text continues as a paragraph."""
        source, match = context
        indent = len(match.group("indent"))
        attributes = self.parse_block_attributes(match.group("attributes"))
        self.doc.new_ordered_list_item(attributes, indent)
        self.current_text_block = TextBlock(
            str(match.group("content")).strip())
        return "PARAGRAPH", context

    def _labeled_list_item(self, context):
        """Open a labeled list item; its text continues as a paragraph."""
        source, match = context
        indent = len(match.group("indent"))
        label = match.group("label")
        attributes = self.parse_block_attributes(match.group("attributes"))
        self.doc.new_labeled_list_item(attributes, indent, label)
        self.current_text_block = TextBlock(
            str(match.group("content")).strip())
        return "PARAGRAPH", context

    def _block_insert(self, context):
        """Record a block insert (``>>>`` line) via parse_insert."""
        source, match = context
        indent = len(match.group("indent"))
        self.doc.new_block("insert",
                           attributes=parse_insert(match.group("attributes")),
                           text=None,
                           indent=indent)
        return "SAM", context

    def _string_def(self, context):
        """Record a string definition (``$name=content`` line)."""
        source, match = context
        indent = len(match.group("indent"))
        self.doc.new_string_def(match.group('name'),
                                para_parser.parse(match.group('content'),
                                                  self.doc),
                                indent=indent)
        return "SAM", context

    def _line_start(self, context):
        """Record a line block (``|`` line); content keeps its whitespace."""
        source, match = context
        indent = len(match.group("indent"))
        self.doc.new_block('line',
                           self.parse_block_attributes(
                               match.group("attributes")),
                           para_parser.parse(match.group('content'),
                                             self.doc,
                                             strip=False),
                           indent=indent)
        return "SAM", context

    def _record_start(self, context):
        """Open a record set from a ``name:: field, field, ...`` header."""
        source, match = context
        indent = len(match.group("indent"))
        record_name = match.group("name").strip()
        field_names = [
            x.strip() for x in match.group("field_names").split(',')
        ]
        self.doc.new_record_set(record_name, field_names, indent)
        return "RECORD", context

    def _record(self, context):
        """Read one CSV-style record row; dedent or blank line ends the set.

        Commas may be escaped with a backslash.  Raises SAMParserError if
        the number of values does not match the record set header.
        """
        source, match = context
        try:
            line = source.next_line
        except EOFError:
            return "END", context
        indent = len(line) - len(line.lstrip())
        if self.patterns['blank-line'].match(line):
            return "SAM", context
        elif indent < self.doc.current_block.indent:
            # Dedent: the row belongs to an outer construct; push it back.
            source.return_line()
            return "SAM", context
        else:
            # Split on unescaped commas only.
            field_values = [x.strip() for x in re.split(r'(?<!\\),', line)]
            if len(field_values) != len(self.doc.fields):
                raise SAMParserError(
                    "Record length does not match record set header. At:\n\n "
                    + line)
            record = list(zip(self.doc.fields, field_values))
            self.doc.new_record(record)
            return "RECORD", context

    def _embedded_xml(self, context):
        """Consume an embedded XML fragment starting at an ``<?xml`` line.

        Feeds lines to an expat parser until it reports "junk after
        document element", which marks the first line past the fragment;
        that line is pushed back and the fragment (without it) is stored.
        """
        source, match = context
        indent = len(match.group("indent"))
        embedded_xml_parser = xml.parsers.expat.ParserCreate()
        embedded_xml_parser.XmlDeclHandler = self._embedded_xml_declaration_check
        embedded_xml_parser.Parse(source.current_line.strip())
        xml_lines = []
        try:
            while True:
                line = source.next_line
                xml_lines.append(line)
                embedded_xml_parser.Parse(line)
        except xml.parsers.expat.ExpatError as err:
            if err.code == 9:  #junk after document element
                source.return_line()
                xml_text = ''.join(xml_lines[:-1])
                self.doc.new_embedded_xml(xml_text, indent)
                return "SAM", context
            else:
                raise

    def _embedded_xml_declaration_check(self, version, encoding, standalone):
        """Expat XmlDeclHandler: enforce XML 1.0 and UTF-8 for fragments."""
        if version != "1.0":
            raise SAMParserError(
                "The version of an embedded XML fragment must be 1.0.")
        if encoding.upper() != "UTF-8":
            raise SAMParserError(
                "The encoding of an embedded XML fragment must be UTF-8.")

    def _sam(self, context):
        """Dispatch state: classify the next line and route to its handler.

        Patterns are tried in priority order (comment first, generic
        block-start and paragraph-start last, since those match broadly).
        """
        source, match = context
        try:
            line = source.next_line
        except EOFError:
            return "END", context

        match = self.patterns['comment'].match(line)
        if match is not None:
            self.doc.new_comment(Comment(line.strip()[1:]))
            return "SAM", (source, match)

        match = self.patterns['record-start'].match(line)
        if match is not None:
            return "RECORD-START", (source, match)

        match = self.patterns['blank-line'].match(line)
        if match is not None:
            return "SAM", (source, match)

        match = self.patterns['codeblock-start'].match(line)
        if match is not None:
            return "CODEBLOCK-START", (source, match)

        match = self.patterns['blockquote-start'].match(line)
        if match is not None:
            return "BLOCKQUOTE-START", (source, match)

        match = self.patterns['fragment-start'].match(line)
        if match is not None:
            return "FRAGMENT-START", (source, match)

        match = self.patterns['list-item'].match(line)
        if match is not None:
            return "LIST-ITEM", (source, match)

        match = self.patterns['num-list-item'].match(line)
        if match is not None:
            return "NUM-LIST-ITEM", (source, match)

        match = self.patterns['labeled-list-item'].match(line)
        if match is not None:
            return "LABELED-LIST-ITEM", (source, match)

        match = self.patterns['block-insert'].match(line)
        if match is not None:
            return "BLOCK-INSERT", (source, match)

        match = self.patterns['string-def'].match(line)
        if match is not None:
            return "STRING-DEF", (source, match)

        match = self.patterns['line-start'].match(line)
        if match is not None:
            return "LINE-START", (source, match)

        match = self.patterns['embedded-xml'].match(line)
        if match is not None:
            return "EMBEDDED-XML", (source, match)

        match = self.patterns['block-start'].match(line)
        if match is not None:
            return "BLOCK", (source, match)

        match = self.patterns['paragraph-start'].match(line)
        if match is not None:
            return "PARAGRAPH-START", (source, match)

        raise SAMParserError("I'm confused")

    def serialize(self, serialize_format):
        """Serialize the parsed document in *serialize_format* (delegated to DocStructure)."""
        return self.doc.serialize(serialize_format)

    def parse_block_attributes(self, attributes_string):
        """Parse a block attribute string into a dict.

        Recognized sigils: ``?`` condition, ``#`` name, ``*`` id,
        ``~`` language.  At most one id, name, and language is allowed,
        and ids must be unique across the document.  Returns None when
        *attributes_string* is None (the AttributeError path).
        """
        result = {}
        try:
            attributes_list = attributes_string.split()
        except AttributeError:
            return None
        unexpected_attributes = [
            x for x in attributes_list if not (x[0] in '?#*~')
        ]
        if unexpected_attributes:
            raise SAMParserError("Unexpected attribute(s): {0}".format(
                ', '.join(unexpected_attributes)))
        ids = [x[1:] for x in attributes_list if x[0] == '*']
        if len(ids) > 1:
            raise SAMParserError("More than one ID specified: " +
                                 ", ".join(ids))
        names = [x[1:] for x in attributes_list if x[0] == '#']
        if len(names) > 1:
            raise SAMParserError("More than one name specified: " +
                                 ", ".join(names))
        language = [x[1:] for x in attributes_list if x[0] == '~']
        if len(language) > 1:
            raise SAMParserError("More than one language specified: " +
                                 ", ".join(language))
        conditions = [x[1:] for x in attributes_list if x[0] == '?']
        if ids:
            if ids[0] in self.doc.ids:
                raise SAMParserError("Duplicate ID found: " + ids[0])
            self.doc.ids.extend(ids)
            result["id"] = "".join(ids)
        if names:
            result["name"] = "".join(names)
        if language:
            result["xml:lang"] = "".join(language)
        if conditions:
            result["conditions"] = " ".join(conditions)
        return result
Beispiel #40
0
        newState = "groen"
        time = 30
    else:
        newState = "rood"
        time = time - 1

    return (newState, time, car)


def timergroen(time, car):
    """One tick of the green-light state of the traffic-light machine.

    While the countdown is positive, stay green, decrement the timer,
    and let one waiting car through (if any).  When the countdown has
    expired, switch to red and reset the timer to 30.

    Returns a ``(new_state, time, car)`` tuple for the state machine.
    """
    if time <= 0:
        # Green phase over: go red and rearm the 30-tick countdown.
        next_state, remaining = "rood", 30
    else:
        next_state, remaining = "groen", time - 1

    # One queued car clears the intersection per tick.
    waiting = car - 1 if car > 0 else car

    return (next_state, remaining, waiting)


if __name__ == "__main__":
    m = StateMachine()
    m.add_state("rood", timerrood, end_state=1)
    m.add_state("groen", timergroen)
    m.set_start("rood")
    m.run(30, 0)

# https://www.python-course.eu/finite_state_machine.php
Beispiel #41
0
class SamParser:
    def __init__(self):

        self.stateMachine = StateMachine()
        self.stateMachine.add_state("NEW", self._new_file)
        self.stateMachine.add_state("SAM", self._sam)
        self.stateMachine.add_state("BLOCK", self._block)
        self.stateMachine.add_state("CODEBLOCK-START", self._codeblock_start)
        self.stateMachine.add_state("CODEBLOCK", self._codeblock)
        self.stateMachine.add_state("BLOCKQUOTE-START", self._blockquote_start)
        self.stateMachine.add_state("FRAGMENT-START", self._fragment_start)
        self.stateMachine.add_state("PARAGRAPH-START", self._paragraph_start)
        self.stateMachine.add_state("PARAGRAPH", self._paragraph)
        self.stateMachine.add_state("RECORD-START", self._record_start)
        self.stateMachine.add_state("RECORD", self._record)
        self.stateMachine.add_state("LIST-ITEM", self._list_item)
        self.stateMachine.add_state("NUM-LIST-ITEM", self._num_list_item)
        self.stateMachine.add_state("LABELED-LIST-ITEM", self._labeled_list_item)
        self.stateMachine.add_state("BLOCK-INSERT", self._block_insert)
        self.stateMachine.add_state("STRING-DEF", self._string_def)
        self.stateMachine.add_state("LINE-START", self._line_start)
        self.stateMachine.add_state("EMBEDDED-XML", self._embedded_xml)
        self.stateMachine.add_state("END", None, end_state=1)
        self.stateMachine.set_start("NEW")
        self.current_text_block = None
        self.doc = DocStructure()
        self.source = None
        self.patterns = {
            'sam-declaration': re.compile(r'sam:\s*(?:(?:\{(?P<namespace>\S+?)\})|(?P<schema>\S+))?', re.U),
            'comment': re.compile(re_indent + re_comment, re.U),
            'block-start': re.compile(re_indent + re_name + r':' + re_attributes + re_content + r'?', re.U),
            'codeblock-start': re.compile(re_indent + r'(?P<flag>```[^\s\(]*)(\((?P<language>\S*)\s*(["\'](?P<source>.+?)["\'])?\s*(\((?P<namespace>\S+?)\))?(?P<other>.+?)?\))?', re.U),
            'blockquote-start': re.compile(re_indent + r'("""|\'\'\'|blockquote:)' + re_attributes + r'((\[\s*\*(?P<id>\S+)(?P<id_extra>.+?)\])|(\[\s*\#(?P<name>\S+)(?P<name_extra>.+?)\])|(\[\s*(?P<citation>.*?)\]))?', re.U),
            'fragment-start': re.compile(re_indent + r'~~~' + re_attributes, re.U),
            'paragraph-start': re.compile(r'\w*', re.U),
            'line-start': re.compile(re_indent + r'\|' + re_attributes + re_one_space + re_content, re.U),
            'blank-line': re.compile(r'^\s*$'),
            'record-start': re.compile(re_indent + re_name + r'::(?P<field_names>.*)', re.U),
            'list-item': re.compile(re_indent + re_ul_marker + re_attributes + re_spaces + re_content, re.U),
            'num-list-item': re.compile(re_indent + re_ol_marker + re_attributes + re_spaces + re_content, re.U),
            'labeled-list-item': re.compile(re_indent + re_ll_marker + re_attributes + re_spaces + re_content, re.U),
            'block-insert': re.compile(re_indent + r'>>>' + re_attributes, re.U),
            'string-def': re.compile(re_indent + r'\$' + re_name + '=' + re_content, re.U),
            'embedded-xml': re.compile(re_indent + r'(?P<xmltag>\<\?xml.+)', re.U)
        }

    def parse(self, source):
        self.source = StringSource(source)
        try:
            self.stateMachine.run(self.source)
        except EOFError:
            raise SAMParserError("Document ended before structure was complete.")

    def _new_file(self, source):
        line = source.next_line
        match = self.patterns['sam-declaration'].match(line)
        if match:
            self.doc.new_root(match)
            return "SAM", (source, None)
        else:
            raise SAMParserError("Not a SAM file!")

    def _block(self, context):
        source, match = context
        indent = len(match.group("indent"))
        block_name = match.group("name").strip()
        attributes = self.parse_block_attributes(match.group("attributes"))
        content = match.group("content")
        parsed_content = None if content == '' else para_parser.parse(content, self.doc)
        self.doc.new_block(block_name, attributes, parsed_content, indent)
        return "SAM", context

    def _codeblock_start(self, context):
        source, match = context
        indent = len(match.group("indent"))
        codeblock_flag = match.group("flag")
        self.patterns['codeblock-end'] = re.compile(r'(\s*)' + codeblock_flag + '\s*$')

        attributes = {}

        language = match.group("language")
        if language is not None:
            attributes['language'] = language

        source = match.group("source")
        if source is not None:
            attributes["source"] = source

        namespace = match.group("namespace")
        if namespace is not None:
            attributes["namespace"] = namespace

        other = match.group("other")
        if other is not None:
            attributes.update(self.parse_block_attributes(other))

        self.doc.new_block('codeblock', attributes, None, indent)
        self.current_text_block = TextBlock()
        return "CODEBLOCK", context

    def _codeblock(self, context):
        source, match = context
        line = source.next_line
        if self.patterns['codeblock-end'].match(line):
            self.doc.new_flow(Pre(self.current_text_block))
            self.current_text_block = None
            return "SAM", context
        else:
            self.current_text_block.append(line)
            return "CODEBLOCK", context

    def _blockquote_start(self, context):
        source, match = context
        indent = len(match.group('indent'))

        # TODO: Refactor this with the paraparser version


        extra=source.current_line.rstrip()[len(match.group(0)):]
        if extra:
            raise SAMParserError("Extra text found after blockquote start: " + extra)

        attributes = self.parse_block_attributes(match.group("attributes"))

        b = self.doc.new_block('blockquote', attributes, None, indent)

        #see if there is a citation
        try:
            idref = match.group('id')
        except IndexError:
            idref=None
        try:
            nameref = match.group('name')
        except IndexError:
            nameref = None
        try:
            citation = match.group('citation')
        except IndexError:
            citation=None

        if idref:
            citation_type = 'idref'
            citation_value = idref.strip()
            extra = match.group('id_extra')
        elif nameref:
            citation_type = 'nameref'
            citation_value = nameref.strip()
            extra = match.group('name_extra')
        elif citation:
            citation_type = 'citation'
            citation_value = citation.strip()
        else:
            citation_type=None

        if citation_type:
            cit = (Citation(citation_type, citation_value, extra))
            b.add_child(cit)

        return "SAM", context

    def _fragment_start(self, context):
        source, match = context
        indent = len(match.group('indent'))

        attributes = {}

        attributes_string = match.group("attributes")
        if attributes_string is not None:
            attributes.update(self.parse_block_attributes(attributes_string))

        self.doc.new_block('fragment', attributes, None, indent)
        return "SAM", context

    def _paragraph_start(self, context):
        source, match = context
        line = source.current_line
        local_indent = len(line) - len(line.lstrip())
        self.doc.new_paragraph(None, '', local_indent)
        self.current_text_block = TextBlock(line)
        return "PARAGRAPH", context

    def _paragraph(self, context):
        source, match = context
        try:
            line = source.next_line
        except EOFError:
            f = para_parser.parse(self.current_text_block.text, self.doc)
            self.current_text_block = None
            self.doc.new_flow(f)
            return "END", context

        if self.patterns['blank-line'].match(line):
            f = para_parser.parse(self.current_text_block.text, self.doc)
            self.current_text_block = None
            self.doc.new_flow(f)
            return "SAM", context

        if self.doc.in_context(['p', 'li']):
            if self.patterns['list-item'].match(line) or self.patterns['num-list-item'].match(line) or self.patterns['labeled-list-item'].match(line):
                f = para_parser.parse(self.current_text_block.text, self.doc)
                self.current_text_block = None
                self.doc.new_flow(f)
                source.return_line()
                return "SAM", context

        self.current_text_block.append(line)
        return "PARAGRAPH", context

    def _list_item(self, context):
        source, match = context
        indent = len(match.group("indent"))
        attributes = self.parse_block_attributes(match.group("attributes"))
        self.doc.new_unordered_list_item(attributes, indent)
        self.current_text_block = TextBlock(str(match.group("content")).strip())
        return "PARAGRAPH", context

    def _num_list_item(self, context):
        source, match = context
        indent = len(match.group("indent"))
        attributes = self.parse_block_attributes(match.group("attributes"))
        self.doc.new_ordered_list_item(attributes, indent)
        self.current_text_block = TextBlock(str(match.group("content")).strip())
        return "PARAGRAPH", context

    def _labeled_list_item(self, context):
        source, match = context
        indent = len(match.group("indent"))
        label = match.group("label")
        attributes = self.parse_block_attributes(match.group("attributes"))
        self.doc.new_labeled_list_item(attributes, indent, label)
        self.current_text_block = TextBlock(str(match.group("content")).strip())
        return "PARAGRAPH", context

    def _block_insert(self, context):
        source, match = context
        indent = len(match.group("indent"))
        self.doc.new_block("insert", attributes=parse_insert(match.group("attributes")), text=None, indent=indent)
        return "SAM", context

    def _string_def(self, context):
        source, match = context
        indent = len(match.group("indent"))
        self.doc.new_string_def(match.group('name'), para_parser.parse(match.group('content'), self.doc), indent=indent)
        return "SAM", context

    def _line_start(self, context):
        source, match = context
        indent = len(match.group("indent"))
        self.doc.new_block('line', self.parse_block_attributes(match.group("attributes")), para_parser.parse(match.group('content'), self.doc, strip=False), indent=indent)
        return "SAM", context

    def _record_start(self, context):
        source, match = context
        indent = len(match.group("indent"))
        record_name = match.group("name").strip()
        field_names = [x.strip() for x in match.group("field_names").split(',')]
        self.doc.new_record_set(record_name, field_names, indent)
        return "RECORD", context

    def _record(self, context):
        source, match = context
        try:
            line = source.next_line
        except EOFError:
            return "END", context
        indent = len(line) - len(line.lstrip())
        if self.patterns['blank-line'].match(line):
            return "SAM", context
        elif indent < self.doc.current_block.indent:
            source.return_line()
            return "SAM", context
        else:
            field_values = [x.strip() for x in re.split(r'(?<!\\),',line)]
            if len(field_values) != len(self.doc.fields):
                raise SAMParserError("Record length does not match record set header. At:\n\n " + line)
            record = list(zip(self.doc.fields, field_values))
            self.doc.new_record(record)
            return "RECORD", context

    def _embedded_xml(self, context):
        source, match = context
        indent = len(match.group("indent"))
        embedded_xml_parser = xml.parsers.expat.ParserCreate()
        embedded_xml_parser.XmlDeclHandler=self._embedded_xml_declaration_check
        embedded_xml_parser.Parse(source.current_line.strip())
        xml_lines = []
        try:
            while True:
                line = source.next_line
                xml_lines.append(line)
                embedded_xml_parser.Parse(line)
        except xml.parsers.expat.ExpatError as err:
            if err.code==9: #junk after document element
                source.return_line()
                xml_text = ''.join(xml_lines[:-1])
                self.doc.new_embedded_xml(xml_text, indent)
                return "SAM", context
            else:
                raise

    def _embedded_xml_declaration_check(self, version, encoding, standalone):
        if version != "1.0":
            raise SAMParserError("The version of an embedded XML fragment must be 1.0.")
        if encoding.upper() != "UTF-8":
            raise SAMParserError("The encoding of an embedded XML fragment must be UTF-8.")



    def _sam(self, context):
        """Top-level dispatch state: classify the next line and return the
        parser state that should handle it, with the new (source, match)
        context. Returns "END" at end of input.
        """
        source, match = context
        try:
            line = source.next_line
        except EOFError:
            return "END", context

        # Comments are consumed right here instead of in a dedicated state.
        match = self.patterns['comment'].match(line)
        if match is not None:
            self.doc.new_comment(Comment(line.strip()[1:]))
            return "SAM", (source, match)

        # First matching pattern wins; order reproduces the original cascade.
        dispatch = (
            ('record-start', "RECORD-START"),
            ('blank-line', "SAM"),
            ('codeblock-start', "CODEBLOCK-START"),
            ('blockquote-start', "BLOCKQUOTE-START"),
            ('fragment-start', "FRAGMENT-START"),
            ('list-item', "LIST-ITEM"),
            ('num-list-item', "NUM-LIST-ITEM"),
            ('labeled-list-item', "LABELED-LIST-ITEM"),
            ('block-insert', "BLOCK-INSERT"),
            ('string-def', "STRING-DEF"),
            ('line-start', "LINE-START"),
            ('embedded-xml', "EMBEDDED-XML"),
            ('block-start', "BLOCK"),
            ('paragraph-start', "PARAGRAPH-START"),
        )
        for pattern_name, state in dispatch:
            match = self.patterns[pattern_name].match(line)
            if match is not None:
                return state, (source, match)

        raise SAMParserError("I'm confused")

    def serialize(self, serialize_format):
        """Serialize the parsed document in the requested format by
        delegating to the document object."""
        document = self.doc
        return document.serialize(serialize_format)

    def parse_block_attributes(self, attributes_string):
        """Parse a block attribute string into a result dict.

        Token sigils: ``*id`` ``#name`` ``~language`` ``?condition``.
        At most one id/name/language is allowed; conditions may repeat.
        Returns None when attributes_string cannot be split (e.g. None),
        otherwise a dict with any of the keys "id", "name", "xml:lang",
        "conditions". Raises SAMParserError on unexpected or duplicate
        attributes.
        """
        try:
            tokens = attributes_string.split()
        except AttributeError:
            # Not a string (typically None): no attributes at all.
            return None

        unexpected = [t for t in tokens if not (t[0] in '?#*~')]
        if unexpected:
            raise SAMParserError("Unexpected attribute(s): {0}".format(', '.join(unexpected)))

        ids = [t[1:] for t in tokens if t[0] == '*']
        if len(ids) > 1:
            raise SAMParserError("More than one ID specified: " + ", ".join(ids))
        names = [t[1:] for t in tokens if t[0] == '#']
        if len(names) > 1:
            raise SAMParserError("More than one name specified: " + ", ".join(names))
        languages = [t[1:] for t in tokens if t[0] == '~']
        if len(languages) > 1:
            raise SAMParserError("More than one language specified: " + ", ".join(languages))
        conditions = [t[1:] for t in tokens if t[0] == '?']

        result = {}
        if ids:
            # IDs must be unique document-wide.
            if ids[0] in self.doc.ids:
                raise SAMParserError("Duplicate ID found: " + ids[0])
            self.doc.ids.extend(ids)
            result["id"] = "".join(ids)
        if names:
            result["name"] = "".join(names)
        if languages:
            result["xml:lang"] = "".join(languages)
        if conditions:
            result["conditions"] = " ".join(conditions)
        return result
# Beispiel #42
# 0
        else:
            print(" #%2.1f+" % val)
        val = math_func(val)
    print( " >>")
    return (newState, val)
def twenties_counter(val):
    """State handler for values in the twenties band.

    Repeatedly transforms val with math_func until it falls into another
    band, then returns (next_state_name, val).

    NOTE(review): returns "Out_of_Range" while the driver registers
    "OUT_OF_RANGE" — presumably StateMachine normalizes state-name case;
    confirm against its implementation.
    """
    print("TWENTIES State:",)
    while True:
        if val <= 0 or val >= 30:
            next_state = "Out_of_Range"
            break
        if 1 <= val < 10:
            next_state = "ONES"
            break
        if 10 <= val < 20:
            next_state = "TENS"
            break
        # Still in the twenties band: report and transform again.
        print(" *%2.1f+" % val)
        val = math_func(val)
    print(" >>")
    return (next_state, val)
def math_func(n):
    """Return 31 * |sin(n)| — a deterministic value in [0, 31)."""
    import math
    return math.fabs(math.sin(n)) * 31
if __name__== "__main__":
    # Demo driver: walk the value 1 through the ONES/TENS/TWENTIES states
    # until it leaves the 0-30 range.
    m = StateMachine()
    m.add_state("ONES", ones_counter)
    m.add_state("TENS", tens_counter)
    m.add_state("TWENTIES", twenties_counter)
    # NOTE(review): the handlers return "Out_of_Range" while the end state
    # is registered as "OUT_OF_RANGE" — presumably StateMachine upper-cases
    # state names; confirm, otherwise the machine never reaches its end state.
    m.add_state("OUT_OF_RANGE", None, end_state=1)
    m.set_start("ONES")
    m.run(1)
    def go(self):
        """Build and run the tense/voice state machine over the cargo
        stream, returning the resulting (tvstate, action) pair."""
        m = StateMachine()

        # (state name, transition handler, end-state flag). Registration
        # order reproduces the original hand-written sequence exactly.
        states = (
            # Present
            ("Start", self.start_transitions, 0),
            ('1A_state', self._1A_state_transitions, 0),
            ('1C_state', None, 1),
            ('2A_state', self._2A_state_transitions, 0),
            ('2B_state', None, 1),
            ('2C_state', None, 1),
            ('Z1_VBP_state', self._Z1_VBP_state_transitions, 0),
            ('Z1_VBZ_state', self._Z1_VBZ_state_transitions, 0),
            ('1B_VBP_state', None, 1),
            ('1B_VBZ_state', None, 1),
            ('1D_state', None, 1),  # TODO: review, we add quan-subj state for uw-166
            ('Z2_state', self._Z2_state_transitions, 0),
            ('3A_state', self._3A_state_transitions, 0),
            ('3B_state', None, 1),
            ('3C_state', None, 1),
            ('4A_state', self._4A_state_transitions, 0),
            ('4B_state', None, 1),
            ('4C_state', None, 1),
            # Past
            ('5A_state', self._5A_state_transitions, 0),
            ('5C_state', None, 1),
            ('6A_state', self._6A_state_transitions, 0),
            ('6B_state', None, 1),
            ('6C_state', None, 1),
            ('Z3_state', self._Z3_state_transitions, 0),
            ('5B_state', None, 1),
            ('Z4_state', self._Z4_state_transitions, 0),
            ('7A_state', self._7A_state_transitions, 0),
            ('7B_state', None, 1),
            ('7C_state', None, 1),
            ('8A_state', self._8A_state_transitions, 0),
            ('8B_state', None, 1),
            ('8C_state', None, 1),
            # Future and MD
            ('ZM_state', self._ZM_state_transitions, 0),
            ('9A_state', self._9A_state_transitions, 0),
            ('9C_state', None, 1),
            ('10A_state', self._10A_state_transitions, 0),
            ('10B_state', None, 1),
            ('10C_state', None, 1),
            ('9B_state', self._9B_state_transitions, 0),  # Z5 merged with 9B
            ('11A_state', self._11A_state_transitions, 0),
            ('11B_state', None, 1),
            ('11C_state', None, 1),
            ('12A_state', self._12A_state_transitions, 0),
            ('12B_state', None, 1),
            ('12C_state', None, 1),
            ('error_state', None, 1),
        )
        for name, handler, is_end in states:
            if is_end:
                m.add_state(name, handler, end_state=1)
            else:
                m.add_state(name, handler)

        m.set_start("Start")
        # NOTE(review): `actions` is not defined in this method — presumably
        # a module-level global; confirm it is in scope at call time.
        m.run(self.__cargos, actions)

        return m.get_tvstate(), m.get_action()