Example No. 1
class Game:
    def __init__(self, context):
        self.ctx = context
        self.m = StateMachine()
        self.m.add_state("init", self.sm_init)
        #       m.add_state("idle", sm_idle)
        self.m.add_state("start", self.sm_start)
        self.m.add_state("end", self.sm_end, end_state=1)
        self.m.set_start("init")
        self.m.run(self.ctx)

    def sm_init(self, ctx):
        # initialize
        newState = "start"
        return newState, ctx

    def sm_start(self, ctx):
        # pick two players, start game
        p1, p2 = self.get_random_players()
        p1.init(10)
        p2.init(10)

        p1.print_status()
        p2.print_status()

        res = None
        player = [p1, p2]
        next_move = 0
        while p1.has_ships() and p2.has_ships():
            x, y = player[next_move].get_move(res)
            res = player[next_move ^ 1].set_move(x, y)
            if res not in ["inj", "sink"]:
                next_move ^= 1

        if p1.has_ships():
            print "Player1 won"
        else:
            print "Player2 won"

        # TODO: should be move to "end" state
        p1.finalize()
        p2.finalize()

        newState = "end"

        return newState, ctx

    def sm_end(self, ctx):
        pass

    def get_random_players(self):
        # ugly:
        players = self.ctx.get_players()
        player1 = players.get_player()
        player2 = players.get_player(not_this_one=player1)
        return player1, player2

    def start_game(self):
        pass
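The snippets in this listing never show the StateMachine class itself, but most of them drive the same table-based interface: handlers are registered with add_state(name, handler, end_state=...), one state is chosen with set_start(), and run(cargo) keeps calling the current handler with the cargo and following the (new_state, cargo) tuple it returns until an end state is reached. The following is only a minimal sketch of that assumed interface, modelled on the common tutorial-style implementation rather than on any one project's actual class:

class StateMachine:
    def __init__(self):
        self.handlers = {}        # state name -> handler function
        self.start_state = None
        self.end_states = set()

    def add_state(self, name, handler, end_state=0):
        name = name.upper()       # assumption: state names are matched case-insensitively
        self.handlers[name] = handler
        if end_state:
            self.end_states.add(name)

    def set_start(self, name):
        self.start_state = name.upper()

    def run(self, cargo):
        if self.start_state is None or not self.end_states:
            raise RuntimeError("set_start() and at least one end state are required")
        handler = self.handlers[self.start_state]
        while True:
            new_state, cargo = handler(cargo)
            if new_state.upper() in self.end_states:
                break
            handler = self.handlers[new_state.upper()]

The case-insensitive lookup is only an assumption, but it would explain why Example No. 10 below can register "OUT_OF_RANGE" while its handlers return "Out_of_Range". Examples that use a different signature (such as add_state(handler) in Example No. 4, or start=True/end=True in Example No. 27) clearly rely on other StateMachine variants.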
Example No. 2
def startthegoddamnedgame():
    m = StateMachine()
    m.add_state("GameStarts", game_started)
    m.add_state("p1TurnStart", p1_turn_start)
    m.add_state("p2TurnStart", p2_turn_start)
    m.add_state("p1TurnEnd", p1_turn_end)
    m.add_state("p2TurnEnd", p2_turn_end)
    m.add_state("p1Win", p1_win)
    m.add_state("p2Win", p2_win)
    m.add_state("Game_Over", None, end_state=1)
    m.set_start("GameStarts")
    m.run(allTiles)
Example No. 3
def startthegoddamnedgame():
    m = StateMachine()
    m.add_state("GameStarts", game_started)
    m.add_state("p1TurnStart", p1_turn_start)
    m.add_state("p2TurnStart", p2_turn_start)
    m.add_state("p1TurnEnd", p1_turn_end)
    m.add_state("p2TurnEnd", p2_turn_end)
    m.add_state("p1Win", p1_win)
    m.add_state("p2Win", p2_win)
    m.add_state("Game_Over", None, end_state=1)
    m.set_start("GameStarts")
    m.run(allTiles)
Example No. 4
def run(fpin, fpout):

	global fpindex

	fpindex = fpout
	m = StateMachine()
	m.add_state(parse)
	m.add_state(NOTES)
	m.add_state(QUOTES)
	m.add_state(segment)
	m.add_state(error, end_state=1)
	m.add_state(eof, end_state=1)
	m.set_start(parse)
	m.run((fpin, ''))	
Example No. 5
def amplify(init, program, num):
    machine = StateMachine(operators, program)
    for tpl in perm(range(num)):
        output = init
        for signal in tpl:
            machine.send(signal, output)
            output = next(machine.run())
            machine.reset()
        yield output
Example No. 6
    def handle(self):
        m = StateMachine()
        try:
            m.add_state('greeting', greeting)
            m.add_state('helo', helo)
            m.add_state('mail', mail)
            m.add_state('rcpt', rcpt)
            m.add_state('data', data)
            m.add_state('process', process)
            m.add_state('done', None, end_state=1)
            m.set_start('greeting')

            m.run((self, {}))

        # in the event of an exception, capture the current
        # state and cargo dict and use the information
        # as part of the message sent to stdout
        except Exception as e:
            exception_data = {'state': m.current_state}
            if m.current_cargo:
                exception_data['data'] = m.current_cargo[1]
            e.args = (exception_data, )
            raise
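The comment inside the except block above notes that the failing state and its cargo are packed into the exception's args before re-raising. A caller could surface that information roughly as follows; the report_failure helper and the handler instance it receives are assumptions for the sketch, not part of the original code:

def report_failure(handler):
    # handler is assumed to be an instance of the (unshown) class that owns handle()
    try:
        handler.handle()
    except Exception as e:
        info = e.args[0] if e.args and isinstance(e.args[0], dict) else {}
        print("failed in state:", info.get("state"))
        print("cargo at failure:", info.get("data"))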
Example No. 7
    def handle(self):
        m = StateMachine()
        try:
            m.add_state('greeting', greeting)
            m.add_state('helo', helo)
            m.add_state('mail', mail)
            m.add_state('rcpt', rcpt)
            m.add_state('data', data)
            m.add_state('process', process)
            m.add_state('done', None, end_state=1)
            m.set_start('greeting')

            m.run((self, {}))

        # in the event of an exception, capture the current
        # state and cargo dict and use the information
        # as part of the message sent to stdout
        except Exception as e:
            exception_data = {'state':m.current_state}
            if m.current_cargo:
                exception_data['data'] = m.current_cargo[1]
            e.args = (exception_data,)
            raise
Example No. 8
def parseBMRB(f): 
    # This is the actual program
    m = StateMachine()
    m.add_state("Start", start)
    m.add_state("open_file", open_file)
    m.add_state("read_lines", read_lines)
    m.add_state("get_sequence", get_sequence)
    m.add_state("get_CS", get_CS)
    m.add_state("get_headers", get_headers)
    m.add_state("get_uniProt", get_uniProt)
    m.add_state("end_reading", end_reading, end_state=1)
    m.add_state("empty_file", empty_file)
    m.add_state("no_file", no_file)
    m.set_start("Start")
    a = m.run(f)[2]
    return a
Example No. 9
def check_transitions(attitude):
    print "产品验收中"
    if attitude == AGREE:
        new_state = "end"
    else:
        new_state = "rollback"
    return new_state, AGREE


def end_transitions(attitude):
    pass


if __name__ == "__main__":
    m = StateMachine()
    m.add_state("start", start_transitions)  # 添加初始状态
    m.add_state("leader_approve", leader_approve_transitions)
    m.add_state("deploy", deploy_transitions)
    m.add_state("rollback", rollback_transitions)
    m.add_state("check", check_transitions)
    m.add_state("end", None, end_state=1)  # 添加最终状态

    m.set_start("start")  # 设置开始状态
    m.run(AGREE)
"""
When the workflow is initiated, save the state machine definition (the nodes, the end
nodes and the state-transition functions) to the database, and persist the current node.
At approval time, load the state machine definition and run it for one step with
(current node, approval decision), then persist the resulting new node.

The transition handlers are in-memory addresses, so an extra module path can be stored
for each handler and used to re-import it when a step is run.
"""
Example No. 10
        print " >>"
    return (newState, val)

def twenties_counter(val):
    print "TWENTIES State:",
    while 1:
        if val <= 0  or  val >= 30:
            newState =  "Out_of_Range"; break
        elif 1 <= val < 10:
            newState =  "ONES"; break
        elif 10 <= val < 20:
            newState =  "TENS"; break
        else:
            print " *%2.1f+" % val,
            val = math_func(val)
        print " >>"
    return (newState, val)

def math_func(n):
    from math import sin
    return abs(sin(n)) * 31

if __name__== "__main__":
       m = StateMachine()
       m.add_state("ONES", ones_counter)
       m.add_state("TENS", tens_counter)
       m.add_state("TWENTIES", twenties_counter)
       m.add_state("OUT_OF_RANGE", None, end_state=1)
       m.set_start("ONES")
       m.run(1)
Example No. 11
    startstate = MetaRescue
    resumed = True
    logging.info("Resuming at Metadata Rescue...")
else:
    startstate = MetaClone

# STATEMACHINE
sm = StateMachine(0.1, startstate, globals(), locals())

BTRACE_POLL_COUNT = 0
def btrace_poller(smobj):
    global BTRACE_POLL_COUNT
    if btrace.blkparse is not None and btrace.parser is not None:
        lines_read = btrace.parser.read_btrace()
        BTRACE_POLL_COUNT += 1
        if lines_read > 0:
            # unused are marked finished so when ANDed using ddrescuelog
            # only definitely unused parts remain finished
            btracelog = btrace.parser.write_ddrescuelog(OPTIONS,
                'non-tried', 'finished', 0, DEVSIZE)
            if ddrescue.VIEWER is not None:
                shutil.copyfile(btracelog, ddrescue.ddrlog)
sm.add_persistent_task(btrace_poller)

# EXECUTE
sm.run()

# CLEANUP BEFORE NORMAL EXIT
cleanup()

Example No. 12
            newState = "pos_state"
        elif word in negative_adjectives:
            newState = "neg_state"
        else:
            newState = "error_state"
        return (newState, txt)
    elif state == "not_state":
        if word in positive_adjectives:
            newState = "neg_state"
        elif word in negative_adjectives:
            newState = "pos_state"
        else:
            newState = "error_state"
        return (newState, txt)


if __name__ == "__main__":
    m = StateMachine()
    m.add_state("Start", transitions)
    m.add_state("Python_state", transitions)
    m.add_state("is_state", transitions)
    m.add_state("not_state", transitions)
    m.add_state("neg_state", None, end_state=1)
    m.add_state("pos_state", None, end_state=1)
    m.add_state("error_state", None, end_state=1)
    m.set_start("Start")
    m.run("Python is great")
    """
    m.run("Python is difficult")
    m.run("Perl is ugly")
    """
Example No. 13
def not_state_transitions(txt):
    splitted_txt = txt.split(None, 1)
    word, txt = splitted_txt if len(splitted_txt) > 1 else (txt, "")
    if word in positive_adjectives:
        newState = "neg_state"
    elif word in negative_adjectives:
        newState = "pos_state"
    else:
        newState = "error_state"
    return (newState, txt)


def neg_state(txt):
    print("Hallo")
    return ("neg_state", "")


if __name__ == "__main__":
    m = StateMachine()
    m.add_state("Start", start_transitions)
    m.add_state("Python_state", python_state_transitions)
    m.add_state("is_state", is_state_transitions)
    m.add_state("not_state", not_state_transitions)
    m.add_state("neg_state", None, end_state=1)
    m.add_state("pos_state", None, end_state=1)
    m.add_state("error_state", None, end_state=1)
    m.set_start("Start")
    m.run("Python is great")
    m.run("Python is difficult")
    m.run("Perl is ugly")
Example No. 14
    global proceso_servos

    # Servo motor control window #
    if servos_enable == False:
        servos_enable = True
        proceso_servos = subprocess.Popen(['python', 'control_camera_position.py'])
    else:
        msgbox("The camera control program is already open.", title="Attention")

    return ("STATE_Buttons", txt)


# MAIN function
if __name__ == "__main__":

    # Create the FSM #
    m = StateMachine()
    m.add_state("STATE_Welcome", start_window)
    m.add_state("STATE_Buttons", buttons_window)
    m.add_state("STATE_Camera", camera_handler)
    m.add_state("STATE_Graficos", graficos_hanlder)
    m.add_state("STATE_Exit", exit_function)
    m.add_state("STATE_R_U_SURE", are_you_sure_function)
    m.add_state("STATE_cameracontrol", cameracontrol_function)
    m.add_state("Bye_state",None,end_state=1)
    m.set_start("STATE_Welcome")
    m.run("Exec")
Example No. 15
    throw = throws[0]
    throws = throws[1:]
    newState = "000"
    if throw == "3":
        newState = "111"
    elif throw == "4":
        newState = "1101"
    elif throw == "5":
        newState = "11001"

    return (newState, throws)


def s_1101_transitions(throws):
    throw = throws[0]
    throws = throws[1:]
    newState = "000"
    if throw == "2":
        newState = "111"

    return (newState, throws)


if __name__ == "__main__":
    m = StateMachine()
    m.add_state("111", s_111_transitions)
    m.add_state("1101", s_1101_transitions)
    m.set_start("111")
    m.run("42", 2)
    m.run("423", 3)
Example No. 16
def not_state_transitions(txt):
    splitted_txt = txt.split(None,1)
    word, txt = splitted_txt if len(splitted_txt) > 1 else (txt,"")
    if word in positive_adjectives:
        newState = "neg_state"
    elif word in negative_adjectives:
        newState = "pos_state"
    else:
        newState = "error_state"
    return (newState, txt)

def neg_state(txt):
    print("Hallo")
    return ("neg_state", "")

if __name__== "__main__":
    m = StateMachine()
    m.add_state("Start", start_transitions)
    m.add_state("Python_state", python_state_transitions)
    m.add_state("is_state", is_state_transitions)
    m.add_state("not_state", not_state_transitions)
    m.add_state("neg_state", None, end_state=1)
    m.add_state("pos_state", None, end_state=1)
    m.add_state("error_state", None, end_state=1)
    m.set_start("Start")
    m.run("Python is great")
    m.run("Python is not great")
    m.run("Python is foolish")
#    m.run("Python is difficult")
#    m.run("Perl is ugly")
Example No. 17
class SamParser:
    def __init__(self):

        self.stateMachine = StateMachine()
        self.stateMachine.add_state("NEW", self._new_file)
        self.stateMachine.add_state("SAM", self._sam)
        self.stateMachine.add_state("BLOCK", self._block)
        self.stateMachine.add_state("CODEBLOCK-START", self._codeblock_start)
        self.stateMachine.add_state("CODEBLOCK", self._codeblock)
        self.stateMachine.add_state("BLOCKQUOTE-START", self._blockquote_start)
        self.stateMachine.add_state("FRAGMENT-START", self._fragment_start)
        self.stateMachine.add_state("PARAGRAPH-START", self._paragraph_start)
        self.stateMachine.add_state("PARAGRAPH", self._paragraph)
        self.stateMachine.add_state("RECORD-START", self._record_start)
        self.stateMachine.add_state("RECORD", self._record)
        self.stateMachine.add_state("LIST-ITEM", self._list_item)
        self.stateMachine.add_state("NUM-LIST-ITEM", self._num_list_item)
        self.stateMachine.add_state("LABELED-LIST-ITEM", self._labeled_list_item)
        self.stateMachine.add_state("BLOCK-INSERT", self._block_insert)
        self.stateMachine.add_state("STRING-DEF", self._string_def)
        self.stateMachine.add_state("LINE-START", self._line_start)
        self.stateMachine.add_state("EMBEDDED-XML", self._embedded_xml)
        self.stateMachine.add_state("END", None, end_state=1)
        self.stateMachine.set_start("NEW")
        self.current_paragraph = None
        self.doc = DocStructure()
        self.source = None
        self.patterns = {
            "sam-declaration": re.compile(r"sam:\s*(?:(?:\{(?P<namespace>\S+?)\})|(?P<schema>\S+))?"),
            "comment": re.compile(r"\s*#.*"),
            "block-start": re.compile(
                r"(?P<indent>\s*)(?P<element>[\w_\.-]+?):(\((?P<attributes>.*?(?<!\\))\))?(?P<content>.+)?"
            ),
            "codeblock-start": re.compile(
                r'(?P<indent>\s*)(?P<flag>```[^\s\(]*)(\((?P<language>\w*)\s*(["\'](?P<source>.+?)["\'])?\s*(\((?P<namespace>\S+?)\))?(?P<other>.+?)?\))?'
            ),
            "blockquote-start": re.compile(
                r'(?P<indent>\s*)("""|\'\'\'|blockquote:)(\((?P<attributes>.*?(?<!\\))\))?((\[\s*\*(?P<id>\S+)(?P<id_extra>.+?)\])|(\[\s*\#(?P<name>\S+)(?P<name_extra>.+?)\])|(\[\s*(?P<citation>.*?)\]))?'
            ),
            "fragment-start": re.compile(r"(?P<indent>\s*)~~~(\((?P<attributes>.*?)\))?"),
            "paragraph-start": re.compile(r"\w*"),
            "line-start": re.compile(r"(?P<indent>\s*)\|(\((?P<attributes>.*?)\))?\s(?P<text>.*)"),
            "blank-line": re.compile(r"^\s*$"),
            "record-start": re.compile(r"(?P<indent>\s*)(?P<record_name>[a-zA-Z0-9-_]+)::(?P<field_names>.*)"),
            "list-item": re.compile(r"(?P<indent>\s*)(?P<marker>\*\s+)(?P<content>.*)"),
            "num-list-item": re.compile(r"(?P<indent>\s*)(?P<marker>[0-9]+\.\s+)(?P<content>.*)"),
            "labeled-list-item": re.compile(r"(?P<indent>\s*)\|(?P<label>\S.*?)(?<!\\)\|\s+(?P<content>.*)"),
            "block-insert": re.compile(r"(?P<indent>\s*)>>\((?P<attributes>.*?)\)\w*"),
            "string-def": re.compile(r"(?P<indent>\s*)\$(?P<name>\w*?)=(?P<value>.+)"),
            "embedded-xml": re.compile(r"(?P<indent>\s*)(?P<xmltag>\<\?xml.+)"),
        }

    def parse(self, source):
        self.source = StringSource(source)
        try:
            self.stateMachine.run(self.source)
        except EOFError:
            raise SAMParserError("Document ended before structure was complete. At:\n\n" + self.current_paragraph)

    def paragraph_start(self, line):
        self.current_paragraph = line.strip()

    def paragraph_append(self, line):
        self.current_paragraph += " " + line.strip()

    def pre_start(self, line):
        self.current_paragraph = line

    def pre_append(self, line):
        self.current_paragraph += line

    def _new_file(self, source):
        line = source.next_line
        match = self.patterns["sam-declaration"].match(line)
        if match:
            self.doc.new_root(match)
            return "SAM", (source, None)
        else:
            raise SAMParserError("Not a SAM file!")

    def _block(self, context):
        source, match = context
        indent = len(match.group("indent"))
        element = match.group("element").strip()
        attributes = self.parse_block_attributes(match.group("attributes"))
        content = match.group("content")
        self.doc.new_block(element, attributes, para_parser.parse(content, self.doc), indent)
        return "SAM", context

    def _codeblock_start(self, context):
        source, match = context
        indent = len(match.group("indent"))
        codeblock_flag = match.group("flag")
        self.patterns["codeblock-end"] = re.compile(r"(\s*)" + codeblock_flag + "\s*$")

        attributes = {}

        language = match.group("language")
        if language is not None:
            attributes["language"] = language

        source = match.group("source")
        if source is not None:
            attributes["source"] = source

        namespace = match.group("namespace")
        if namespace is not None:
            attributes["namespace"] = namespace

        other = match.group("other")
        if other is not None:
            attributes.update(self.parse_block_attributes(other))

        self.doc.new_block("codeblock", attributes, None, indent)
        self.pre_start("")
        return "CODEBLOCK", context

    def _codeblock(self, context):
        source, match = context
        line = source.next_line
        if self.patterns["codeblock-end"].match(line):
            self.doc.new_flow(Pre(self.current_paragraph))
            return "SAM", context
        else:
            self.pre_append(line)
            return "CODEBLOCK", context

    def _blockquote_start(self, context):
        source, match = context
        indent = len(match.group("indent"))

        # TODO: Refactor this with the paraparser version

        extra = source.current_line.rstrip()[len(match.group(0)) :]
        if extra:
            raise SAMParserError("Extra text found after blockquote start: " + extra)

        attributes = self.parse_block_attributes(match.group("attributes"))

        b = self.doc.new_block("blockquote", attributes, None, indent)

        # see if there is a citation
        try:
            idref = match.group("id")
        except IndexError:
            idref = None
        try:
            nameref = match.group("name")
        except IndexError:
            nameref = None
        try:
            citation = match.group("citation")
        except IndexError:
            citation = None

        if idref:
            citation_type = "idref"
            citation_value = idref.strip()
            extra = match.group("id_extra")
        elif nameref:
            citation_type = "nameref"
            citation_value = nameref.strip()
            extra = match.group("name_extra")
        elif citation:
            citation_type = "citation"
            citation_value = citation.strip()
        else:
            citation_type = None

        if citation_type:
            cit = Citation(citation_type, citation_value, extra)
            b.add_child(cit)

        return "SAM", context

    def _fragment_start(self, context):
        source, match = context
        indent = len(match.group("indent"))

        attributes = {}

        attributes_string = match.group("attributes")
        if attributes_string is not None:
            attributes.update(self.parse_block_attributes(attributes_string))

        self.doc.new_block("fragment", attributes, None, indent)
        return "SAM", context

    def _paragraph_start(self, context):
        source, match = context
        line = source.current_line
        local_indent = len(line) - len(line.lstrip())
        self.doc.new_paragraph(None, "", local_indent)
        self.paragraph_start(line)
        return "PARAGRAPH", context

    def _paragraph(self, context):
        source, match = context
        try:
            line = source.next_line
        except EOFError:
            f = para_parser.parse(self.current_paragraph, self.doc)
            self.doc.new_flow(f)
            return "END", context

        if self.patterns["blank-line"].match(line):
            f = para_parser.parse(self.current_paragraph, self.doc)
            self.doc.new_flow(f)
            return "SAM", context

        if self.doc.in_context(["p", "li"]):
            f = para_parser.parse(self.current_paragraph, self.doc)
            self.doc.new_flow(f)
            source.return_line()
            return "SAM", context

        self.paragraph_append(line)
        return "PARAGRAPH", context

    def _list_item(self, context):
        source, match = context
        indent = len(match.group("indent"))
        content_indent = indent + len(match.group("marker"))
        self.doc.new_unordered_list_item(indent, content_indent)
        self.paragraph_start(str(match.group("content")).strip())
        return "PARAGRAPH", context

    def _num_list_item(self, context):
        source, match = context
        indent = len(match.group("indent"))
        content_indent = indent + len(match.group("marker"))
        self.doc.new_ordered_list_item(indent, content_indent)
        self.paragraph_start(str(match.group("content")).strip())
        return "PARAGRAPH", context

    def _labeled_list_item(self, context):
        source, match = context
        indent = len(match.group("indent"))
        label = match.group("label")
        self.doc.new_labeled_list_item(indent, label)
        self.paragraph_start(str(match.group("content")).strip())
        return "PARAGRAPH", context

    def _block_insert(self, context):
        source, match = context
        indent = len(match.group("indent"))
        self.doc.new_block("insert", attributes=parse_insert(match.group("attributes")), text=None, indent=indent)
        return "SAM", context

    def _string_def(self, context):
        source, match = context
        indent = len(match.group("indent"))
        self.doc.new_string_def(match.group("name"), para_parser.parse(match.group("value"), self.doc), indent=indent)
        return "SAM", context

    def _line_start(self, context):
        source, match = context
        indent = len(match.group("indent"))
        self.doc.new_block(
            "line",
            self.parse_block_attributes(match.group("attributes")),
            para_parser.parse(match.group("text"), self.doc, strip=False),
            indent=indent,
        )
        return "SAM", context

    def _record_start(self, context):
        source, match = context
        indent = len(match.group("indent"))
        record_name = match.group("record_name").strip()
        field_names = [x.strip() for x in match.group("field_names").split(",")]
        self.doc.new_record_set(record_name, field_names, indent)
        return "RECORD", context

    def _record(self, context):
        source, match = context
        try:
            line = source.next_line
        except EOFError:
            return "END", context
        indent = len(line) - len(line.lstrip())
        if self.patterns["blank-line"].match(line):
            return "SAM", context
        elif indent < self.doc.current_block.indent:
            source.return_line()
            return "SAM", context
        else:
            field_values = [x.strip() for x in re.split(r"(?<!\\),", line)]
            if len(field_values) != len(self.doc.fields):
                raise SAMParserError("Record length does not match record set header. At:\n\n " + line)
            record = list(zip(self.doc.fields, field_values))
            self.doc.new_record(record)
            return "RECORD", context

    def _embedded_xml(self, context):
        source, match = context
        indent = len(match.group("indent"))
        embedded_xml_parser = xml.parsers.expat.ParserCreate()
        embedded_xml_parser.XmlDeclHandler = self._embedded_xml_declaration_check
        embedded_xml_parser.Parse(source.current_line.strip())
        xml_lines = []
        try:
            while True:
                line = source.next_line
                xml_lines.append(line)
                embedded_xml_parser.Parse(line)
        except xml.parsers.expat.ExpatError as err:
            if err.code == 9:  # junk after document element
                source.return_line()
                xml_text = "".join(xml_lines[:-1])
                self.doc.new_embedded_xml(xml_text, indent)
                return "SAM", context
            else:
                raise

    def _embedded_xml_declaration_check(self, version, encoding, standalone):
        if version != "1.0":
            raise SAMParserError("The version of an embedded XML fragment must be 1.0.")
        if encoding.upper() != "UTF-8":
            raise SAMParserError("The encoding of an embedded XML fragment must be UTF-8.")

    def _sam(self, context):
        source, match = context
        try:
            line = source.next_line
        except EOFError:
            return "END", context

        match = self.patterns["comment"].match(line)
        if match is not None:
            self.doc.new_comment(Comment(line.strip()[1:]))
            return "SAM", (source, match)

        match = self.patterns["record-start"].match(line)
        if match is not None:
            return "RECORD-START", (source, match)

        match = self.patterns["blank-line"].match(line)
        if match is not None:
            return "SAM", (source, match)

        match = self.patterns["codeblock-start"].match(line)
        if match is not None:
            return "CODEBLOCK-START", (source, match)

        match = self.patterns["blockquote-start"].match(line)
        if match is not None:
            return "BLOCKQUOTE-START", (source, match)

        match = self.patterns["fragment-start"].match(line)
        if match is not None:
            return "FRAGMENT-START", (source, match)

        match = self.patterns["list-item"].match(line)
        if match is not None:
            return "LIST-ITEM", (source, match)

        match = self.patterns["num-list-item"].match(line)
        if match is not None:
            return "NUM-LIST-ITEM", (source, match)

        match = self.patterns["labeled-list-item"].match(line)
        if match is not None:
            return "LABELED-LIST-ITEM", (source, match)

        match = self.patterns["block-insert"].match(line)
        if match is not None:
            return "BLOCK-INSERT", (source, match)

        match = self.patterns["string-def"].match(line)
        if match is not None:
            return "STRING-DEF", (source, match)

        match = self.patterns["line-start"].match(line)
        if match is not None:
            return "LINE-START", (source, match)

        match = self.patterns["embedded-xml"].match(line)
        if match is not None:
            return "EMBEDDED-XML", (source, match)

        match = self.patterns["block-start"].match(line)
        if match is not None:
            return "BLOCK", (source, match)

        match = self.patterns["paragraph-start"].match(line)
        if match is not None:
            return "PARAGRAPH-START", (source, match)

        raise SAMParserError("I'm confused")

    def serialize(self, serialize_format):
        return self.doc.serialize(serialize_format)

    def parse_block_attributes(self, attributes_string):
        result = {}
        try:
            attributes_list = attributes_string.split()
        except AttributeError:
            return None
        unexpected_attributes = [x for x in attributes_list if not (x[0] in "?#*")]
        if unexpected_attributes:
            raise SAMParserError("Unexpected attribute(s): {0}".format(", ".join(unexpected_attributes)))
        ids = [x[1:] for x in attributes_list if x[0] == "*"]
        if len(ids) > 1:
            raise SAMParserError("More than one ID specified: " + ", ".join(ids))
        names = [x[1:] for x in attributes_list if x[0] == "#"]
        if len(names) > 1:
            raise SAMParserError("More than one name specified: " + ", ".join(names))
        conditions = [x[1:] for x in attributes_list if x[0] == "?"]
        if ids:
            if ids[0] in self.doc.ids:
                raise SAMParserError("Duplicate ID found: " + ids[0])
            self.doc.ids.extend(ids)
            result["id"] = "".join(ids)
        if names:
            result["name"] = "".join(names)
        if conditions:
            result["conditions"] = " ".join(conditions)
        return result
Example No. 18
class SamParser:
    def __init__(self):

        self.stateMachine = StateMachine()
        self.stateMachine.add_state("NEW", self._new_file)
        self.stateMachine.add_state("SAM", self._sam)
        self.stateMachine.add_state("BLOCK", self._block)
        self.stateMachine.add_state("CODEBLOCK-START", self._codeblock_start)
        self.stateMachine.add_state("CODEBLOCK", self._codeblock)
        self.stateMachine.add_state("PARAGRAPH-START", self._paragraph_start)
        self.stateMachine.add_state("PARAGRAPH", self._paragraph)
        self.stateMachine.add_state("RECORD-START", self._record_start)
        self.stateMachine.add_state("RECORD", self._record)
        self.stateMachine.add_state("LIST-ITEM", self._list_item)
        self.stateMachine.add_state("NUM-LIST-ITEM", self._num_list_item)
        self.stateMachine.add_state("BLOCK-INSERT", self._block_insert)
        self.stateMachine.add_state("END", None, end_state=1)
        self.stateMachine.set_start("NEW")
        self.current_paragraph = None
        self.doc = DocStructure()
        self.source = None
        self.patterns = {
            'comment': re.compile(r'\s*#.*'),
            'block-start':
            re.compile(r'(\s*)([a-zA-Z0-9-_]+):(?:\((.*?)\))?(.*)'),
            'codeblock-start': re.compile(r'(\s*)```(.*)'),
            'codeblock-end': re.compile(r'(\s*)```\s*$'),
            'paragraph-start': re.compile(r'\w*'),
            'blank-line': re.compile(r'^\s*$'),
            'record-start': re.compile(r'\s*[a-zA-Z0-9-_]+::(.*)'),
            'list-item': re.compile(r'(\s*)(\*\s+)(.*)'),
            'num-list-item': re.compile(r'(\s*)([0-9]+\.\s+)(.*)'),
            'block-insert': re.compile(r'(\s*)>>\(.*?\)\w*')
        }

    def parse(self, source):
        self.source = source
        try:
            self.stateMachine.run(self.source)
        except EOFError:
            raise Exception(
                "Document ended before structure was complete. At:\n\n" +
                self.current_paragraph)

    def paragraph_start(self, line):
        self.current_paragraph = line.strip()

    def paragraph_append(self, line):
        self.current_paragraph += " " + line.strip()

    def pre_start(self, line):
        self.current_paragraph = line

    def pre_append(self, line):
        self.current_paragraph += line

    def _new_file(self, source):
        line = source.next_line
        if line[:4] == 'sam:':
            self.doc.new_root('sam', line[5:])
            return "SAM", source
        else:
            raise Exception("Not a SAM file!")

    def _block(self, source):
        line = source.currentLine
        match = self.patterns['block-start'].match(line)
        indent = len(match.group(1))
        element = match.group(2).strip()
        attributes = match.group(3)
        content = match.group(4).strip()

        if content[:1] == ':':
            return "RECORD-START", source
        else:
            self.doc.new_block(element, attributes, content, indent)
            return "SAM", source

    def _codeblock_start(self, source):
        line = source.currentLine
        local_indent = len(line) - len(line.lstrip())
        match = self.patterns['codeblock-start'].match(line)
        attributes = re.compile(r'\((.*?)\)').match(match.group(2).strip())
        language = attributes.group(1)
        self.doc.new_block('codeblock', language, None, local_indent)
        self.pre_start('')
        return "CODEBLOCK", source

    def _codeblock(self, source):
        line = source.next_line
        if self.patterns['codeblock-end'].match(line):
            self.doc.new_flow(Pre(self.current_paragraph))
            return "SAM", source
        else:
            self.pre_append(line)
            return "CODEBLOCK", source

    def _paragraph_start(self, source):
        line = source.currentLine
        local_indent = len(line) - len(line.lstrip())
        self.doc.new_block('p', None, '', local_indent)
        self.paragraph_start(line)
        return "PARAGRAPH", source

    def _paragraph(self, source):
        line = source.next_line
        if self.patterns['blank-line'].match(line):
            para_parser.parse(self.current_paragraph, self.doc)
            return "SAM", source
        else:
            self.paragraph_append(line)
            return "PARAGRAPH", source

    def _list_item(self, source):
        line = source.currentLine
        match = self.patterns['list-item'].match(line)
        local_indent = len(match.group(1))
        content_indent = local_indent + len(match.group(2))
        self.doc.new_unordered_list_item(local_indent, content_indent)
        self.paragraph_start(str(match.group(3)).strip())
        return "PARAGRAPH", source

    def _num_list_item(self, source):
        line = source.currentLine
        match = self.patterns['num-list-item'].match(line)
        local_indent = len(match.group(1))
        content_indent = local_indent + len(match.group(2))
        self.doc.new_ordered_list_item(local_indent, content_indent)
        self.paragraph_start(str(match.group(3)).strip())
        return "PARAGRAPH", source

    def _block_insert(self, source):
        line = source.currentLine
        indent = len(source.currentLine) - len(source.currentLine.lstrip())
        attribute_pattern = re.compile(r'\s*>>\((.*?)\)')
        match = attribute_pattern.match(line)
        self.doc.new_block('insert',
                           text='',
                           attributes=parse_insert(match.group(1)),
                           indent=indent)
        return "SAM", source

    def _record_start(self, source):
        line = source.currentLine
        match = self.patterns['block-start'].match(line)
        local_indent = len(match.group(1))
        local_element = match.group(2).strip()
        field_names = [
            x.strip() for x in self.patterns['record-start'].match(line).group(
                1).split(',')
        ]
        self.doc.new_record_set(local_element, field_names, local_indent)
        return "RECORD", source

    def _record(self, source):
        line = source.next_line
        if self.patterns['blank-line'].match(line):
            return "SAM", source
        else:
            field_values = [x.strip() for x in line.split(',')]
            record = list(zip(self.doc.fields, field_values))
            self.doc.new_record(record)
            return "RECORD", source

    def _sam(self, source):
        try:
            line = source.next_line
        except EOFError:
            return "END", source
        if self.patterns['comment'].match(line):
            self.doc.new_comment(Comment(line.strip()[1:]))
            return "SAM", source
        elif self.patterns['block-start'].match(line):
            return "BLOCK", source
        elif self.patterns['blank-line'].match(line):
            return "SAM", source
        elif self.patterns['codeblock-start'].match(line):
            return "CODEBLOCK-START", source
        elif self.patterns['list-item'].match(line):
            return "LIST-ITEM", source
        elif self.patterns['num-list-item'].match(line):
            return "NUM-LIST-ITEM", source
        elif self.patterns['block-insert'].match(line):
            return "BLOCK-INSERT", source
        elif self.patterns['paragraph-start'].match(line):
            return "PARAGRAPH-START", source
        else:
            raise Exception("I'm confused")

    def serialize(self, serialize_format):
        return self.doc.serialize(serialize_format)
Example No. 19
    client.registerRemoteBuffer(TARGET_LOCATION, DOWNWARD_VISION_SERVER_IP,
                                DOWNWARD_VISION_SERVER_ID)
    client.registerRemoteBuffer(TARGET_LOCATION, SONAR_SERVER_IP,
                                SONAR_SERVER_ID)
    client.registerRemoteBuffer(MOTOR_KILL, MOTOR_SERVER_IP, MOTOR_SERVER_ID)
    time.sleep(1)

    print("Creating State Machine")
    m = StateMachine()

    m.add_state("Start", start_transitions)
    m.add_state("Kill", kill_transitions)
    m.add_state("IsKilled", iskilled_transitions)
    m.add_state("GateDeadReckon", gatedr_transitions)
    m.add_state("GateVisionFeedback", gatevisionfeed_transitions)
    m.add_state("GateVision", gatevision_transitions)
    m.add_state("PathFinder", pathfinder_transitions)
    m.add_state("PathOrientation", pathorient_transitions)
    m.add_state("SetDepth", set_depth_transitions)
    m.add_state("BuoyDeadReckon", buoydr_transitions)
    m.add_state("CheckBuoy", checkbuoy_transitions)
    m.add_state("BuoyVision", buoyvision_transitions)
    m.add_state("SonarFinder", sonarfinder_transitions)
    m.add_state("SonarOrientation", sonarorient_transitions)
    m.add_state("OctoDeadReckon", sonardr_transitions)
    m.add_state("Error", None, end_state=1)
    m.add_state("EndOfRun", None, end_state=1)

    m.set_start("Kill")
    m.run("PLACEHOLDER")
Example No. 20
def statu_neg(txt):
    print("Hallo")
    return ("statu_neg", "")


if __name__ == "__main__":
    m = StateMachine()
    m.add_state("Start", start_transitions)
    m.add_state("statu_Paris7", paris7_state_transitions)
    m.add_state("statu_est", statu_est_transitions)
    m.add_state("statu_non", statu_non_transitions)
    m.add_state("statu_neg", None, end_state=1)
    m.add_state("statu_pos", None, end_state=1)
    m.add_state("statu_error", None, end_state=1)
    m.set_start("Start")
    '''
    m.run("Paris7 est super")
    m.run("Paris7 est connu")
    m.run("Paris6 est futile")
    m.run("Paris7 est non magnifique")
    '''
    print('You must enter a sentence such as:')
    print('Paris7 est super or Paris7 est non super')
    print('Positive adjectives: magnifique, super, connu, joli, important')
    print('Negative adjectives: futile, difficile, stressant, bad')

    while True:
        val = input('\nPlease enter your sentence: \n')
        m.run(val)
Example No. 21
        return (newState, command, nparray)
    else:
        return (newState, command)


def process_transition(data):
    global nparray
    nparray = 5 * np.array(nparray)
    print("printing escalar")
    print(nparray)
    nparray = nparray.transpose()
    print("printing transpose ")
    print(nparray)
    command = input(" Type the next Command: ")
    if command == "Stop":
        newState = "Stopped"
    else:
        newState = "Collecting"
    print(newState)
    return (newState, command)


if __name__ == "__main__":
    m = StateMachine()
    m.add_state("Stopped", stop_transition)
    m.add_state("Started", start_transition)
    m.add_state("Collecting", collect_transition)
    m.add_state("Processing", process_transition)
    m.set_start("Stopped")
    m.run("")
Example No. 22
def not_state_transitions(txt):
    splitted_txt = txt.split(None, 1)
    word, txt = splitted_txt if len(splitted_txt) > 1 else (txt, "")
    if word in positive_adjectives:
        newState = "neg_state"
    elif word in negative_adjectives:
        newState = "pos_state"
    else:
        newState = "error_state"
    return (newState, txt)


def neg_state(txt):
    print("Hallo")
    return ("neg_state", "")


if __name__ == "__main__":
    m = StateMachine()
    m.add_state("Start", start_transitions)
    m.add_state("Python_state", python_state_transitions)
    m.add_state("is_state", is_state_transitions)
    m.add_state("not_state", not_state_transitions)
    m.add_state("neg_state", None, end_state=1)
    m.add_state("pos_state", None, end_state=1)
    m.add_state("error_state", None, end_state=1)
    m.set_start("Start")
    m.run("Python is not great")
    m.run("Python is difficult")
    m.run("Perl is ugly")
Example No. 23
    #Stop the robot and lift the message
    # issue command to arm for lift message

    return ("Completed_task")


def completed_state():
    global restart
    print("####$$$$*****completed task*****####")
    return ()


if __name__ == "__main__":

    m = StateMachine()

    m.add_state("Start", start_trans)
    m.add_state("message_detected", message_detected_trans)
    m.add_state("walking_in_gobi", walking_in_gobi_trans)
    m.add_state("line2_checkpoint", line2_checkpoint_trans)
    m.add_state("sand_dune_detected", sand_dune_detected_trans)
    m.add_state("line3_checkpoint", line3_checkpoint_trans)
    m.add_state("mountain_urtu", mountain_urtu_trans)
    m.add_state("mountain_climb", mountain_climb_trans)
    m.add_state("reached_top", reached_top_trans)
    #   m.add_state("lift_message", None ,end_state=1)

    m.set_start("Start")

    m.run("Starting the robot")
Example No. 24
    return (newState, txt)


def not_state_transitions(txt):
    splitted_txt = txt.split(None, 1)
    word, txt = splitted_txt if len(splitted_txt) > 1 else (txt, "")
    if word in positive_adjectives:
        newState = "neg_state"
    elif word in negative_adjectives:
        newState = "pos_state"
    else:
        newState = "error_state"
    return (newState, txt)


if __name__ == "__main__":
    m = StateMachine()
    m.add_state("Start", start_transitions)  # 添加初始状态
    m.add_state("Python_state", python_state_transitions)
    m.add_state("is_state", is_state_transitions)
    m.add_state("not_state", not_state_transitions)
    m.add_state("neg_state", None, end_state=1)  # 添加最终状态
    m.add_state("pos_state", None, end_state=1)
    m.add_state("error_state", None, end_state=1)

    m.set_start("Start")  # 设置开始状态
    m.run("Python is great")
    m.run("Python is not fun")
    m.run("Perl is ugly")
    m.run("Pythoniseasy")
Example No. 25
    curses.cbreak()

    # map arrow keys to special values
    screen.keypad(True)

    mode = "awake"

    try:
        menu = StateMachine()
        menu.add_state("MeasureScreen", measure_screen)
        menu.add_state("MeasureSleep", measure_sleep)
        menu.add_state("MenuMeasure", menu_measure)
        menu.add_state("MenuScreenOff", menu_screen_off)
        menu.add_state("ScreenOff", screen_off)
        menu.add_state("MenuCalibrate", menu_calibrate)
        menu.add_state("Calibrate", calibrate)
        menu.add_state("MenuTurnOff", menu_turn_off)

        #menu.add_state("")

        menu.add_state("TurnOff", None, end_state=1)

        menu.set_start("MeasureScreen")
        menu.run("None")

    finally:
        curses.nocbreak()
        screen.keypad(0)
        curses.echo()
        curses.endwin()
Example No. 26
class ParserScript:
	def __init__(self, db, file_name, log_file_name=None):
		self.sm = StateMachine()
		self.db = db
		self.cur = self.db.cursor()
		file = codecs.open(file_name, 'r', 'utf8')
		self.log = None
		if log_file_name:
			# optional log of the executed SQL
			self.log = codecs.open(log_file_name, 'w', 'utf8')
		self.line = self.get_script_line(file)
		self.SQL = u''
		self.r = re.compile('SET[\\s]+TERM')
		self.ex_term = ';'
		self.create_state(self.sm)
		self.State = ''
		print "Parse script class"
		
	def run(self):
		self.sm.run(['','READ_LINE'])        

	def get_script_line(self,file):
		while 1:
			str=file.readline()
			if not str: break
			yield str

	def get_line(self,val):         
		if len(val[0].strip())==0:
			newState='CLEAN_REM'
			val[1]=newState
		else:
			newState=val[1]
		try:
			s=self.line.next()
			val[0]+=s
		except:
			newState='COMMIT'
		if newState<>'COMMIT' and len(re.sub('\\s+','',val[0]).strip())==0 :
			newState='READ_LINE'
		return (newState, val)

	def clean_leades_rem(self,val): 
		newState='EX_TERM'
		if val[0].strip()[:2]=='--':
			val[0]=''
			newState='READ_LINE'
		if val[0].strip()[:2]=='/*':
			pos=val[0].find('*/')
			if pos==-1:
				newState='READ_LINE'
			else:
				val[0]=re.sub('\\s+','',val[0][pos+2:]) 
				if len(val[0].strip())==0:
					newState='READ_LINE'
		val[1]=newState
		return (newState, val)

	def get_term(self,val): 
		pos_term=val[0].find(self.ex_term)
		if pos_term==-1:
				newState='READ_LINE'
				val[1]='TERM'
		else:
			self.SQL+=val[0][:pos_term]
			newState='EXEC_SQL'
			val[0]=val[0][pos_term+1:]
		return (newState, val)
		
	def get_ex_term(self,val):
		#print 'EX_TERM>> '+val[0]
		pos_ex_term=self.r.search(val[0])
		newState='EXT_TERM'
		if pos_ex_term==None: 
			newState='TERM'
		elif val[0][:pos_ex_term.start()].strip()=='':
			pos=val[0].find(self.ex_term)
			if pos==-1:
				val[1]=newState
				newState='READ_LINE'
			else:
				self.ex_term=re.sub('SET[\\s]+TERM','',val[0][pos_ex_term.start():pos]).strip()
				val[1]=newState
				newState='READ_LINE'
				val[0]=val[0][pos+1:]
		else:
			if val[0][:pos_ex_term.start()].find(';')<>-1:
				val[1]=newState
				newState='TERM'
		return (newState, val)

	def exec_SQL(self, val):
		#print self.SQL+'<<\n'
		if self.log:
			self.log.write(self.SQL)
			self.log.write('<<\n')
		newState = val[1]
		try:
			self.cur.execute(self.SQL)
		except kdb.ProgrammingError, e:
			# error code -607 is treated as non-fatal
			if e[0] <> -607:
				newState = 'ERROR'
		self.SQL = ''
		return (newState, val)
Example No. 27
def B_transition(text):
    if (len(text) < 1):
        raise StateError
    else:
        input = text[0]
        if input == 'a':
            if len(text) == 1:
                newState = 'T'
            else:
                newState = 'B'
        elif input == 'b':
            newState = 'S'
        else:
            raise StateError
    return (newState, text[1:])


def T_transition(text):
    return ('T', text)


if __name__ == '__main__':
    FM = StateMachine()
    FM.add_state('S', S_transition, start=True)
    FM.add_state('B', B_transition)
    FM.add_state('T', T_transition, end=True)

    # test input strings
    FM.run('aababaaababaaa')
    FM.run('ababaababaaba')
Example No. 28
def run(robot: cozmo.robot.Robot):

    robot.world.image_annotator.annotation_enabled = False
    robot.world.image_annotator.add_annotator('box', BoxAnnotator)

    robot.camera.image_stream_enabled = True
    robot.camera.color_image_enabled = True
    robot.camera.enable_auto_exposure = True

    fixed_gain,exposure,mode = 3.90,67,1
    FSM = StateMachine()
    find = findCube()
    red = redCube()
    yellow = yellowCube()
    both = bothCube()
    FSM.addState(red)
    FSM.addState(yellow)
    FSM.addState(both)
    FSM.addState(find)
    FSM.setStartState(find)
    try:
        robot.set_head_angle(degrees(-5)).wait_for_completed()
        FSM.run(robot)

#         while True:
#             event = await robot.world.wait_for(cozmo.camera.EvtNewRawCameraImage, timeout=30)   #get camera image
#             if event.image is not None:
#                 image = np.asarray(event.image)#cv2.cvtColor(np.asarray(event.image), cv2.COLOR_BGR2RGB)
#
#
#                 if mode == 1:
#                     robot.camera.enable_auto_exposure = True
#                 else:
#                     robot.camera.set_manual_exposure(exposure,fixed_gain)
#
#                 #find the cube
#                 cube = find_cube(image, YELLOW_LOWER, YELLOW_UPPER)
#                 print(cube)
#                 i = 0
#                 while not cube and i < 5:
#                     cube = find_cube(image, YELLOW_LOWER, YELLOW_UPPER)
# #                    print(cube)
#                     BoxAnnotator.cube = cube
#                     i+=1
#
#                 ################################################################
#
#                 # Todo: Add Motion Here
#                 ################################################################
#
#                 if cube and cube[2] >= 70:
#                    print("stop")
#                 elif cube and cube[0] < 120:
#                     action = robot.turn_in_place(radians(0.1))
#                     await action.wait_for_completed()
#                 elif cube and cube[0] > 180:
#                     action = robot.turn_in_place(radians(-0.1))
#                     await action.wait_for_completed()
#                 elif cube and cube[2] < 70:
#                     action = robot.drive_straight(distance_mm(30), Speed(1000), should_play_anim=False)
#                     await action.wait_for_completed()
#                 else:
#                     action = robot.turn_in_place(radians(0.3))
#                     await action.wait_for_completed()



    except KeyboardInterrupt:
        print("")
        print("Exit requested by user")
    except cozmo.RobotBusy as e:
        print(e)
Example No. 29
    if floor in L2:
        print("Lift is already at "+str(floor))
        L2.remove(floor)

    if len(L2) > 0:
        print("Enter UP or DOWN for each floor")
        for i in range(len(L2)):
            L2_button.append(input(str(L2[i]) + " = "))

    top_floor=0
    bottom_floor=tot_floors
    for i in range(len(L1)):
        if top_floor<L1[i]:
            top_floor=L1[i]
        if bottom_floor>L1[i]:
            bottom_floor=L1[i]
    for i in range(len(L2)):
        if top_floor<L2[i]:
            top_floor=L2[i]
        if bottom_floor>L2[i]:
            bottom_floor=L2[i]
    print("--------------------------------------")
    lift = StateMachine()
    lift.add_state("Idle_state", idle)
    lift.add_state("Up_state", moving_up)
    lift.add_state("Down_state", moving_down)
    lift.add_state("Stop_state", None, end_state=1)
    currentState="Idle"
    lift.set_start("Idle_state")
    lift.run(floor)
Example No. 30
from statemachine import StateMachine


def ones_counter(val):
    print "ONES State:    ",
    while 1:
        if val <= 0 or val >= 30:
            newState = "Out_of_Range"
            break
        else:
            print "  @ %2.1f+" % val,
    print "  >>"
    return (newState, val)


def test(val):
    print val


def test1(val):
    print val


if __name__ == '__main__':
    stat = StateMachine()
    stat.add_state("test", test)
    stat.add_state("test1", test)
    stat.set_start("test")
    stat.add_state("OUT_OF_RANGE", None, end_state=1)
    stat.run(1)
Example No. 31
    print('player 2 wins!')
    newState = 'game_end'

    return (newState, inpList)


def game_end(inpList):
    print('End of game!')
    return None


##########################################################################
# Setting up game:
allTiles = []
allTiles.extend(User1.tiles)
allTiles.extend(User2.tiles)

if __name__ == "__main__":
    m = StateMachine()
    m.add_state("GameStarts", game_started)
    m.add_state("p1TurnStart", p1_turn_start)
    m.add_state("p2TurnStart", p2_turn_start)
    m.add_state("p1TurnEnd", p1_turn_end)
    m.add_state("p2TurnEnd", p2_turn_end)
    m.add_state("p1Win", p1_win)
    m.add_state("p2Win", p2_win)
    m.add_state("GameOver", game_end)
    m.add_state("Out of range", None, end_state=1)
    m.set_start("GameStarts")
    m.run(allTiles)
Example No. 32
class ParserCmdLine:
	def __init__(self,argv):
		#self.argv=argv
		self.params={'-h':'localhost','-d':'','-s':'','-u':'SYSDBA','-p':'masterkey'}
		self.ch='UTF8'
		self.ch=self.detect_codecs()
		self.mydb=None
		self.cmd=None
		self.sm = StateMachine()
		self.create_state(self.sm)
		self.parse_cmd(argv)
		

	def bad_parameters(self):
		print u'Invalid parameters'
		self.help()

	def help(self):
		print u'''Usage:
setup.py i [-h host] [-d database] [-s dsn] [-u user] [-p password]
setup.py c [-h host] [-d database] [-s dsn] [-u user] [-p password]
Commands:
	i	- create the Indexer structures in the database,
			requires the file indexator.sql
	c	- remove the Indexer structures from the database
	u	- update the Indexer structures in the database,
			requires the file update_xxxx.sql
Parameters:
	-h host 	- database server host
	-d database - alias or path to the database on the server
	-s dsn 		- data source in host:database format
	-u user 	- user name (default SYSDBA)
	-p password - password (default masterkey)
		'''.encode(self.ch)

	def no_connect(self):
		print u"Connection error! Check the connection parameters and make sure the server is reachable.".encode(self.ch)
		self.cmd=''

		
	def connect_db(self):
		try:
			if (self.params['-s']==''):# and self.params['-d']!=''):
				print self.params
				self.mydb=kdb.connect(host=self.params['-h'],database=self.params['-d'],user=self.params['-u'], password=self.params['-p'],dialect=3, charset='UTF8' )
			else:
				self.mydb=kdb.connect(dsn=self.params['-s'],user=self.params['-u'], password=self.params['-p'],dialect=3, charset='UTF8' )
			return self.mydb
		except:
			print 'Except'
			self.no_connect()
			return None

	def detect_codecs(self):
		if sys_name=='nt':
			return 'CP866'
		else:
			return 'UTF8'
	
	def parse_cmd(self,argv):
		is_parse=1
		i=0
		cmd=''
		print len(argv)
		if len(argv)>1:
			try:
				while i<len(argv):
					print argv[i],i,argv[i] in ['i','c','u'] 
					if argv[i] in ['i','c','u']:
						cmd=argv[i]
						print cmd
						i+=1
					elif self.params.has_key(argv[i]):
						self.params[argv[i]]=argv[i+1]
						print argv[i+1]
						i+=2
					else:
						print i
						self.bad_parameters()
						is_parse=0
						break
				print self.params
			except:
				self.bad_parameters()
				is_parse=0
			else: 
				if is_parse: 
					self.cmd=cmd
					self.mydb=self.connect_db()
		else:
			self.help()


	def create_state(self,m):
			m.add_state('c', self.clean_db) #, 'TERM','EXT_TERM','COMMIT','SKIP'
			m.add_state('cid', self.clean_db) #clean after install
			m.add_state('id', self.create_db_structures)
			m.add_state('d', self.create_dictionares)
			m.add_state('u',self.update_db)
			m.add_state('ERROR',self.error)
			m.add_state('',None,end_state=1)
			
	def run_command(self):
		if self.cmd:
			if self.cmd=='i': 
				cmd='cid'
			else:
				cmd=self.cmd
			self.sm.set_start(cmd)
			self.sm.run(cmd)
		
	
	def clean_db(self,val):
		import clean_db
		CC=clean_db.IndexerClean()
		if CC.clean(self.mydb)==1:
			newState=val[1:]
		else:
			newState='ERROR'
		return (newState, newState)
		
	def create_db_structures(self,val):
		from parse_script import ParserScript
		input_file=os.path.dirname(__file__)+'\\indexer.sql'
		#output_file=os.path.dirname(__file__)+'\\out.sql'
		parser=ParserScript(self.mydb,input_file)
		parser.run()
		if parser.State=='COMPLIT':
			newState=val[1:]
		else:
			newState='ERROR'
			print "Error in create structures"
		return (newState, newState)
		
	def create_dictionares(self,val):
		import dict_encode
		if dict_encode.run(self.mydb)==1:
			newState=val[1:]
		else:
			newState='ERROR'
			print "Error in create dictionaries"
		return (newState, newState)
	
	def update_db(self,val): 
		return('','')
	
	def error(self,val):
		print 'ERROR in state '+val
		newState=''
		return (newState, newState)
Example No. 33
class SamParaParser:
    def __init__(self):
        # These attributes are set by the parse method
        self.doc = None
        self.para = None
        self.current_string = None
        self.flow = None

        self.stateMachine = StateMachine()
        self.stateMachine.add_state("PARA", self._para)
        self.stateMachine.add_state("ESCAPE", self._escape)
        self.stateMachine.add_state("END", None, end_state=1)
        self.stateMachine.add_state("ANNOTATION-START", self._annotation_start)
        self.stateMachine.add_state("CITATION-START", self._citation_start)
        self.stateMachine.add_state("BOLD-START", self._bold_start)
        self.stateMachine.add_state("ITALIC-START", self._italic_start)
        self.stateMachine.add_state("MONO-START", self._mono_start)
        self.stateMachine.add_state("QUOTES-START", self._quotes_start)
        self.stateMachine.add_state("INLINE-INSERT", self._inline_insert)
        self.stateMachine.set_start("PARA")
        self.patterns = {
            "escape": re.compile(r"\\"),
            "escaped-chars": re.compile(r"[\\\(\{\}\[\]_\*,`]"),
            "annotation": re.compile(
                r'(?<!\\)\{(?P<text>.*?)(?<!\\)\}(\(\s*(?P<type>\S*?\s*[^\\"\']?)(["\'](?P<specifically>.*?)["\'])??\s*(\((?P<namespace>\w+)\))?\))?'
            ),
            "bold": re.compile(r"\*(?P<text>\S.+?\S)\*"),
            "italic": re.compile(r"_(?P<text>\S.*?\S)_"),
            "mono": re.compile(r"`(?P<text>\S.*?\S)`"),
            "quotes": re.compile(r'"(?P<text>\S.*?\S)"'),
            "inline-insert": re.compile(r">>\((?P<attributes>.*?)\)"),
            "citation": re.compile(
                r"(\[\s*\*(?P<id>\S+)(\s+(?P<id_extra>.+?))?\])|(\[\s*\#(?P<name_name>\S+)(\s+(?P<extra>.+?))?\])|(\[\s*(?P<citation>.*?)\])"
            ),
        }

    def parse(self, para, doc, strip=True):
        if para is None:
            return None
        self.doc = doc
        self.para = Para(para, strip)
        self.current_string = ""
        self.flow = Flow()
        self.stateMachine.run(self.para)
        return self.flow

    def _para(self, para):
        try:
            char = para.next_char
        except IndexError:
            self.flow.append(self.current_string)
            self.current_string = ""
            return "END", para
        if char == "\\":
            return "ESCAPE", para
        elif char == "{":
            return "ANNOTATION-START", para
        elif char == "[":
            return "CITATION-START", para
        elif char == "*":
            return "BOLD-START", para
        elif char == "_":
            return "ITALIC-START", para
        elif char == "`":
            return "MONO-START", para
        elif char == '"':
            return "QUOTES-START", para
        elif char == ">":
            return "INLINE-INSERT", para
        else:
            self.current_string += char
            return "PARA", para

    def _annotation_start(self, para):
        match = self.patterns["annotation"].match(para.rest_of_para)
        if match:
            self.flow.append(self.current_string)
            self.current_string = ""
            annotation_type = match.group("type")
            text = match.group("text")

            # If there is an annotated phrase with no annotation, look back
            # to see if it has been annotated already, and if so, copy the
            # closest preceding annotation.
            if annotation_type is None:
                # First look back in the current flow
                # (which is not part of the doc structure yet).
                previous = self.flow.find_last_annotation(text)
                if previous is not None:
                    self.flow.append(previous)
                else:
                    # Then look back in the document.
                    previous = self.doc.find_last_annotation(text)
                    if previous is not None:
                        self.flow.append(previous)

                    # Else raise an exception.
                    else:
                        raise SAMParserError(
                            "Blank annotation found: {"
                            + text
                            + "} "
                            + "If you are trying to insert curly braces "
                            + "into the document, use \{"
                            + text
                            + "]. Otherwise, make sure annotated text matches "
                            "previous annotation exactly."
                        )
            elif annotation_type.strip() == "":
                raise SAMParserError("Annotation type cannot be blank: " + match.group(0))
            else:
                # Check for link shortcut
                if urlparse(annotation_type, None).scheme is not None:
                    specifically = annotation_type
                    annotation_type = "link"
                else:
                    specifically = match.group("specifically") if match.group("specifically") is not None else None
                namespace = match.group("namespace").strip() if match.group("namespace") is not None else None
                self.flow.append(Annotation(annotation_type.strip(), text, specifically, namespace))
            para.advance(len(match.group(0)) - 1)
            return "PARA", para
        else:
            self.current_string += "{"
            return "PARA", para

    def _citation_start(self, para):
        match = self.patterns["citation"].match(para.rest_of_para)
        if match:
            self.flow.append(self.current_string)
            self.current_string = ""

            try:
                idref = match.group("id")
            except IndexError:
                idref = None
            try:
                nameref = match.group("name")
            except IndexError:
                nameref = None
            try:
                citation = match.group("citation")
            except IndexError:
                citation = None

            if idref:
                citation_type = "idref"
                citation_value = idref.strip()
                extra = match.group("id_extra")
            elif nameref:
                citation_type = "nameref"
                citation_value = nameref.strip()
                extra = match.group("name_extra")
            else:
                citation_type = "citation"
                citation_value = citation.strip()
                extra = None

            self.flow.append(Citation(citation_type, citation_value, extra))
            para.advance(len(match.group(0)) - 1)
            return "PARA", para
        else:
            self.current_string += "["
            return "PARA", para

    def _bold_start(self, para):
        match = self.patterns["bold"].match(para.rest_of_para)
        if match:
            self.flow.append(self.current_string)
            self.current_string = ""
            self.flow.append(Decoration("bold", match.group("text")))
            para.advance(len(match.group(0)) - 1)
        else:
            self.current_string += "*"
        return "PARA", para

    def _italic_start(self, para):
        match = self.patterns["italic"].match(para.rest_of_para)
        if match:
            self.flow.append(self.current_string)
            self.current_string = ""
            self.flow.append(Decoration("italic", match.group("text")))
            para.advance(len(match.group(0)) - 1)
        else:
            self.current_string += "_"
        return "PARA", para

    def _mono_start(self, para):
        match = self.patterns["mono"].match(para.rest_of_para)
        if match:
            self.flow.append(self.current_string)
            self.current_string = ""
            self.flow.append(Decoration("mono", match.group("text")))
            para.advance(len(match.group(0)) - 1)
        else:
            self.current_string += "`"
        return "PARA", para

    def _quotes_start(self, para):
        match = self.patterns["quotes"].match(para.rest_of_para)
        if match:
            self.flow.append(self.current_string)
            self.current_string = ""
            self.flow.append(Decoration("quotes", match.group("text")))
            para.advance(len(match.group(0)) - 1)
        else:
            self.current_string += '"'
        return "PARA", para

    def _inline_insert(self, para):
        match = self.patterns["inline-insert"].match(para.rest_of_para)
        if match:
            self.flow.append(self.current_string)
            self.current_string = ""
            self.flow.append(InlineInsert(parse_insert(match.group("attributes"))))
            para.advance(len(match.group(0)) - 1)
        else:
            self.current_string += ">"
        return "PARA", para

    def _inline_insert_id(self, para):
        match = self.patterns["inline-insert_id"].match(para.rest_of_para)
        if match:
            self.flow.append(self.current_string)
            self.current_string = ""
            self.flow.append(InlineInsert("reference", match.group("id")))
            para.advance(len(match.group(0)) - 1)
        else:
            self.current_string += ">"
        return "PARA", para

    def _escape(self, para):
        char = para.next_char
        if self.patterns["escaped-chars"].match(char):
            self.current_string += char
        else:
            self.current_string += "\\" + char
        return "PARA", para
Exemplo n.º 34
0
class SamParser:
    def __init__(self):

        self.stateMachine = StateMachine()
        self.stateMachine.add_state("NEW", self._new_file)
        self.stateMachine.add_state("SAM", self._sam)
        self.stateMachine.add_state("BLOCK", self._block)
        self.stateMachine.add_state("CODEBLOCK-START", self._codeblock_start)
        self.stateMachine.add_state("CODEBLOCK", self._codeblock)
        self.stateMachine.add_state("PARAGRAPH-START", self._paragraph_start)
        self.stateMachine.add_state("PARAGRAPH", self._paragraph)
        self.stateMachine.add_state("RECORD-START", self._record_start)
        self.stateMachine.add_state("RECORD", self._record)
        self.stateMachine.add_state("LIST-ITEM", self._list_item)
        self.stateMachine.add_state("NUM-LIST-ITEM", self._num_list_item)
        self.stateMachine.add_state("BLOCK-INSERT", self._block_insert)
        self.stateMachine.add_state("END", None, end_state=1)
        self.stateMachine.set_start("NEW")
        self.current_paragraph = None
        self.doc = DocStructure()
        self.source = None
        self.patterns = {
            'comment': re.compile(r'\s*#.*'),
            'block-start': re.compile(r'(\s*)([a-zA-Z0-9-_]+):(?:\((.*?)\))?(.*)'),
            'codeblock-start': re.compile(r'(\s*)```(.*)'),
            'codeblock-end': re.compile(r'(\s*)```\s*$'),
            'paragraph-start': re.compile(r'\w*'),
            'blank-line': re.compile(r'^\s*$'),
            'record-start': re.compile(r'\s*[a-zA-Z0-9-_]+::(.*)'),
            'list-item': re.compile(r'(\s*)(\*\s+)(.*)'),
            'num-list-item': re.compile(r'(\s*)([0-9]+\.\s+)(.*)'),
            'block-insert': re.compile(r'(\s*)>>\(.*?\)\w*')
        }

    def parse(self, source):
        self.source = source
        try:
            self.stateMachine.run(self.source)
        except EOFError:
            raise Exception("Document ended before structure was complete. At:\n\n"
                            + self.current_paragraph)

    def paragraph_start(self, line):
        self.current_paragraph = line.strip()

    def paragraph_append(self, line):
        self.current_paragraph += " " + line.strip()

    def pre_start(self, line):
        self.current_paragraph = line

    def pre_append(self, line):
        self.current_paragraph += line

    def _new_file(self, source):
        line = source.next_line
        if line[:4] == 'sam:':
            self.doc.new_root('sam', line[5:])
            return "SAM", source
        else:
            raise Exception("Not a SAM file!")

    def _block(self, source):
        line = source.currentLine
        match = self.patterns['block-start'].match(line)
        indent = len(match.group(1))
        element = match.group(2).strip()
        attributes = match.group(3)
        content = match.group(4).strip()

        if content[:1] == ':':
            return "RECORD-START", source
        else:
            self.doc.new_block(element, attributes, content, indent)
            return "SAM", source

    def _codeblock_start(self, source):
        line = source.currentLine
        local_indent = len(line) - len(line.lstrip())
        match = self.patterns['codeblock-start'].match(line)
        attributes = re.compile(r'\((.*?)\)').match(match.group(2).strip())
        language = attributes.group(1)
        self.doc.new_block('codeblock', language, None, local_indent)
        self.pre_start('')
        return "CODEBLOCK", source

    def _codeblock(self, source):
        line = source.next_line
        if self.patterns['codeblock-end'].match(line):
            self.doc.new_flow(Pre(self.current_paragraph))
            return "SAM", source
        else:
            self.pre_append(line)
            return "CODEBLOCK", source

    def _paragraph_start(self, source):
        line = source.currentLine
        local_indent = len(line) - len(line.lstrip())
        self.doc.new_block('p', None, '', local_indent)
        self.paragraph_start(line)
        return "PARAGRAPH", source

    def _paragraph(self, source):
        line = source.next_line
        if self.patterns['blank-line'].match(line):
            para_parser.parse(self.current_paragraph, self.doc)
            return "SAM", source
        else:
            self.paragraph_append(line)
            return "PARAGRAPH", source

    def _list_item(self, source):
        line = source.currentLine
        match = self.patterns['list-item'].match(line)
        local_indent = len(match.group(1))
        content_indent = local_indent + len(match.group(2))
        self.doc.new_unordered_list_item(local_indent, content_indent)
        self.paragraph_start(str(match.group(3)).strip())
        return "PARAGRAPH", source


    def _num_list_item(self, source):
        line = source.currentLine
        match = self.patterns['num-list-item'].match(line)
        local_indent = len(match.group(1))
        content_indent = local_indent + len(match.group(2))
        self.doc.new_ordered_list_item(local_indent, content_indent)
        self.paragraph_start(str(match.group(3)).strip())
        return "PARAGRAPH", source

    def _block_insert(self, source):
        line = source.currentLine
        indent = len(source.currentLine) - len(source.currentLine.lstrip())
        attribute_pattern = re.compile(r'\s*>>\((.*?)\)')
        match = attribute_pattern.match(line)
        self.doc.new_block('insert', text='', attributes=parse_insert(match.group(1)), indent=indent)
        return "SAM", source

    def _record_start(self, source):
        line = source.currentLine
        match = self.patterns['block-start'].match(line)
        local_indent = len(match.group(1))
        local_element = match.group(2).strip()
        field_names = [x.strip() for x in self.patterns['record-start'].match(line).group(1).split(',')]
        self.doc.new_record_set(local_element, field_names, local_indent)
        return "RECORD", source

    def _record(self, source):
        line = source.next_line
        if self.patterns['blank-line'].match(line):
            return "SAM", source
        else:
            field_values = [x.strip() for x in line.split(',')]
            record = list(zip(self.doc.fields, field_values))
            self.doc.new_record(record)
            return "RECORD", source

    def _sam(self, source):
        try:
            line = source.next_line
        except EOFError:
            return "END", source
        if self.patterns['comment'].match(line):
            self.doc.new_comment(Comment(line.strip()[1:]))
            return "SAM", source
        elif self.patterns['block-start'].match(line):
            return "BLOCK", source
        elif self.patterns['blank-line'].match(line):
            return "SAM", source
        elif self.patterns['codeblock-start'].match(line):
            return "CODEBLOCK-START", source
        elif self.patterns['list-item'].match(line):
            return "LIST-ITEM", source
        elif self.patterns['num-list-item'].match(line):
            return "NUM-LIST-ITEM", source
        elif self.patterns['block-insert'].match(line):
            return "BLOCK-INSERT", source
        elif self.patterns['paragraph-start'].match(line):
            return "PARAGRAPH-START", source
        else:
            raise Exception("I'm confused")

    def serialize(self, serialize_format):
        return self.doc.serialize(serialize_format)
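
# Standalone sketch of the line-dispatch loop in _sam above: each line is
# tested against the block patterns in order and the first match names the
# next state. The pattern list here is a trimmed, illustrative subset.

import re

patterns = [
    ("CODEBLOCK-START", re.compile(r'(\s*)```(.*)')),
    ("LIST-ITEM", re.compile(r'(\s*)(\*\s+)(.*)')),
    ("BLANK", re.compile(r'^\s*$')),
]


def classify(line):
    for state, pattern in patterns:
        if pattern.match(line):
            return state
    return "PARAGRAPH-START"   # the catch-all, as in the parser above


print(classify("```python"))   # CODEBLOCK-START
print(classify("* an item"))   # LIST-ITEM
print(classify("plain text"))  # PARAGRAPH-START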
Exemplo n.º 35
0
		return True
	else:
		return False

_0to1 = Path('0 to state 1').from_(state0).to_(state1).when('go to 1')

_0to2 = Path('0 to state 2').from_(state0).to_(state2).when('go to 2')

_1to2 = Path('1 to state 2').from_(state1).to_(state2).when(testfunc1)

_2to3 = Path('2 to state 3').from_(state2).to_(state3).when(testfunc2)

test = TestObject(state0)


# start the interactive input loop
while True:
	line = raw_input('input>>')
	if line == 'quit':
		break
	pre_state = test.state.state_desc
	output = statemachine.run(test, line)
	if output is not False:
		print 'output is: ' + output
		print 'state change: from %s to %s.' % (pre_state, test.state.state_desc)
	else:
		print 'invalid input: ' + line
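
# Standalone sketch (plain dictionaries rather than the Path/when API used
# above) of the same idea: a transition table maps (state, input) to the next
# state, and inputs with no matching transition are reported as invalid.

transitions = {
    ('state 0', 'go to 1'): 'state 1',
    ('state 0', 'go to 2'): 'state 2',
    ('state 1', 'go to 2'): 'state 2',
}

state = 'state 0'
for line in ['go to 1', 'jump', 'go to 2']:
    nxt = transitions.get((state, line))
    if nxt is None:
        print('invalid input: ' + line)
    else:
        print('state change: from %s to %s.' % (state, nxt))
        state = nxt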
		


Exemplo n.º 36
0
class SamParaParser:
    def __init__(self):
        # These attributes are set by the parse method
        self.doc = None
        self.para = None
        self.current_string = None
        self.flow = None

        self.stateMachine = StateMachine()
        self.stateMachine.add_state("PARA", self._para)
        self.stateMachine.add_state("ESCAPE", self._escape)
        self.stateMachine.add_state("END", None, end_state=1)
        self.stateMachine.add_state("ANNOTATION-START", self._annotation_start)
        self.stateMachine.add_state("CITATION-START", self._citation_start)
        self.stateMachine.add_state("BOLD-START", self._bold_start)
        self.stateMachine.add_state("ITALIC-START", self._italic_start)
        self.stateMachine.add_state("CODE-START", self._code_start)
        self.stateMachine.add_state("QUOTES-START", self._quotes_start)
        self.stateMachine.add_state("INLINE-INSERT", self._inline_insert)
        self.stateMachine.add_state("CHARACTER-ENTITY", self._character_entity)
        self.stateMachine.set_start("PARA")
        self.patterns = {
            'escape':
            re.compile(r'\\', re.U),
            'escaped-chars':
            re.compile(r'[\\\(\{\}\[\]_\*,\.\*`"&]', re.U),
            'annotation':
            re.compile(
                r'(?<!\\)\{(?P<text>.*?)(?<!\\)\}(\(\s*(?P<type>\S*?\s*[^\\"\']?)(["\'](?P<specifically>.*?)["\'])??\s*(\((?P<namespace>\w+)\))?\s*(~(?P<language>[\w-]+))?\))?',
                re.U),
            'bold':
            re.compile(r'\*(?P<text>((?<=\\)\*|[^\*])*)(?<!\\)\*', re.U),
            'italic':
            re.compile(r'_(?P<text>((?<=\\)_|[^_])*)(?<!\\)_', re.U),
            'code':
            re.compile(r'`(?P<text>(``|[^`])*)`', re.U),
            'quotes':
            re.compile(r'"(?P<text>((?<=\\)"|[^"])*)(?<!\\)"', re.U),
            'inline-insert':
            re.compile(r'>\((?P<attributes>.*?)\)', re.U),
            'character-entity':
            re.compile(r'&(\#[0-9]+|#[xX][0-9a-fA-F]+|[\w]+);'),
            'citation':
            re.compile(
                # group names must match the lookups in _citation_start
                r'(\[\s*\*(?P<id>\S+)(\s+(?P<id_extra>.+?))?\])|(\[\s*\#(?P<name>\S+)(\s+(?P<name_extra>.+?))?\])|(\[\s*(?P<citation>.*?)\])',
                re.U)
        }

    def parse(self, para, doc, strip=True):
        if para is None:
            return None
        self.doc = doc
        self.para = Para(para, strip)
        self.current_string = ''
        self.flow = Flow()
        self.stateMachine.run(self.para)
        return self.flow

    def _para(self, para):
        try:
            char = para.next_char
        except IndexError:
            self.flow.append(self.current_string)
            self.current_string = ''
            return "END", para
        if char == '\\':
            return "ESCAPE", para
        elif char == '{':
            return "ANNOTATION-START", para
        elif char == '[':
            return "CITATION-START", para
        elif char == "*":
            return "BOLD-START", para
        elif char == "_":
            return "ITALIC-START", para
        elif char == "`":
            return "CODE-START", para
        elif char == '"':
            return "QUOTES-START", para
        elif char == ">":
            return "INLINE-INSERT", para
        elif char == "&":
            return "CHARACTER-ENTITY", para
        else:
            self.current_string += char
            return "PARA", para

    def _annotation_start(self, para):
        match = self.patterns['annotation'].match(para.rest_of_para)
        if match:
            self.flow.append(self.current_string)
            self.current_string = ''
            annotation_type = match.group('type')
            language = match.group('language')
            text = self._unescape(match.group("text"))

            # If there is an annotated phrase with no annotation, look back
            # to see if it has been annotated already, and if so, copy the
            # closest preceding annotation.
            if annotation_type is None and not language:
                # First look back in the current flow
                # (which is not part of the doc structure yet).
                previous = self.flow.find_last_annotation(text)
                if previous is not None:
                    self.flow.append(previous)
                else:
                    # Then look back in the document.
                    previous = self.doc.find_last_annotation(text)
                    if previous is not None:
                        self.flow.append(previous)

                    # Else output a warning.
                    else:
                        self.current_string += text
                        SAM_parser_warning(
                            "Blank annotation found: {" + text + "} " +
                            "If you are trying to insert curly braces " +
                            "into the document, use \{" + text +
                            "]. Otherwise, make sure annotated text matches "
                            "previous annotation exactly.")
            else:
                #Check for link shortcut
                if urlparse(annotation_type, None).scheme is not None:
                    specifically = annotation_type
                    annotation_type = 'link'
                else:
                    specifically = match.group('specifically') if match.group(
                        'specifically') is not None else None
                namespace = match.group('namespace').strip() if match.group(
                    'namespace') is not None else None
                self.flow.append(
                    Annotation(annotation_type, text, specifically, namespace,
                               language))
            para.advance(len(match.group(0)) - 1)
            return "PARA", para
        else:
            self.current_string += '{'
            return "PARA", para

    def _citation_start(self, para):
        match = self.patterns['citation'].match(para.rest_of_para)
        if match:
            self.flow.append(self.current_string)
            self.current_string = ''

            try:
                idref = match.group('id')
            except IndexError:
                idref = None
            try:
                nameref = match.group('name')
            except IndexError:
                nameref = None
            try:
                citation = match.group('citation')
            except IndexError:
                citation = None

            if idref:
                citation_type = 'idref'
                citation_value = idref.strip()
                extra = match.group('id_extra')
            elif nameref:
                citation_type = 'nameref'
                citation_value = nameref.strip()
                extra = match.group('name_extra')
            else:
                citation_type = 'citation'
                citation_value = citation.strip()
                extra = None

            self.flow.append(Citation(citation_type, citation_value, extra))
            para.advance(len(match.group(0)) - 1)
            return "PARA", para
        else:
            self.current_string += '['
            return "PARA", para

    def _bold_start(self, para):
        match = self.patterns['bold'].match(para.rest_of_para)
        if match:
            self.flow.append(self.current_string)
            self.current_string = ''
            self.flow.append(
                Annotation('bold', self._unescape(match.group("text"))))
            para.advance(len(match.group(0)) - 1)
        else:
            self.current_string += '*'
        return "PARA", para

    def _italic_start(self, para):
        match = self.patterns['italic'].match(para.rest_of_para)
        if match:
            self.flow.append(self.current_string)
            self.current_string = ''
            self.flow.append(
                Annotation('italic', self._unescape(match.group("text"))))
            para.advance(len(match.group(0)) - 1)
        else:
            self.current_string += '_'
        return "PARA", para

    def _code_start(self, para):
        match = self.patterns['code'].match(para.rest_of_para)
        if match:
            self.flow.append(self.current_string)
            self.current_string = ''
            self.flow.append(
                Annotation('code', (match.group("text")).replace("``", "`")))
            para.advance(len(match.group(0)) - 1)
        else:
            self.current_string += '`'
        return "PARA", para

    def _quotes_start(self, para):
        match = self.patterns['quotes'].match(para.rest_of_para)
        if match:
            self.flow.append(self.current_string)
            self.current_string = ''
            self.flow.append(
                Annotation('quotes', self._unescape(match.group("text"))))
            para.advance(len(match.group(0)) - 1)
        else:
            self.current_string += '"'
        return "PARA", para

    def _inline_insert(self, para):
        match = self.patterns['inline-insert'].match(para.rest_of_para)
        if match:
            self.flow.append(self.current_string)
            self.current_string = ''
            self.flow.append(
                InlineInsert(parse_insert(match.group("attributes"))))
            para.advance(len(match.group(0)) - 1)
        else:
            self.current_string += '>'
        return "PARA", para

    def _inline_insert_id(self, para):
        match = self.patterns['inline-insert_id'].match(para.rest_of_para)
        if match:
            self.flow.append(self.current_string)
            self.current_string = ''
            self.flow.append(InlineInsert('reference', match.group("id")))
            para.advance(len(match.group(0)) - 1)
        else:
            self.current_string += '>'
        return "PARA", para

    def _character_entity(self, para):
        match = self.patterns['character-entity'].match(para.rest_of_para)
        if match:
            self.current_string += self.patterns['character-entity'].sub(
                self._replace_charref, match.group(0))
            para.advance(len(match.group(0)) - 1)
        else:
            self.current_string += '&'
        return "PARA", para

    def _replace_charref(self, match):
        try:
            charref = match.group(0)
        except AttributeError:
            charref = match
        character = html.unescape(charref)
        if character == charref:  # Escape not recognized
            raise SAMParserError("Unrecognized character entity found: " +
                                 charref)
        return character

    def _escape(self, para):
        char = para.next_char
        if self.patterns['escaped-chars'].match(char):
            self.current_string += char
        else:
            self.current_string += '\\' + char
        return "PARA", para

    def _unescape(self, string):
        result = ''
        e = enumerate(string)
        for pos, char in e:
            try:
                if char == '\\' and self.patterns['escaped-chars'].match(
                        string[pos + 1]):
                    result += string[pos + 1]
                    next(e, None)
                elif char == '&':
                    match = self.patterns['character-entity'].match(
                        string[pos:])
                    if match:
                        result += self.patterns['character-entity'].sub(
                            self._replace_charref, match.group(0))
                        for i in range(0, len(match.group(0))):
                            next(e, None)
                    else:
                        result += char
                else:
                    result += char
            except IndexError:
                result += char
        return result
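
# Small standalone check of the character-entity handling in the example
# above: a recognized entity is replaced via html.unescape, while a result
# that comes back unchanged signals an unknown entity (the parser raises a
# SAMParserError in that case).

import html
import re

entity = re.compile(r'&(\#[0-9]+|#[xX][0-9a-fA-F]+|[\w]+);')

for ref in ['&amp;', '&#65;', '&notarealentity;']:
    if entity.match(ref):
        replaced = html.unescape(ref)
        if replaced == ref:
            print('unrecognized entity: ' + ref)
        else:
            print(ref + ' -> ' + replaced)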
Exemplo n.º 37
0
else:
    startstate = MetaClone

# STATEMACHINE
sm = StateMachine(0.1, startstate, globals(), locals())

BTRACE_POLL_COUNT = 0


def btrace_poller(smobj):
    global BTRACE_POLL_COUNT
    if btrace.blkparse is not None and btrace.parser is not None:
        lines_read = btrace.parser.read_btrace()
        BTRACE_POLL_COUNT += 1
        if lines_read > 0:
            # unused are marked finished so when ANDed using ddrescuelog
            # only definitely unused parts remain finished
            btracelog = btrace.parser.write_ddrescuelog(
                OPTIONS, 'non-tried', 'finished', 0, DEVSIZE)
            if ddrescue.VIEWER is not None:
                shutil.copyfile(btracelog, ddrescue.ddrlog)


sm.add_persistent_task(btrace_poller)

# EXECUTE
sm.run()

# CLEANUP BEFORE NORMAL EXIT
cleanup()
Exemplo n.º 38
0
class SamParser:
    def __init__(self):

        self.stateMachine = StateMachine()
        self.stateMachine.add_state("NEW", self._new_file)
        self.stateMachine.add_state("SAM", self._sam)
        self.stateMachine.add_state("BLOCK", self._block)
        self.stateMachine.add_state("CODEBLOCK-START", self._codeblock_start)
        self.stateMachine.add_state("CODEBLOCK", self._codeblock)
        self.stateMachine.add_state("BLOCKQUOTE-START", self._blockquote_start)
        self.stateMachine.add_state("FRAGMENT-START", self._fragment_start)
        self.stateMachine.add_state("PARAGRAPH-START", self._paragraph_start)
        self.stateMachine.add_state("PARAGRAPH", self._paragraph)
        self.stateMachine.add_state("RECORD-START", self._record_start)
        self.stateMachine.add_state("RECORD", self._record)
        self.stateMachine.add_state("LIST-ITEM", self._list_item)
        self.stateMachine.add_state("NUM-LIST-ITEM", self._num_list_item)
        self.stateMachine.add_state("LABELED-LIST-ITEM",
                                    self._labeled_list_item)
        self.stateMachine.add_state("BLOCK-INSERT", self._block_insert)
        self.stateMachine.add_state("STRING-DEF", self._string_def)
        self.stateMachine.add_state("LINE-START", self._line_start)
        self.stateMachine.add_state("EMBEDDED-XML", self._embedded_xml)
        self.stateMachine.add_state("END", None, end_state=1)
        self.stateMachine.set_start("NEW")
        self.current_text_block = None
        self.doc = DocStructure()
        self.source = None
        self.patterns = {
            'sam-declaration':
            re.compile(
                r'sam:\s*(?:(?:\{(?P<namespace>\S+?)\})|(?P<schema>\S+))?',
                re.U),
            'comment':
            re.compile(re_indent + re_comment, re.U),
            'block-start':
            re.compile(
                re_indent + re_name + r':' + re_attributes + re_content + r'?',
                re.U),
            'codeblock-start':
            re.compile(
                re_indent +
                r'(?P<flag>```[^\s\(]*)(\((?P<language>\S*)\s*(["\'](?P<source>.+?)["\'])?\s*(\((?P<namespace>\S+?)\))?(?P<other>.+?)?\))?',
                re.U),
            'blockquote-start':
            re.compile(
                re_indent + r'("""|\'\'\'|blockquote:)' + re_attributes +
                r'((\[\s*\*(?P<id>\S+)(?P<id_extra>.+?)\])|(\[\s*\#(?P<name>\S+)(?P<name_extra>.+?)\])|(\[\s*(?P<citation>.*?)\]))?',
                re.U),
            'fragment-start':
            re.compile(re_indent + r'~~~' + re_attributes, re.U),
            'paragraph-start':
            re.compile(r'\w*', re.U),
            'line-start':
            re.compile(
                re_indent + r'\|' + re_attributes + re_one_space + re_content,
                re.U),
            'blank-line':
            re.compile(r'^\s*$'),
            'record-start':
            re.compile(re_indent + re_name + r'::(?P<field_names>.*)', re.U),
            'list-item':
            re.compile(
                re_indent + re_ul_marker + re_attributes + re_spaces +
                re_content, re.U),
            'num-list-item':
            re.compile(
                re_indent + re_ol_marker + re_attributes + re_spaces +
                re_content, re.U),
            'labeled-list-item':
            re.compile(
                re_indent + re_ll_marker + re_attributes + re_spaces +
                re_content, re.U),
            'block-insert':
            re.compile(re_indent + r'>>>' + re_attributes, re.U),
            'string-def':
            re.compile(re_indent + r'\$' + re_name + '=' + re_content, re.U),
            'embedded-xml':
            re.compile(re_indent + r'(?P<xmltag>\<\?xml.+)', re.U)
        }

    def parse(self, source):
        self.source = StringSource(source)
        try:
            self.stateMachine.run(self.source)
        except EOFError:
            raise SAMParserError(
                "Document ended before structure was complete.")

    def _new_file(self, source):
        line = source.next_line
        match = self.patterns['sam-declaration'].match(line)
        if match:
            self.doc.new_root(match)
            return "SAM", (source, None)
        else:
            raise SAMParserError("Not a SAM file!")

    def _block(self, context):
        source, match = context
        indent = len(match.group("indent"))
        block_name = match.group("name").strip()
        attributes = self.parse_block_attributes(match.group("attributes"))
        content = match.group("content")
        parsed_content = None if content == '' else para_parser.parse(
            content, self.doc)
        self.doc.new_block(block_name, attributes, parsed_content, indent)
        return "SAM", context

    def _codeblock_start(self, context):
        source, match = context
        indent = len(match.group("indent"))
        codeblock_flag = match.group("flag")
        self.patterns['codeblock-end'] = re.compile(r'(\s*)' + codeblock_flag +
                                                    r'\s*$')

        attributes = {}

        language = match.group("language")
        if language is not None:
            attributes['language'] = language

        source = match.group("source")
        if source is not None:
            attributes["source"] = source

        namespace = match.group("namespace")
        if namespace is not None:
            attributes["namespace"] = namespace

        other = match.group("other")
        if other is not None:
            attributes.update(self.parse_block_attributes(other))

        self.doc.new_block('codeblock', attributes, None, indent)
        self.current_text_block = TextBlock()
        return "CODEBLOCK", context

    def _codeblock(self, context):
        source, match = context
        line = source.next_line
        if self.patterns['codeblock-end'].match(line):
            self.doc.new_flow(Pre(self.current_text_block))
            self.current_text_block = None
            return "SAM", context
        else:
            self.current_text_block.append(line)
            return "CODEBLOCK", context

    def _blockquote_start(self, context):
        source, match = context
        indent = len(match.group('indent'))

        # TODO: Refactor this with the paraparser version

        extra = source.current_line.rstrip()[len(match.group(0)):]
        if extra:
            raise SAMParserError("Extra text found after blockquote start: " +
                                 extra)

        attributes = self.parse_block_attributes(match.group("attributes"))

        b = self.doc.new_block('blockquote', attributes, None, indent)

        #see if there is a citation
        try:
            idref = match.group('id')
        except IndexError:
            idref = None
        try:
            nameref = match.group('name')
        except IndexError:
            nameref = None
        try:
            citation = match.group('citation')
        except IndexError:
            citation = None

        if idref:
            citation_type = 'idref'
            citation_value = idref.strip()
            extra = match.group('id_extra')
        elif nameref:
            citation_type = 'nameref'
            citation_value = nameref.strip()
            extra = match.group('name_extra')
        elif citation:
            citation_type = 'citation'
            citation_value = citation.strip()
        else:
            citation_type = None

        if citation_type:
            cit = (Citation(citation_type, citation_value, extra))
            b.add_child(cit)

        return "SAM", context

    def _fragment_start(self, context):
        source, match = context
        indent = len(match.group('indent'))

        attributes = {}

        attributes_string = match.group("attributes")
        if attributes_string is not None:
            attributes.update(self.parse_block_attributes(attributes_string))

        self.doc.new_block('fragment', attributes, None, indent)
        return "SAM", context

    def _paragraph_start(self, context):
        source, match = context
        line = source.current_line
        local_indent = len(line) - len(line.lstrip())
        self.doc.new_paragraph(None, '', local_indent)
        self.current_text_block = TextBlock(line)
        return "PARAGRAPH", context

    def _paragraph(self, context):
        source, match = context
        try:
            line = source.next_line
        except EOFError:
            f = para_parser.parse(self.current_text_block.text, self.doc)
            self.current_text_block = None
            self.doc.new_flow(f)
            return "END", context

        if self.patterns['blank-line'].match(line):
            f = para_parser.parse(self.current_text_block.text, self.doc)
            self.current_text_block = None
            self.doc.new_flow(f)
            return "SAM", context

        if self.doc.in_context(['p', 'li']):
            if self.patterns['list-item'].match(
                    line) or self.patterns['num-list-item'].match(
                        line) or self.patterns['labeled-list-item'].match(
                            line):
                f = para_parser.parse(self.current_text_block.text, self.doc)
                self.current_text_block = None
                self.doc.new_flow(f)
                source.return_line()
                return "SAM", context

        self.current_text_block.append(line)
        return "PARAGRAPH", context

    def _list_item(self, context):
        source, match = context
        indent = len(match.group("indent"))
        attributes = self.parse_block_attributes(match.group("attributes"))
        self.doc.new_unordered_list_item(attributes, indent)
        self.current_text_block = TextBlock(
            str(match.group("content")).strip())
        return "PARAGRAPH", context

    def _num_list_item(self, context):
        source, match = context
        indent = len(match.group("indent"))
        attributes = self.parse_block_attributes(match.group("attributes"))
        self.doc.new_ordered_list_item(attributes, indent)
        self.current_text_block = TextBlock(
            str(match.group("content")).strip())
        return "PARAGRAPH", context

    def _labeled_list_item(self, context):
        source, match = context
        indent = len(match.group("indent"))
        label = match.group("label")
        attributes = self.parse_block_attributes(match.group("attributes"))
        self.doc.new_labeled_list_item(attributes, indent, label)
        self.current_text_block = TextBlock(
            str(match.group("content")).strip())
        return "PARAGRAPH", context

    def _block_insert(self, context):
        source, match = context
        indent = len(match.group("indent"))
        self.doc.new_block("insert",
                           attributes=parse_insert(match.group("attributes")),
                           text=None,
                           indent=indent)
        return "SAM", context

    def _string_def(self, context):
        source, match = context
        indent = len(match.group("indent"))
        self.doc.new_string_def(match.group('name'),
                                para_parser.parse(match.group('content'),
                                                  self.doc),
                                indent=indent)
        return "SAM", context

    def _line_start(self, context):
        source, match = context
        indent = len(match.group("indent"))
        self.doc.new_block('line',
                           self.parse_block_attributes(
                               match.group("attributes")),
                           para_parser.parse(match.group('content'),
                                             self.doc,
                                             strip=False),
                           indent=indent)
        return "SAM", context

    def _record_start(self, context):
        source, match = context
        indent = len(match.group("indent"))
        record_name = match.group("name").strip()
        field_names = [
            x.strip() for x in match.group("field_names").split(',')
        ]
        self.doc.new_record_set(record_name, field_names, indent)
        return "RECORD", context

    def _record(self, context):
        source, match = context
        try:
            line = source.next_line
        except EOFError:
            return "END", context
        indent = len(line) - len(line.lstrip())
        if self.patterns['blank-line'].match(line):
            return "SAM", context
        elif indent < self.doc.current_block.indent:
            source.return_line()
            return "SAM", context
        else:
            field_values = [x.strip() for x in re.split(r'(?<!\\),', line)]
            if len(field_values) != len(self.doc.fields):
                raise SAMParserError(
                    "Record length does not match record set header. At:\n\n "
                    + line)
            record = list(zip(self.doc.fields, field_values))
            self.doc.new_record(record)
            return "RECORD", context

    def _embedded_xml(self, context):
        source, match = context
        indent = len(match.group("indent"))
        embedded_xml_parser = xml.parsers.expat.ParserCreate()
        embedded_xml_parser.XmlDeclHandler = self._embedded_xml_declaration_check
        embedded_xml_parser.Parse(source.current_line.strip())
        xml_lines = []
        try:
            while True:
                line = source.next_line
                xml_lines.append(line)
                embedded_xml_parser.Parse(line)
        except xml.parsers.expat.ExpatError as err:
            if err.code == 9:  #junk after document element
                source.return_line()
                xml_text = ''.join(xml_lines[:-1])
                self.doc.new_embedded_xml(xml_text, indent)
                return "SAM", context
            else:
                raise

    def _embedded_xml_declaration_check(self, version, encoding, standalone):
        if version != "1.0":
            raise SAMParserError(
                "The version of an embedded XML fragment must be 1.0.")
        if encoding.upper() != "UTF-8":
            raise SAMParserError(
                "The encoding of an embedded XML fragment must be UTF-8.")

    def _sam(self, context):
        source, match = context
        try:
            line = source.next_line
        except EOFError:
            return "END", context

        match = self.patterns['comment'].match(line)
        if match is not None:
            self.doc.new_comment(Comment(line.strip()[1:]))
            return "SAM", (source, match)

        match = self.patterns['record-start'].match(line)
        if match is not None:
            return "RECORD-START", (source, match)

        match = self.patterns['blank-line'].match(line)
        if match is not None:
            return "SAM", (source, match)

        match = self.patterns['codeblock-start'].match(line)
        if match is not None:
            return "CODEBLOCK-START", (source, match)

        match = self.patterns['blockquote-start'].match(line)
        if match is not None:
            return "BLOCKQUOTE-START", (source, match)

        match = self.patterns['fragment-start'].match(line)
        if match is not None:
            return "FRAGMENT-START", (source, match)

        match = self.patterns['list-item'].match(line)
        if match is not None:
            return "LIST-ITEM", (source, match)

        match = self.patterns['num-list-item'].match(line)
        if match is not None:
            return "NUM-LIST-ITEM", (source, match)

        match = self.patterns['labeled-list-item'].match(line)
        if match is not None:
            return "LABELED-LIST-ITEM", (source, match)

        match = self.patterns['block-insert'].match(line)
        if match is not None:
            return "BLOCK-INSERT", (source, match)

        match = self.patterns['string-def'].match(line)
        if match is not None:
            return "STRING-DEF", (source, match)

        match = self.patterns['line-start'].match(line)
        if match is not None:
            return "LINE-START", (source, match)

        match = self.patterns['embedded-xml'].match(line)
        if match is not None:
            return "EMBEDDED-XML", (source, match)

        match = self.patterns['block-start'].match(line)
        if match is not None:
            return "BLOCK", (source, match)

        match = self.patterns['paragraph-start'].match(line)
        if match is not None:
            return "PARAGRAPH-START", (source, match)

        raise SAMParserError("I'm confused")

    def serialize(self, serialize_format):
        return self.doc.serialize(serialize_format)

    def parse_block_attributes(self, attributes_string):
        result = {}
        try:
            attributes_list = attributes_string.split()
        except AttributeError:
            return None
        unexpected_attributes = [
            x for x in attributes_list if not (x[0] in '?#*~')
        ]
        if unexpected_attributes:
            raise SAMParserError("Unexpected attribute(s): {0}".format(
                ', '.join(unexpected_attributes)))
        ids = [x[1:] for x in attributes_list if x[0] == '*']
        if len(ids) > 1:
            raise SAMParserError("More than one ID specified: " +
                                 ", ".join(ids))
        names = [x[1:] for x in attributes_list if x[0] == '#']
        if len(names) > 1:
            raise SAMParserError("More than one name specified: " +
                                 ", ".join(names))
        language = [x[1:] for x in attributes_list if x[0] == '~']
        if len(language) > 1:
            raise SAMParserError("More than one language specified: " +
                                 ", ".join(language))
        conditions = [x[1:] for x in attributes_list if x[0] == '?']
        if ids:
            if ids[0] in self.doc.ids:
                raise SAMParserError("Duplicate ID found: " + ids[0])
            self.doc.ids.extend(ids)
            result["id"] = "".join(ids)
        if names:
            result["name"] = "".join(names)
        if language:
            result["xml:lang"] = "".join(language)
        if conditions:
            result["conditions"] = " ".join(conditions)
        return result
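
# Standalone sketch of the attribute convention handled by
# parse_block_attributes above: '*' marks an id, '#' a name, '~' a language
# and '?' a condition. This is a simplified illustration, not the method
# itself (it skips the duplicate checks and collects conditions into a list).

def parse_attrs(attributes_string):
    result = {}
    for token in (attributes_string or '').split():
        flag, value = token[0], token[1:]
        if flag == '*':
            result['id'] = value
        elif flag == '#':
            result['name'] = value
        elif flag == '~':
            result['xml:lang'] = value
        elif flag == '?':
            result.setdefault('conditions', []).append(value)
    return result


print(parse_attrs('*intro #overview ~en ?draft'))
# {'id': 'intro', 'name': 'overview', 'xml:lang': 'en', 'conditions': ['draft']}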
Exemplo n.º 39
0
        reason = 'EHTML'
        newstate = 'error_state'
        print reason,i
    return (newstate,txt)
    

    
if __name__=='__main__':
    m = StateMachine()
    m.add_state('orig',doctype_way)
    m.add_state('doctype_state',doctype_state_way )
    m.add_state("dhtml_state",dhtml_state_way)
    m.add_state("shtml_state",shtml_state_way)
    m.add_state("shead_state",shead_state_way)
    m.add_state("stitle_state",stitle_state_way)
    m.add_state("etitle_state",etitle_state_way)
    m.add_state("ehead_state",ehead_state_way)
    m.add_state("sbody_state",sbody_state_way)
    m.add_state("sp_state",sp_state_way)
    m.add_state("ep_state",ep_state_way)
    m.add_state("sa_state",sa_state_way)
    m.add_state("ea_state",ea_state_way)
    m.add_state("ebody_state",ebody_state_way)
    m.add_state("finstate",None,end_state=1)
    m.add_state("error_state", None, end_state=1)
    m.set_start('orig')
    m.run(open('1.txt').read())
    
relink = re.compile(r'<A href="(.*)".*>')
print relink.findall(open('1.txt').read())
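
# Illustrative sketch of the tag-chain idea above: each state looks for its
# expected tag in the remaining text and hands the rest to the next state; a
# missing tag sends the machine to the error state. This is a simplified
# stand-in, not the handler functions the example wires up.

def expect(tag, next_state):
    def way(txt):
        pos = txt.lower().find(tag)
        if pos == -1:
            return 'error_state', txt
        return next_state, txt[pos + len(tag):]
    return way


handlers = {
    'orig': expect('<!doctype', 'dhtml_state'),
    'dhtml_state': expect('<html', 'shead_state'),
    'shead_state': expect('<head', 'finstate'),
}

state, txt = 'orig', '<!DOCTYPE html><html><head><title>x</title>'
while state not in ('finstate', 'error_state'):
    state, txt = handlers[state](txt)
print(state)   # finstate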
Exemplo n.º 40
0
class SamParser:
    def __init__(self):

        self.stateMachine = StateMachine()
        self.stateMachine.add_state("NEW", self._new_file)
        self.stateMachine.add_state("SAM", self._sam)
        self.stateMachine.add_state("BLOCK", self._block)
        self.stateMachine.add_state("CODEBLOCK-START", self._codeblock_start)
        self.stateMachine.add_state("CODEBLOCK", self._codeblock)
        self.stateMachine.add_state("BLOCKQUOTE-START", self._blockquote_start)
        self.stateMachine.add_state("FRAGMENT-START", self._fragment_start)
        self.stateMachine.add_state("PARAGRAPH-START", self._paragraph_start)
        self.stateMachine.add_state("PARAGRAPH", self._paragraph)
        self.stateMachine.add_state("RECORD-START", self._record_start)
        self.stateMachine.add_state("RECORD", self._record)
        self.stateMachine.add_state("LIST-ITEM", self._list_item)
        self.stateMachine.add_state("NUM-LIST-ITEM", self._num_list_item)
        self.stateMachine.add_state("LABELED-LIST-ITEM", self._labeled_list_item)
        self.stateMachine.add_state("BLOCK-INSERT", self._block_insert)
        self.stateMachine.add_state("STRING-DEF", self._string_def)
        self.stateMachine.add_state("LINE-START", self._line_start)
        self.stateMachine.add_state("EMBEDDED-XML", self._embedded_xml)
        self.stateMachine.add_state("END", None, end_state=1)
        self.stateMachine.set_start("NEW")
        self.current_text_block = None
        self.doc = DocStructure()
        self.source = None
        self.patterns = {
            'sam-declaration': re.compile(r'sam:\s*(?:(?:\{(?P<namespace>\S+?)\})|(?P<schema>\S+))?', re.U),
            'comment': re.compile(re_indent + re_comment, re.U),
            'block-start': re.compile(re_indent + re_name + r':' + re_attributes + re_content + r'?', re.U),
            'codeblock-start': re.compile(re_indent + r'(?P<flag>```[^\s\(]*)(\((?P<language>\S*)\s*(["\'](?P<source>.+?)["\'])?\s*(\((?P<namespace>\S+?)\))?(?P<other>.+?)?\))?', re.U),
            'blockquote-start': re.compile(re_indent + r'("""|\'\'\'|blockquote:)' + re_attributes + r'((\[\s*\*(?P<id>\S+)(?P<id_extra>.+?)\])|(\[\s*\#(?P<name>\S+)(?P<name_extra>.+?)\])|(\[\s*(?P<citation>.*?)\]))?', re.U),
            'fragment-start': re.compile(re_indent + r'~~~' + re_attributes, re.U),
            'paragraph-start': re.compile(r'\w*', re.U),
            'line-start': re.compile(re_indent + r'\|' + re_attributes + re_one_space + re_content, re.U),
            'blank-line': re.compile(r'^\s*$'),
            'record-start': re.compile(re_indent + re_name + r'::(?P<field_names>.*)', re.U),
            'list-item': re.compile(re_indent + re_ul_marker + re_attributes + re_spaces + re_content, re.U),
            'num-list-item': re.compile(re_indent + re_ol_marker + re_attributes + re_spaces + re_content, re.U),
            'labeled-list-item': re.compile(re_indent + re_ll_marker + re_attributes + re_spaces + re_content, re.U),
            'block-insert': re.compile(re_indent + r'>>>' + re_attributes, re.U),
            'string-def': re.compile(re_indent + r'\$' + re_name + '=' + re_content, re.U),
            'embedded-xml': re.compile(re_indent + r'(?P<xmltag>\<\?xml.+)', re.U)
        }

    def parse(self, source):
        self.source = StringSource(source)
        try:
            self.stateMachine.run(self.source)
        except EOFError:
            raise SAMParserError("Document ended before structure was complete.")

    def _new_file(self, source):
        line = source.next_line
        match = self.patterns['sam-declaration'].match(line)
        if match:
            self.doc.new_root(match)
            return "SAM", (source, None)
        else:
            raise SAMParserError("Not a SAM file!")

    def _block(self, context):
        source, match = context
        indent = len(match.group("indent"))
        block_name = match.group("name").strip()
        attributes = self.parse_block_attributes(match.group("attributes"))
        content = match.group("content")
        parsed_content = None if content == '' else para_parser.parse(content, self.doc)
        self.doc.new_block(block_name, attributes, parsed_content, indent)
        return "SAM", context

    def _codeblock_start(self, context):
        source, match = context
        indent = len(match.group("indent"))
        codeblock_flag = match.group("flag")
        self.patterns['codeblock-end'] = re.compile(r'(\s*)' + codeblock_flag + r'\s*$')

        attributes = {}

        language = match.group("language")
        if language is not None:
            attributes['language'] = language

        source = match.group("source")
        if source is not None:
            attributes["source"] = source

        namespace = match.group("namespace")
        if namespace is not None:
            attributes["namespace"] = namespace

        other = match.group("other")
        if other is not None:
            attributes.update(self.parse_block_attributes(other))

        self.doc.new_block('codeblock', attributes, None, indent)
        self.current_text_block = TextBlock()
        return "CODEBLOCK", context

    def _codeblock(self, context):
        source, match = context
        line = source.next_line
        if self.patterns['codeblock-end'].match(line):
            self.doc.new_flow(Pre(self.current_text_block))
            self.current_text_block = None
            return "SAM", context
        else:
            self.current_text_block.append(line)
            return "CODEBLOCK", context

    def _blockquote_start(self, context):
        source, match = context
        indent = len(match.group('indent'))

        # TODO: Refactor this with the paraparser version

        extra = source.current_line.rstrip()[len(match.group(0)):]
        if extra:
            raise SAMParserError("Extra text found after blockquote start: " + extra)

        attributes = self.parse_block_attributes(match.group("attributes"))

        b = self.doc.new_block('blockquote', attributes, None, indent)

        # see if there is a citation
        try:
            idref = match.group('id')
        except IndexError:
            idref = None
        try:
            nameref = match.group('name')
        except IndexError:
            nameref = None
        try:
            citation = match.group('citation')
        except IndexError:
            citation = None

        if idref:
            citation_type = 'idref'
            citation_value = idref.strip()
            extra = match.group('id_extra')
        elif nameref:
            citation_type = 'nameref'
            citation_value = nameref.strip()
            extra = match.group('name_extra')
        elif citation:
            citation_type = 'citation'
            citation_value = citation.strip()
        else:
            citation_type = None

        if citation_type:
            cit = (Citation(citation_type, citation_value, extra))
            b.add_child(cit)

        return "SAM", context

    def _fragment_start(self, context):
        source, match = context
        indent = len(match.group('indent'))

        attributes = {}

        attributes_string = match.group("attributes")
        if attributes_string is not None:
            attributes.update(self.parse_block_attributes(attributes_string))

        self.doc.new_block('fragment', attributes, None, indent)
        return "SAM", context

    def _paragraph_start(self, context):
        source, match = context
        line = source.current_line
        local_indent = len(line) - len(line.lstrip())
        self.doc.new_paragraph(None, '', local_indent)
        self.current_text_block = TextBlock(line)
        return "PARAGRAPH", context

    def _paragraph(self, context):
        source, match = context
        try:
            line = source.next_line
        except EOFError:
            f = para_parser.parse(self.current_text_block.text, self.doc)
            self.current_text_block = None
            self.doc.new_flow(f)
            return "END", context

        if self.patterns['blank-line'].match(line):
            f = para_parser.parse(self.current_text_block.text, self.doc)
            self.current_text_block = None
            self.doc.new_flow(f)
            return "SAM", context

        if self.doc.in_context(['p', 'li']):
            if self.patterns['list-item'].match(line) or self.patterns['num-list-item'].match(line) or self.patterns['labeled-list-item'].match(line):
                f = para_parser.parse(self.current_text_block.text, self.doc)
                self.current_text_block = None
                self.doc.new_flow(f)
                source.return_line()
                return "SAM", context

        self.current_text_block.append(line)
        return "PARAGRAPH", context

    def _list_item(self, context):
        source, match = context
        indent = len(match.group("indent"))
        attributes = self.parse_block_attributes(match.group("attributes"))
        self.doc.new_unordered_list_item(attributes, indent)
        self.current_text_block = TextBlock(str(match.group("content")).strip())
        return "PARAGRAPH", context

    def _num_list_item(self, context):
        source, match = context
        indent = len(match.group("indent"))
        attributes = self.parse_block_attributes(match.group("attributes"))
        self.doc.new_ordered_list_item(attributes, indent)
        self.current_text_block = TextBlock(str(match.group("content")).strip())
        return "PARAGRAPH", context

    def _labeled_list_item(self, context):
        source, match = context
        indent = len(match.group("indent"))
        label = match.group("label")
        attributes = self.parse_block_attributes(match.group("attributes"))
        self.doc.new_labeled_list_item(attributes, indent, label)
        self.current_text_block = TextBlock(str(match.group("content")).strip())
        return "PARAGRAPH", context

    def _block_insert(self, context):
        source, match = context
        indent = len(match.group("indent"))
        self.doc.new_block("insert", attributes=parse_insert(match.group("attributes")), text=None, indent=indent)
        return "SAM", context

    def _string_def(self, context):
        source, match = context
        indent = len(match.group("indent"))
        self.doc.new_string_def(match.group('name'), para_parser.parse(match.group('content'), self.doc), indent=indent)
        return "SAM", context

    def _line_start(self, context):
        source, match = context
        indent = len(match.group("indent"))
        self.doc.new_block('line', self.parse_block_attributes(match.group("attributes")), para_parser.parse(match.group('content'), self.doc, strip=False), indent=indent)
        return "SAM", context

    def _record_start(self, context):
        source, match = context
        indent = len(match.group("indent"))
        record_name = match.group("name").strip()
        field_names = [x.strip() for x in match.group("field_names").split(',')]
        self.doc.new_record_set(record_name, field_names, indent)
        return "RECORD", context

    def _record(self, context):
        source, match = context
        try:
            line = source.next_line
        except EOFError:
            return "END", context
        indent = len(line) - len(line.lstrip())
        if self.patterns['blank-line'].match(line):
            return "SAM", context
        elif indent < self.doc.current_block.indent:
            source.return_line()
            return "SAM", context
        else:
            field_values = [x.strip() for x in re.split(r'(?<!\\),',line)]
            if len(field_values) != len(self.doc.fields):
                raise SAMParserError("Record length does not match record set header. At:\n\n " + line)
            record = list(zip(self.doc.fields, field_values))
            self.doc.new_record(record)
            return "RECORD", context

    def _embedded_xml(self, context):
        source, match = context
        indent = len(match.group("indent"))
        embedded_xml_parser = xml.parsers.expat.ParserCreate()
        embedded_xml_parser.XmlDeclHandler=self._embedded_xml_declaration_check
        embedded_xml_parser.Parse(source.current_line.strip())
        xml_lines = []
        try:
            while True:
                line = source.next_line
                xml_lines.append(line)
                embedded_xml_parser.Parse(line)
        except xml.parsers.expat.ExpatError as err:
            if err.code==9: #junk after document element
                source.return_line()
                xml_text = ''.join(xml_lines[:-1])
                self.doc.new_embedded_xml(xml_text, indent)
                return "SAM", context
            else:
                raise

    def _embedded_xml_declaration_check(self, version, encoding, standalone):
        if version != "1.0":
            raise SAMParserError("The version of an embedded XML fragment must be 1.0.")
        # encoding is None when the declaration omits it, in which case expat assumes UTF-8.
        if encoding is not None and encoding.upper() != "UTF-8":
            raise SAMParserError("The encoding of an embedded XML fragment must be UTF-8.")

    def _sam(self, context):
        source, match = context
        try:
            line = source.next_line
        except EOFError:
            return "END", context

        match = self.patterns['comment'].match(line)
        if match is not None:
            self.doc.new_comment(Comment(line.strip()[1:]))
            return "SAM", (source, match)

        match = self.patterns['record-start'].match(line)
        if match is not None:
            return "RECORD-START", (source, match)

        match = self.patterns['blank-line'].match(line)
        if match is not None:
            return "SAM", (source, match)

        match = self.patterns['codeblock-start'].match(line)
        if match is not None:
            return "CODEBLOCK-START", (source, match)

        match = self.patterns['blockquote-start'].match(line)
        if match is not None:
            return "BLOCKQUOTE-START", (source, match)

        match = self.patterns['fragment-start'].match(line)
        if match is not None:
            return "FRAGMENT-START", (source, match)

        match = self.patterns['list-item'].match(line)
        if match is not None:
            return "LIST-ITEM", (source, match)

        match = self.patterns['num-list-item'].match(line)
        if match is not None:
            return "NUM-LIST-ITEM", (source, match)

        match = self.patterns['labeled-list-item'].match(line)
        if match is not None:
            return "LABELED-LIST-ITEM", (source, match)

        match = self.patterns['block-insert'].match(line)
        if match is not None:
            return "BLOCK-INSERT", (source, match)

        match = self.patterns['string-def'].match(line)
        if match is not None:
            return "STRING-DEF", (source, match)

        match = self.patterns['line-start'].match(line)
        if match is not None:
            return "LINE-START", (source, match)

        match = self.patterns['embedded-xml'].match(line)
        if match is not None:
            return "EMBEDDED-XML", (source, match)

        match = self.patterns['block-start'].match(line)
        if match is not None:
            return "BLOCK", (source, match)

        match = self.patterns['paragraph-start'].match(line)
        if match is not None:
            return "PARAGRAPH-START", (source, match)

        raise SAMParserError("I'm confused")

    def serialize(self, serialize_format):
        return self.doc.serialize(serialize_format)

    def parse_block_attributes(self, attributes_string):
        result = {}
        try:
            attributes_list = attributes_string.split()
        except AttributeError:
            return None
        unexpected_attributes = [x for x in attributes_list if not (x[0] in '?#*~')]
        if unexpected_attributes:
            raise SAMParserError("Unexpected attribute(s): {0}".format(', '.join(unexpected_attributes)))
        ids = [x[1:] for x in attributes_list if x[0] == '*']
        if len(ids) > 1:
            raise SAMParserError("More than one ID specified: " + ", ".join(ids))
        names = [x[1:] for x in attributes_list if x[0] == '#']
        if len(names) > 1:
            raise SAMParserError("More than one name specified: " + ", ".join(names))
        language = [x[1:] for x in attributes_list if x[0] == '~']
        if len(language) > 1:
            raise SAMParserError("More than one language specified: " + ", ".join(language))
        conditions = [x[1:] for x in attributes_list if x[0] == '?']
        if ids:
            if ids[0] in self.doc.ids:
                raise SAMParserError("Duplicate ID found: " + ids[0])
            self.doc.ids.extend(ids)
            result["id"] = "".join(ids)
        if names:
            result["name"] = "".join(names)
        if language:
            result["xml:lang"] = "".join(language)
        if conditions:
            result["conditions"] = " ".join(conditions)
        return result
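
A minimal, standalone sketch of the attribute shorthand that parse_block_attributes handles above (* for an ID, # for a name, ~ for a language, ? for conditions). This is illustrative only: the helper name is invented for the sketch, and it omits the duplicate-ID bookkeeping against self.doc.ids and the more-than-one checks that the real method performs.

# Standalone sketch of the block-attribute shorthand parsed above.
# Assumption: no document-level ID tracking; the helper name is hypothetical.
def sketch_parse_block_attributes(attributes_string):
    if attributes_string is None:
        return None
    tokens = attributes_string.split()
    unexpected = [t for t in tokens if t[0] not in '?#*~']
    if unexpected:
        raise ValueError("Unexpected attribute(s): " + ", ".join(unexpected))
    result = {}
    ids = [t[1:] for t in tokens if t[0] == '*']
    names = [t[1:] for t in tokens if t[0] == '#']
    languages = [t[1:] for t in tokens if t[0] == '~']
    conditions = [t[1:] for t in tokens if t[0] == '?']
    if ids:
        result["id"] = "".join(ids)
    if names:
        result["name"] = "".join(names)
    if languages:
        result["xml:lang"] = "".join(languages)
    if conditions:
        result["conditions"] = " ".join(conditions)
    return result

print(sketch_parse_block_attributes("*intro #Introduction ~en ?print"))
# {'id': 'intro', 'name': 'Introduction', 'xml:lang': 'en', 'conditions': 'print'}
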
Exemplo n.º 41
0
        newState = "groen"
        time = 30
    else:
        newState = "rood"
        time = time - 1

    return (newState, time, car)


def timergroen(time, car):
    if time <= 0:
        newState = "rood"
        time = 30
    else:
        newState = "groen"
        time = time - 1

    if (car > 0):
        car = car - 1

    return (newState, time, car)


if __name__ == "__main__":
    m = StateMachine()
    m.add_state("rood", timerrood, end_state=1)
    m.add_state("groen", timergroen)
    m.set_start("rood")
    m.run(30, 0)

# https://www.python-course.eu/finite_state_machine.php
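
Every example in this collection assumes a StateMachine class with the add_state / set_start / run interface from the python-course.eu tutorial linked above. A minimal sketch of that class follows for reference; note that state names are upper-cased, which is why examples can mix spellings such as "Out_of_Range" and "OUT_OF_RANGE". Examples whose handlers take or return extra cargo values (for instance the traffic-light run(30, 0) call above) or that call methods such as get_tvstate() evidently use extended variants not shown here.

# Minimal sketch of the StateMachine interface used throughout these examples,
# following the python-course.eu tutorial linked above (single cargo value).
class StateMachine:
    def __init__(self):
        self.handlers = {}
        self.startState = None
        self.endStates = []

    def add_state(self, name, handler, end_state=0):
        name = name.upper()
        self.handlers[name] = handler
        if end_state:
            self.endStates.append(name)

    def set_start(self, name):
        self.startState = name.upper()

    def run(self, cargo):
        try:
            handler = self.handlers[self.startState]
        except KeyError:
            raise RuntimeError("must call set_start() before run()")
        if not self.endStates:
            raise RuntimeError("at least one state must be an end_state")
        while True:
            newState, cargo = handler(cargo)
            if newState.upper() in self.endStates:
                break
            handler = self.handlers[newState.upper()]
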
Exemplo n.º 42
0
class SamParaParser:
    def __init__(self):
        # These attributes are set by the parse method
        self.doc = None
        self.para = None
        self.current_string = None
        self.flow = None

        self.stateMachine = StateMachine()
        self.stateMachine.add_state("PARA", self._para)
        self.stateMachine.add_state("ESCAPE", self._escape)
        self.stateMachine.add_state("END", None, end_state=1)
        self.stateMachine.add_state("ANNOTATION-START", self._annotation_start)
        self.stateMachine.add_state("CITATION-START", self._citation_start)
        self.stateMachine.add_state("BOLD-START", self._bold_start)
        self.stateMachine.add_state("ITALIC-START", self._italic_start)
        self.stateMachine.add_state("CODE-START", self._code_start)
        self.stateMachine.add_state("QUOTES-START", self._quotes_start)
        self.stateMachine.add_state("INLINE-INSERT", self._inline_insert)
        self.stateMachine.add_state("CHARACTER-ENTITY", self._character_entity)
        self.stateMachine.set_start("PARA")
        self.patterns = {
            'escape': re.compile(r'\\', re.U),
            'escaped-chars': re.compile(r'[\\\(\{\}\[\]_\*,\.\*`"&]', re.U),
            'annotation': re.compile(
                r'(?<!\\)\{(?P<text>.*?)(?<!\\)\}(\(\s*(?P<type>\S*?\s*[^\\"\']?)(["\'](?P<specifically>.*?)["\'])??\s*(\((?P<namespace>\w+)\))?\s*(~(?P<language>[\w-]+))?\))?', re.U),
            'bold': re.compile(r'\*(?P<text>((?<=\\)\*|[^\*])*)(?<!\\)\*', re.U),
            'italic': re.compile(r'_(?P<text>((?<=\\)_|[^_])*)(?<!\\)_', re.U),
            'code': re.compile(r'`(?P<text>(``|[^`])*)`', re.U),
            'quotes': re.compile(r'"(?P<text>((?<=\\)"|[^"])*)(?<!\\)"', re.U),
            'inline-insert': re.compile(r'>\((?P<attributes>.*?)\)', re.U),
            'character-entity': re.compile(r'&(\#[0-9]+|#[xX][0-9a-fA-F]+|[\w]+);'),
            'citation': re.compile(r'(\[\s*\*(?P<id>\S+)(\s+(?P<id_extra>.+?))?\])|(\[\s*\#(?P<name>\S+)(\s+(?P<name_extra>.+?))?\])|(\[\s*(?P<citation>.*?)\])', re.U)
        }

    def parse(self, para, doc, strip=True):
        if para is None:
            return None
        self.doc = doc
        self.para = Para(para, strip)
        self.current_string = ''
        self.flow = Flow()
        self.stateMachine.run(self.para)
        return self.flow

    def _para(self, para):
        try:
            char = para.next_char
        except IndexError:
            self.flow.append(self.current_string)
            self.current_string = ''
            return "END", para
        if char == '\\':
            return "ESCAPE", para
        elif char == '{':
            return "ANNOTATION-START", para
        elif char == '[':
            return "CITATION-START", para
        elif char == "*":
            return "BOLD-START", para
        elif char == "_":
            return "ITALIC-START", para
        elif char == "`":
            return "CODE-START", para
        elif char == '"':
            return "QUOTES-START", para
        elif char == ">":
            return "INLINE-INSERT", para
        elif char == "&":
            return "CHARACTER-ENTITY", para
        else:
            self.current_string += char
            return "PARA", para

    def _annotation_start(self, para):
        match = self.patterns['annotation'].match(para.rest_of_para)
        if match:
            self.flow.append(self.current_string)
            self.current_string = ''
            annotation_type = match.group('type')
            language = match.group('language')
            text = self._unescape(match.group("text"))

            # If there is an annotated phrase with no annotation, look back
            # to see if it has been annotated already, and if so, copy the
            # closest preceding annotation.
            if annotation_type is None and not language:
                # First look back in the current flow
                # (which is not part of the doc structure yet).
                previous = self.flow.find_last_annotation(text)
                if previous is not None:
                    self.flow.append(previous)
                else:
                    # Then look back in the document.
                    previous = self.doc.find_last_annotation(text)
                    if previous is not None:
                        self.flow.append(previous)

                    # Else output a warning.
                    else:
                        self.current_string += text
                        SAM_parser_warning(
                                "Blank annotation found: {" +
                                text + "} " +
                                "If you are trying to insert curly braces " +
                                "into the document, use \{" + text +
                                "]. Otherwise, make sure annotated text matches "
                                "previous annotation exactly."
                        )
            else:
                # Check for a link shortcut (a URL used as the annotation type).
                if urlparse(annotation_type, None).scheme is not None:
                    specifically = annotation_type
                    annotation_type = 'link'
                else:
                    specifically = match.group('specifically') if match.group('specifically') is not None else None
                namespace = match.group('namespace').strip() if match.group('namespace') is not None else None
                self.flow.append(Annotation(annotation_type, text, specifically, namespace, language))
            para.advance(len(match.group(0)) - 1)
            return "PARA", para
        else:
            self.current_string += '{'
            return "PARA", para

    def _citation_start(self, para):
        match = self.patterns['citation'].match(para.rest_of_para)
        if match:
            self.flow.append(self.current_string)
            self.current_string = ''

            try:
                idref = match.group('id')
            except IndexError:
                idref = None
            try:
                nameref = match.group('name')
            except IndexError:
                nameref = None
            try:
                citation = match.group('citation')
            except IndexError:
                citation = None

            if idref:
                citation_type = 'idref'
                citation_value = idref.strip()
                extra = match.group('id_extra')
            elif nameref:
                citation_type = 'nameref'
                citation_value = nameref.strip()
                extra = match.group('name_extra')
            else:
                citation_type = 'citation'
                citation_value = citation.strip()
                extra = None

            self.flow.append(Citation(citation_type, citation_value, extra))
            para.advance(len(match.group(0)) - 1)
            return "PARA", para
        else:
            self.current_string += '['
            return "PARA", para

    def _bold_start(self, para):
        match = self.patterns['bold'].match(para.rest_of_para)
        if match:
            self.flow.append(self.current_string)
            self.current_string = ''
            self.flow.append(Annotation('bold', self._unescape(match.group("text"))))
            para.advance(len(match.group(0)) - 1)
        else:
            self.current_string += '*'
        return "PARA", para

    def _italic_start(self, para):
        match = self.patterns['italic'].match(para.rest_of_para)
        if match:
            self.flow.append(self.current_string)
            self.current_string = ''
            self.flow.append(Annotation('italic', self._unescape(match.group("text"))))
            para.advance(len(match.group(0)) - 1)
        else:
            self.current_string += '_'
        return "PARA", para

    def _code_start(self, para):
        match = self.patterns['code'].match(para.rest_of_para)
        if match:
            self.flow.append(self.current_string)
            self.current_string = ''
            self.flow.append(Annotation('code', (match.group("text")).replace("``", "`")))
            para.advance(len(match.group(0)) - 1)
        else:
            self.current_string += '`'
        return "PARA", para

    def _quotes_start(self, para):
        match = self.patterns['quotes'].match(para.rest_of_para)
        if match:
            self.flow.append(self.current_string)
            self.current_string = ''
            self.flow.append(Annotation('quotes', self._unescape(match.group("text"))))
            para.advance(len(match.group(0)) - 1)
        else:
            self.current_string += '"'
        return "PARA", para

    def _inline_insert(self, para):
        match = self.patterns['inline-insert'].match(para.rest_of_para)
        if match:
            self.flow.append(self.current_string)
            self.current_string = ''
            self.flow.append(InlineInsert(parse_insert(match.group("attributes"))))
            para.advance(len(match.group(0)) - 1)
        else:
            self.current_string += '>'
        return "PARA", para

    def _inline_insert_id(self, para):
        match = self.patterns['inline-insert_id'].match(para.rest_of_para)
        if match:
            self.flow.append(self.current_string)
            self.current_string = ''
            self.flow.append(InlineInsert('reference', match.group("id")))
            para.advance(len(match.group(0)) - 1)
        else:
            self.current_string += '>'
        return "PARA", para

    def _character_entity(self, para):
        match = self.patterns['character-entity'].match(para.rest_of_para)
        if match:
            self.current_string += self.patterns['character-entity'].sub(self._replace_charref, match.group(0))
            para.advance(len(match.group(0)) - 1)
        else:
            self.current_string += '&'
        return "PARA", para

    def _replace_charref(self, match):
        try:
            charref = match.group(0)
        except AttributeError:
            charref = match
        character = html.unescape(charref)
        if character == charref:  # Escape not recognized
            raise SAMParserError("Unrecognized character entity found: " + charref)
        return character

    def _escape(self, para):
        char = para.next_char
        if self.patterns['escaped-chars'].match(char):
            self.current_string += char
        else:
            self.current_string += '\\' + char
        return "PARA", para

    def _unescape(self, string):
        result = ''
        e = enumerate(string)
        for pos, char in e:
            try:
                if char == '\\' and self.patterns['escaped-chars'].match(string[pos+1]):
                    result += string[pos+1]
                    next(e, None)
                elif char == '&':
                    match = self.patterns['character-entity'].match(string[pos:])
                    if match:
                        result += self.patterns['character-entity'].sub(self._replace_charref, match.group(0))
                        for i in range(0, len(match.group(0))):
                            next(e, None)
                    else:
                        result += char
                else:
                    result += char
            except IndexError:
                result += char
        return result
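
The inline-markup regular expressions defined in SamParaParser above can be exercised on their own. Below is a small self-contained check of the bold and code patterns (copied verbatim from the patterns dictionary), showing how an escaped asterisk and doubled backticks are treated.

# Self-contained check of two inline patterns copied from SamParaParser above.
import re

bold = re.compile(r'\*(?P<text>((?<=\\)\*|[^\*])*)(?<!\\)\*', re.U)
code = re.compile(r'`(?P<text>(``|[^`])*)`', re.U)

m = bold.match(r'*two \* stars* and more')
print(m.group('text'))                      # 'two \* stars' (the backslash is stripped later by _unescape)

m = code.match('`a``b` and more')
print(m.group('text').replace('``', '`'))   # 'a`b' (doubled backticks collapse, as in _code_start)
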
Exemplo n.º 43
0
def tens_counter(val):
    # Assumed opening (truncated in the source), mirroring twenties_counter below.
    print("TENS State:")
    while 1:
        if val <= 0 or val >= 30:
            newState = "Out_of_Range"; break
        elif 1 <= val < 10:
            newState = "ONES"; break
        elif 20 <= val < 30:
            newState = "TWENTIES"; break
        else:
            print(" #%2.1f+" % val)
        val = math_func(val)
    print(" >>")
    return (newState, val)
def twenties_counter(val):
    print ("TWENTIES State:",)
    while 1:
        if val <= 0 or val >= 30:
            newState = "Out_of_Range"; break
        elif 1 <= val < 10:
            newState = "ONES"; break
        elif 10 <= val < 20:
            newState = "TENS"; break
        else:
            print (" *%2.1f+" % val)
        val = math_func(val)
    print( " >>")
    return (newState, val)
def math_func(n):
    from math import sin
    return abs(sin(n))*31
if __name__ == "__main__":
    m = StateMachine()
    m.add_state("ONES", ones_counter)
    m.add_state("TENS", tens_counter)
    m.add_state("TWENTIES", twenties_counter)
    m.add_state("OUT_OF_RANGE", None, end_state=1)
    m.set_start("ONES")
    m.run(1)
    def go(self):
        m = StateMachine()
        # for  tmp in self.__cargos:
        #     print(tmp)
        '''Present'''
        m.add_state("Start", self.start_transitions)
        m.add_state('1A_state', self._1A_state_transitions)
        m.add_state('1C_state', None, end_state=1)
        m.add_state('2A_state', self._2A_state_transitions)
        m.add_state('2B_state', None, end_state=1)
        m.add_state('2C_state', None, end_state=1)

        m.add_state('Z1_VBP_state', self._Z1_VBP_state_transitions)
        m.add_state('Z1_VBZ_state', self._Z1_VBZ_state_transitions)
        m.add_state('1B_VBP_state', None, end_state=1)
        m.add_state('1B_VBZ_state', None, end_state=1)
        m.add_state('1D_state', None, end_state=1)  ### TODO: review, we add quan-subj state for uw-166

        m.add_state('Z2_state', self._Z2_state_transitions)
        m.add_state('3A_state', self._3A_state_transitions)
        m.add_state('3B_state', None, end_state=1)
        m.add_state('3C_state', None, end_state=1)
        m.add_state('4A_state', self._4A_state_transitions)
        m.add_state('4B_state', None, end_state=1)
        m.add_state('4C_state', None, end_state=1)

        '''Past'''
        m.add_state('5A_state', self._5A_state_transitions)
        m.add_state('5C_state', None, end_state=1)
        m.add_state('6A_state', self._6A_state_transitions)
        m.add_state('6B_state', None, end_state=1)
        m.add_state('6C_state', None, end_state=1)

        m.add_state('Z3_state', self._Z3_state_transitions)
        m.add_state('5B_state', None, end_state=1)

        m.add_state('Z4_state', self._Z4_state_transitions)
        m.add_state('7A_state', self._7A_state_transitions)
        m.add_state('7B_state', None, end_state=1)
        m.add_state('7C_state', None, end_state=1)
        m.add_state('8A_state', self._8A_state_transitions)
        m.add_state('8B_state', None, end_state=1)
        m.add_state('8C_state', None, end_state=1)

        '''Future and MD'''
        m.add_state('ZM_state', self._ZM_state_transitions)
        m.add_state('9A_state', self._9A_state_transitions)
        m.add_state('9C_state', None, end_state=1)
        m.add_state('10A_state', self._10A_state_transitions)
        m.add_state('10B_state', None, end_state=1)
        m.add_state('10C_state', None, end_state=1)

        # m.add_state('Z3_state', self._Z3_state_transitions)
        m.add_state('9B_state', self._9B_state_transitions)

        # m.add_state('Z5_state', self._Z5_state_transitions) Z5 merged with 9B
        m.add_state('11A_state', self._11A_state_transitions)
        m.add_state('11B_state', None, end_state=1)
        m.add_state('11C_state', None, end_state=1)
        m.add_state('12A_state', self._12A_state_transitions)
        m.add_state('12B_state', None, end_state=1)
        m.add_state('12C_state', None, end_state=1)
        m.add_state('error_state', None, end_state=1)

        m.set_start("Start")
        m.run(self.__cargos, actions)

        return m.get_tvstate(), m.get_action()