def test(A_str): print "_____________________________________________________________________" if isinstance(A_str, (str, unicode)): print ("A = " + A_str).replace("\n", "\\n").replace("\t", "\\t") sm = regex.do(A_str, {}).sm else: sm = A_str print "A = ", sm result_1st = complement.do(sm) print "complement(A):", result_1st result_2nd = complement.do(result_1st) print print "union(A, complement(A)): All =", is_all(union.do([sm, result_1st])) print "intersection(A, complement(A)): None =", is_none(intersection.do([sm, result_1st])) print "identity(A, complement(complement(A)):", identity.do(sm, result_2nd)
def snap_complement(stream, PatternDict):
    """Parse the curly-bracketed argument of the complement operator and
    return the complement DFA of the (union of the) contained pattern(s).
    """
    pattern_list = snap_curly_bracketed_expression(stream, PatternDict,
                                                   "complement operator", "Co")
    # More than one pattern => complement of their union.
    if len(pattern_list) != 1:
        combined = union.do(pattern_list)
    else:
        combined = pattern_list[0]
    return complement.do(combined)
def test(SmList): print "-------------------------------------------------------------------------------" for tsm in SmList: print "##sm:", tsm sm = parallelize.do(SmList) print "RESULT:", sm complement_sm = complement.do(sm) assert all(superset.do(sm, tsm) == True for tsm in SmList) assert all( DFA.is_Empty(intersection.do([complement_sm, tsm])) for tsm in SmList)
def test(A_str): print "_____________________________________________________________________" if isinstance(A_str, (str, unicode)): print("A = " + A_str).replace("\n", "\\n").replace("\t", "\\t") sm = regex.do(A_str, {}).extract_sm() else: sm = A_str print "A = ", sm ## print "##sm:", sm.get_string(NormalizeF=False) result_1st = complement.do(sm) print "complement(A):", result_1st # .get_string(NormalizeF=False) result_2nd = complement.do(result_1st) ## print "##2nd:", result_2nd.get_string(NormalizeF=False) print print "union(A, complement(A)): All =", DFA.is_Universal( union.do([sm, result_1st])) print "intersection(A, complement(A)): None =", DFA.is_Empty( intersection.do([sm, result_1st])) print "identity(A, complement(complement(A)):", identity.do(sm, result_2nd) assert not commonality(sm, result_1st) assert not commonality(result_1st, result_2nd)
def leave_begin(DfaA, DfaB):
    """PURPOSE: Generate a modified DFA based on A:

          * matches the 'head' of lexemes of 'A' if they match 'B'.

       The head of a lexeme is the part of 'A' which matches 'B'.

       SCHEME:          'LeaveBegin bbbbbbbbbb'

           lexemes before                  lexemes after
           aaaaaaaaaaaxxxxxxxxxx   --->    bbbbbbbbbbbyyyyyyyyyy
                                           bbbbbbbbbbb
    """
    # Cutting everything that does NOT match 'B' (= complement(B)) from the
    # beginning leaves exactly the 'B'-matching head.
    return cut_begin(DfaA, complement.do(DfaB))
def do(A, B):
    """Difference: all lexemes of 'A' which are not lexemes of 'B'.

    Implemented as A & ~(A & B), which equals A \ B.
    """
    common = intersection.do([A, B])
    # What remains of 'A' once the part shared with 'B' is excluded.
    return intersection.do([A, complement.do(common)])
out_n = 0 def output(Sm): global out_n if "help" not in sys.argv: print "sm%i: %s" % (out_n, Sm) else: open("tmp%i.dot" % out_n, "wb").write(Sm.get_graphviz_string(NormalizeF=True)) print "written 'tmp%i.dot'" % out_n out_n += 1 sm0.mark_state_origins() sm1.mark_state_origins() sm2.mark_state_origins() output(sm0) output(sm1) output(sm2) sm_list = [sm0, sm1, sm2] sm = parallelize.do(sm_list) print "#-------------------------------------------------------------------------------" output(sm) complement_sm = complement.do(sm) assert all(superset.do(sm, tsm) == True for tsm in sm_list) assert all(DFA.is_Empty(intersection.do([complement_sm, tsm])) for tsm in sm_list) print "<terminated>"
def do(A, B):
    """Return the difference of 'A' and 'B': lexemes in 'A' but not in 'B'."""
    overlap = intersection.do([A, B])
    without_overlap = complement.do(overlap)
    # Only what 'A' matches outside of its overlap with 'B' remains.
    result = intersection.do([A, without_overlap])
    return result
def inv(A):
    """Shorthand: the complement ('inverse') of DFA 'A'."""
    return complement.do(A)
def inv(A):
    """Shorthand: the complement ('inverse') of DFA 'A'."""
    return complement.do(A)

def rev(A):
    """Shorthand: the reverse DFA of 'A'."""
    return reverse.do(A)
def snap_anti_pattern(stream, PatternDict):
    """Parse the curly-bracketed argument of the anti-pattern operator and
    return the sanitized complement of the first contained pattern.
    """
    first = snap_curly_bracketed_expression(stream, PatternDict,
                                            "anti-pattern operator", "A")[0]
    complemented = complement.do(first)
    return sanitizer.do(complemented)
def leave_end(DfaA, DfaB):
    """Keep only the part of 'A' related to 'B' at the lexeme's end.

    NOTE(review): despite the name 'leave_end', this calls 'cut_begin' --
    byte-identical to 'leave_begin'. Verify whether 'cut_end' was intended.
    """
    return cut_begin(DfaA, complement.do(DfaB))