def assert_considerations(A, B, result):
    """Set of rules which must hold once '\NotBegin' has been applied.
    """
    assert superset.do(A, result)
    assert intersection.do([result, B]).is_Empty()
    assert identity.do(union.do([result, A]), A)
    assert intersection.do([result, derived.is_begin(A, B)]).is_Empty()
    assert identity.do(union.do([result, derived.is_begin(A, B)]), A)

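# Usage sketch (illustrative, not from the original suite): the rules above
# are meant to hold for the result of 'derived.not_begin', which appears in
# the test drivers below. The pattern strings are arbitrary examples.
def _example_not_begin_check():
    A = regex.do("[a-z]+", {}).extract_sm()
    B = regex.do("for", {}).extract_sm()
    assert_considerations(A, B, derived.not_begin(A, B))
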
def test(A_str, B_str):
    print ("A = " + A_str).replace("\n", "\\n").replace("\t", "\\t")
    print ("B = " + B_str).replace("\n", "\\n").replace("\t", "\\t")
    print "---------------------------"
    A = regex.do(A_str, {}).extract_sm()
    B = regex.do(B_str, {}).extract_sm()
    # Determine lexeme set before union (possible modification)
    ## set0 = lexeme_set.get(A)
    ## set1 = lexeme_set.get(B)
    x = union.do([A, B])
    y = union.do([B, A])
    # Union must be commutative.
    assert identity.do(x, y)
    ## if "SequenceAndLoop" not in sys.argv:
    ##     result      = lexeme_set.get(x)
    ##     expectation = set0
    ##     expectation.update(set1)
    ##     print "#result:", lexeme_set.lexeme_set_to_characters(result)
    ##     print "#expect:", lexeme_set.lexeme_set_to_characters(expectation)
    ##     assert result == expectation
    print "union = ", x
    print

def __core(Original, Cutter):
    print ("Original = " + Original).replace("\n", "\\n").replace("\t", "\\t")
    print ("Cutter   = " + Cutter).replace("\n", "\\n").replace("\t", "\\t")

    orig   = regex.do(Original, {}).extract_sm()
    cutter = regex.do(Cutter, {}).extract_sm()
    # print orig.get_string(NormalizeF=False)
    # print cutter.get_string(NormalizeF=False)

    # ComplementBegin = intersection(P, complement(Q)\Any*)
    result = derived.not_begin(orig, cutter)
    print
    if not result.is_Empty():
        print "superset(Original, result):           %s" % superset.do(orig, result)
    if not result.is_Empty():
        tmp = clean(intersection.do([cutter, result]))
        print "intersection(Cutter, result) is None: %s" % tmp.is_Empty()
        tmp = clean(union.do([orig, result]))
        print "union(Original, result) == Original:  %s" % identity.do(tmp, orig)
    print
    print "result = ", result.get_string(NormalizeF=True)
    assert_considerations(orig, cutter, result)
    return result

def __core(Pattern0, Pattern1):
    print ("Pattern0 = " + Pattern0).replace("\n", "\\n").replace("\t", "\\t")
    print ("Pattern1 = " + Pattern1).replace("\n", "\\n").replace("\t", "\\t")

    p0 = regex.do(Pattern0, {}).finalize(None)
    p1 = regex.do(Pattern1, {}).finalize(None)

    verdict_f = identity_checker.do(p0, p1)
    print "claim = ", verdict_f

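# Usage sketch (illustrative): 'x+' and 'xx*' denote the same language, so
# this invocation should print 'claim =  True'. It relies only on __core()
# as defined above.
def _example_identity_check():
    __core("x+", "xx*")
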
def test(sm):
    backup_sm  = deepcopy(sm)
    optimal_sm = hopcroft.do(sm, CreateNewStateMachineF=CreateNewStateMachineF)
    print optimal_sm

    # A minimized machine must not contain unreachable ('orphan') states.
    orphan_state_index_list = optimal_sm.get_orphaned_state_index_list()
    if len(orphan_state_index_list) != 0:
        print "ERROR: orphan states found = ", orphan_state_index_list
    # Minimization must not change the accepted language.
    if not identity_checker.do(backup_sm, optimal_sm):
        print "ERROR: state machines not equivalent"

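# Usage sketch under an assumption: that DFA() default-constructs a machine
# with an initial state, as the 'tiny*' fixtures further below suggest. The
# transition API matches their use of add_transition().
def _example_minimization():
    tiny = DFA()   # assumed default construction
    tiny.add_transition(tiny.init_state_index, ord('a'), AcceptanceF=True)
    test(tiny)
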
def __core(Pattern0, Pattern1):
    print ("Pattern0 = " + Pattern0).replace("\n", "\\n").replace("\t", "\\t")
    print ("Pattern1 = " + Pattern1).replace("\n", "\\n").replace("\t", "\\t")

    p0 = regex.do(Pattern0, {})
    p0.mount_post_context_sm()
    p0.mount_pre_context_sm()

    p1 = regex.do(Pattern1, {})
    p1.mount_post_context_sm()
    p1.mount_pre_context_sm()

    print "claim = ", identity_checker.do(p0, p1)

def unary_checks(Q, operation):
    Q_plus = beautifier.do(repeat.do(Q))
    Q_star = beautifier.do(repeat.do(Q, min_repetition_n=0))

    Q_is_Q_star = identity.do(Q, Q_star)
    Q_is_Q_plus = identity.do(Q, Q_plus)

    # \Cut{Q Q} = \Nothing
    y = operation(Q, Q)
    assert y.is_Nothing()

    # if Q != Q+: \CutBegin{Q+ Q} = Q*
    if not Q_is_Q_plus:
        y = operation(Q_plus, Q)
        assert identity.do(y, Q_star)

    # if Q != Q*: \CutBegin{Q* Q} = Q*
    if not Q_is_Q_star:
        y = operation(Q_star, Q)
        assert identity.do(y, Q_star)

    # \Cut{Q \Nothing} = Q
    y = operation(Q, DFA.Nothing())
    assert identity.do(y, Q)

    # \Cut{\Nothing Q} = \Nothing
    y = operation(DFA.Nothing(), Q)
    assert y.is_Nothing()

    # \Cut{Q \Universal} = \Nothing
    y = operation(Q, DFA.Universal())
    assert y.is_Nothing()

    # NOT: \Cut{\Universal Q} = \Universal
    if not Q_is_Q_star and not Q_is_Q_plus:
        y = operation(Q, DFA.Universal())
        assert y.is_Nothing()

    return Q_star, Q_plus

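# Usage sketch with an assumed name: 'derived.cut_begin' below is a
# hypothetical stand-in for whichever cut operation the surrounding test
# file actually exercises; the pattern string is an arbitrary example.
def _example_unary_checks():
    Q = regex.do("ab(cd)*", {}).extract_sm()
    unary_checks(Q, lambda A, B: beautifier.do(derived.cut_begin(A, B)))
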
def test(sm, txt):
    global test_i
    backup_sm = deepcopy(sm)
    print "_______________________________________________________________________________"
    print ("(%i)" % test_i),
    print txt
    optimal_sm = hopcroft.do(sm, CreateNewStateMachineF=CreateNewStateMachineF)
    print optimal_sm
    test_i += 1

    orphan_state_index_list = optimal_sm.get_orphaned_state_index_list()
    if len(orphan_state_index_list) != 0:
        print "ERROR: orphan states found = ", orphan_state_index_list
    if not identity_checker.do(backup_sm, optimal_sm):
        print "ERROR: state machines not equivalent"

def __binary_checks(P, Q, Q_plus, Q_star_P):
    cut_P_Q   = __operation(P, Q)
    cut_P_Qp  = __operation(P, Q_plus)
    cut_QpP_Q = __operation(Q_star_P, Q)

    # \Intersection{Q \CutEnd{P Q+}} == \Empty
    assert intersection.do([Q, cut_P_Qp]).is_Empty()
    # \NotEnd{\CutEnd{P Q+} Q} == \CutEnd{P Q+}
    assert identity.do(derived.not_end(cut_P_Qp, Q), cut_P_Qp)
    # \IsEnd{\CutEnd{P Q+} Q} == \Empty
    assert derived.is_end(cut_P_Qp, Q).is_Empty()

    return cut_P_Qp

def test(A, B):
    def __core(A_str, B_str):
        print ("A = " + A_str).replace("\n", "\\n").replace("\t", "\\t")
        print ("B = " + B_str).replace("\n", "\\n").replace("\t", "\\t")

        a_pattern = regex.do(A_str, {})
        b_pattern = regex.do(B_str, {})

        result = intersection.do([a_pattern.sm, b_pattern.sm])
        print "intersection = ", result
        return result

    print "---------------------------"
    x = __core(A, B)
    print
    y = __core(B, A)
    # Intersection must be commutative.
    print "identity: %s" % identity.do(x, y)

def test(A_str):
    print "_____________________________________________________________________"
    if isinstance(A_str, (str, unicode)):
        print ("A = " + A_str).replace("\n", "\\n").replace("\t", "\\t")
        sm = regex.do(A_str, {}).sm
    else:
        sm = A_str
        print "A = ", sm

    result_1st = complement.do(sm)
    print "complement(A):", result_1st
    result_2nd = complement.do(result_1st)
    print
    print "union(A, complement(A)):        All  =", is_all(union.do([sm, result_1st]))
    print "intersection(A, complement(A)): None =", is_none(intersection.do([sm, result_1st]))
    print "identity(A, complement(complement(A))):", identity.do(sm, result_2nd)

def equal(X_str, Y_str):
    global X
    global Y
    global report
    exec("sm0 = " + X_str.replace("All", "All_sm").replace("None", "None_sm"))
    exec("sm1 = " + Y_str.replace("All", "All_sm").replace("None", "None_sm"))
    sm0 = beautifier.do(sm0)
    sm1 = beautifier.do(sm1)
    result = identity.do(sm0, sm1)
    if result is False:
        print "X:", X
        # print "Y:", Y
        print "Error"
        print "%s: -->\n%s" % (X_str, sm0)
        print "%s: -->\n%s" % (Y_str, sm1)
        print "#---------------------------------------------------------"
    protocol.append((X_str, "==", Y_str, result))

def repriorize(MHI, Info, ppt_list, ModeName, history):
    done_f = False
    for ppt in ppt_list:
        priority, pattern, terminal = ppt
        if   priority.mode_hierarchy_index > MHI:              continue
        elif priority.pattern_index >= Info.new_pattern_index: continue
        elif not identity_checker.do(pattern, Info.pattern):   continue

        done_f = True
        history.append([ModeName,
                        pattern.pattern_string(),
                        pattern.sr.mode_name,
                        pattern.incidence_id(),
                        Info.new_pattern_index])
        priority.mode_hierarchy_index = MHI
        priority.pattern_index        = Info.new_pattern_index

    if not done_f and Info.sr.mode_name == ModeName:
        error.warning("PRIORITY mark does not have any effect.", Info.sr)

def __core(Original, Cutter):
    print ("Original = " + Original).replace("\n", "\\n").replace("\t", "\\t")
    print ("Cutter   = " + Cutter).replace("\n", "\\n").replace("\t", "\\t")

    orig   = regex.do(Original, {}).sm
    cutter = regex.do(Cutter, {}).sm
    # print orig.get_string(NormalizeF=False)
    # print cutter.get_string(NormalizeF=False)

    result = clean(complement_end.do(orig, cutter))
    print
    if not special.is_none(result):
        print "superset(Original, result):           %s" % superset.do(orig, result)
    if not special.is_none(result):
        tmp = clean(intersection.do([cutter, result]))
        print "intersection(Cutter, result) is None: %s" % special.is_none(tmp)
        tmp = clean(union.do([orig, result]))
        print "union(Original, result) == Original:  %s" % identity.do(tmp, orig)
    print
    print "result = ", result.get_string(NormalizeF=True)

def repriorize(MHI, Info, ppt_list, ModeName, history):
    done_f = False
    for ppt in ppt_list:
        priority, pattern, terminal = ppt
        if   priority.mode_hierarchy_index > MHI:              continue
        elif priority.pattern_index >= Info.new_pattern_index: continue
        elif not identity_checker.do(pattern, Info.pattern):   continue

        done_f = True
        history.append([ModeName,
                        pattern.pattern_string(),
                        pattern.sr.mode_name,
                        pattern.incidence_id(),
                        Info.new_pattern_index])
        priority.mode_hierarchy_index = MHI
        priority.pattern_index        = Info.new_pattern_index

    if not done_f and Info.sr.mode_name == ModeName:
        error_msg("PRIORITY mark does not have any effect.",
                  Info.sr.file_name, Info.sr.line_n, DontExitF=True)

def test(A, B, OnlyOneF=False):
    def __core(A_str, B_str):
        print ("A = " + A_str).replace("\n", "\\n").replace("\t", "\\t")
        print ("B = " + B_str).replace("\n", "\\n").replace("\t", "\\t")

        a_pattern = regex.do(A_str, {})
        b_pattern = regex.do(B_str, {})

        result = intersection.do([a_pattern.extract_sm(), b_pattern.extract_sm()])
        print "intersection = ", result
        return result

    print "---------------------------"
    x = __core(A, B)
    if OnlyOneF:
        return
    print
    y = __core(B, A)
    print "identity: %s" % identity.do(x, y)

def test(A_str):
    print "_____________________________________________________________________"
    if isinstance(A_str, (str, unicode)):
        print ("A = " + A_str).replace("\n", "\\n").replace("\t", "\\t")
        sm = regex.do(A_str, {}).extract_sm()
    else:
        sm = A_str
        print "A = ", sm
    ## print "##sm:", sm.get_string(NormalizeF=False)

    result_1st = complement.do(sm)
    print "complement(A):", result_1st  # .get_string(NormalizeF=False)
    result_2nd = complement.do(result_1st)
    ## print "##2nd:", result_2nd.get_string(NormalizeF=False)
    print
    print "union(A, complement(A)):        All  =", DFA.is_Universal(union.do([sm, result_1st]))
    print "intersection(A, complement(A)): None =", DFA.is_Empty(intersection.do([sm, result_1st]))
    print "identity(A, complement(complement(A))):", identity.do(sm, result_2nd)

    assert not commonality(sm, result_1st)
    assert not commonality(result_1st, result_2nd)

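# Illustrative extension (not part of the original test): with the same
# operators, De Morgan's law must hold: complement(A|B) == ~A & ~B.
def check_de_morgan(A, B):
    lhs = complement.do(union.do([A, B]))
    rhs = intersection.do([complement.do(A), complement.do(B)])
    assert identity.do(lhs, rhs)
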
def __is_in_patterns(AllegedIdenticalSM, MyDB):
    for pattern_str, info in MyDB.items():
        pattern = info[0]
        if identity_checker.do(AllegedIdenticalSM, pattern):
            return pattern_str
    return ""

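# Usage sketch (hypothetical data): 'MyDB' maps a pattern string to an info
# tuple whose first element is the compiled state machine. Since 'x+' and
# 'xx*' accept the same language, the lookup should report "x+".
def _example_is_in_patterns():
    db = { "x+": (regex.do("x+", {}).extract_sm(),) }
    assert __is_in_patterns(regex.do("xx*", {}).extract_sm(), db) == "x+"
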
tiny2.add_transition(tiny2.init_state_index, ord('2'), AcceptanceF=True)

backup0 = deepcopy(tiny0)
backup1 = deepcopy(tiny1)
backup2 = deepcopy(tiny2)

i = -1
for flag_k in range(0, 4):
    for flag_i in range(0, 4):
        i += 1
        bof_f = flag_i & 2 == 2
        eof_f = flag_i & 1 == 1

        # Clone for this event
        sm_pre  = None
        sm_post = None
        if flag_k & 2 == 2: sm_pre  = tiny0.clone()  # None
        if flag_k & 1 == 1: sm_post = tiny2.clone()
        sm = tiny1.clone()

        # Double check on 'cloner'
        assert identity_checker.do(sm, backup1)
        assert sm_pre  is None or identity_checker.do(sm_pre,  backup0)
        assert sm_post is None or identity_checker.do(sm_post, backup2)

        test(i, sm_pre, sm, sm_post, bof_f, eof_f)

def assert_considerations(A, B, result):
    assert superset.do(A, result)
    assert intersection.do([result, B]).is_Empty()
    assert identity.do(union.do([result, A]), A)
    assert intersection.do([result, derived.is_in(A, B)]).is_Empty()
    assert identity.do(union.do([result, derived.is_in(A, B)]), A)