def dfs(narr, axioms, debug=False, maxsteps=150, animation_mode=False, printTrim=False):
    """Greedily expand `narr` by repeatedly taking the first applicable axiom step.

    Parameters
    ----------
    narr : nested-array expression to simplify.
    axioms : iterable of axioms passed through to `possible_next_steps`.
    debug : forwarded to `possible_next_steps`.
    maxsteps : hard cap on iterations; exceeding it sets an error message.
    animation_mode : if True, request animation steps and trim animation
        markup from the working expression before expanding it further.
    printTrim : if True (with animation_mode), dump the expression before
        and after `trim_animations` for inspection.

    Returns
    -------
    (return_steps, any_err) : list of (narr, ani_narr, axiom, axiom_idx)
        tuples describing the derivation, and either None or an error string.

    A Ctrl-C (KeyboardInterrupt) aborts the search but still returns the
    steps accumulated so far.
    """
    any_err = None
    # Initialize OUTSIDE the try block: if KeyboardInterrupt arrived before
    # this assignment ran inside `try`, the final `return` would raise
    # NameError instead of returning partial results.
    return_steps = []
    try:
        # Seed queue with the original expression (axiom name '原式' = "original
        # formula"; the literal is user-visible output, kept as-is).
        next_steps = [(narr, None, Axiom(name='原式'), -1)]
        cnt = 0
        while len(next_steps) > 0:
            # fast_return=True below means we only ever follow the first candidate
            narr, ani_narr, axiom, axiom_idx = next_steps[0]
            # Snapshot before trim_animations mutates `narr` in place
            output_narr = deepcopy(narr)
            #print('[output narr]', narr)
            if animation_mode:
                if printTrim:
                    rich.print('[blue]before trim[/]')
                    expression.narr_prettyprint(narr)
                    print('[tex]', expression.narr2tex(narr))
                expression.trim_animations(narr)
                if printTrim:
                    rich.print('[blue]after trim[/]')
                    expression.narr_prettyprint(narr)
                    print('[tex]', expression.narr2tex(narr))
            return_steps.append((output_narr, ani_narr, axiom, axiom_idx))
            next_steps = possible_next_steps(
                narr, axioms, state.value_v1,
                animation_mode=animation_mode, fast_return=True, debug=debug
            )
            if cnt > maxsteps:
                any_err = "maximum steps reached."
                break
            cnt += 1
    except KeyboardInterrupt:
        # deliberate best-effort: user abort returns partial derivation
        pass
    return return_steps, any_err
def _fraction_cancel(narr, debug=False):
    """Cancel the common integer factor between a fraction's numerator
    and denominator term weights, mutating `narr` in place.

    Non-fraction nodes, or fractions whose weights cannot all be
    extracted as integers, are returned unchanged.
    """
    sign, node_type = narr[0].get()
    if node_type != 'frac':
        return narr

    # Pull integer weights out of both sides; bail out if any term
    # has no extractable integer weight.
    num_weights = Axiom()._extract_weights(narr[1])
    if any(w is None for w in num_weights):
        return narr
    den_weights = Axiom()._extract_weights(narr[2])
    if any(w is None for w in den_weights):
        return narr

    # Divide every weight (numerator and denominator together) by
    # their collective GCD.
    split = len(num_weights)
    combined = np.array(num_weights + den_weights, dtype=int)
    common = np.gcd.reduce(combined)
    reduced = (combined // common).tolist()

    # Split the reduced weights back into the two halves.
    num_weights = reduced[:split]
    den_weights = reduced[split:]

    if debug:
        rich.print('[yellow]cancel fraction:', expression.narr2tex(narr))
        print('numerator:', num_weights)
        print('denominator:', den_weights)

    # Write the reduced weights back into the tree in place.
    Axiom()._restore_weights(num_weights, narr[1])
    Axiom()._restore_weights(den_weights, narr[2])

    if debug:
        rich.print('[yellow]after:', expression.narr2tex(narr))
        expression.narr_prettyprint(narr)
    return narr
def test(self, tex=None, debug=False, render=True, printNarr=False, printTrim=False, printJSON=False):
    """Run this axiom against its test cases (or a single `tex` string),
    printing each application result and optionally rendering it.

    Parameters:
        tex: if given, test only this expression (TeX string or narr);
             otherwise use `self.tests`, a list of (input, expected) pairs.
        debug: forwarded to `self.apply`.
        render: if True, render each test case's expressions to HTML
                via the `render_math` module (imported lazily).
        printNarr / printTrim / printJSON: extra diagnostic dumps of each
                result (nested-array form, trimmed form, JSON form).

    Returns None; all output is via print/rich/render side effects.
    """
    # construct test pairs (TeX, expected TeX)
    tests = self.tests if tex is None else [(tex, None)]
    if len(tests) == 0:
        print('[no test case]')
    # test through each testcase for this axiom ...
    for test, expect in tests:
        # each test item may be a TeX string or an already-parsed narr;
        # normalize to have both representations
        expr = test if isinstance(test, str) else expression.narr2tex(test)
        narr = expression.tex2narr(expr) if isinstance(test, str) else test
        results = self.apply(narr, debug=debug)
        # render texs is for HTML preview
        render_texs = [expr]
        rich.print('[bold cyan][[test]][/]', end=" ")
        print(expr)
        #if printNarr:
        #    expression.narr_prettyprint(narr)
        for applied_narr, ani_narr in results:
            # print transition animations
            if ani_narr:
                ani_tex = expression.narr2tex(ani_narr)
                rich.print('[bold cyan][[transition]][/]', end=" ")
                print(ani_tex)
            else:
                rich.print('[bold cyan][[transition]][/]', None)
            # print result expression
            applied_tex = expression.narr2tex(applied_narr)
            rich.print('[bold cyan][[result]][/]', end=" ")
            print(applied_tex, end=" ")
            if expect is not None:
                # `expect` may hold several acceptable TeX strings;
                # membership test accepts any of them
                if applied_tex in expect:
                    rich.print('[bold green]pass[/]')
                else:
                    rich.print('[bold red]failed[/]')
            else:
                print()
            # render texs is for HTML preview
            render_texs.append(applied_tex)
            if printNarr:
                rich.print("[red]narr[/]:")
                expression.narr_prettyprint(applied_narr)
            if printTrim:
                rich.print("[red]trim[/]:")
                # NOTE: trims animation markup from applied_narr in place
                expression.trim_animations(applied_narr)
                expression.narr_prettyprint(applied_narr)
            if printJSON:
                rich.print('[red]JSON[/]:')
                # NOTE(review): local name shadows the stdlib `json` module
                json = mathjs.tex2json(applied_tex, indent=4)
                print(json)
        if render:
            # lazy import: rendering dependencies only needed when used
            import render_math
            render_math.render_equations(render_texs)
def test_alpha_equiv(narr1, narr2, alpha_universe=None, debug=False):
    """Test whether pattern `narr1` matches expression `narr2` up to
    variable substitution (alpha-equivalence), accumulating bindings.

    Parameters:
        narr1: pattern nested-array (may contain VAR / WILDCARDS nodes).
        narr2: concrete nested-array to match against.
        alpha_universe: list of candidate binding dicts ("universes");
            defaults to a single empty universe. (Previously this was the
            mutable default `[{}]`, shared across calls — bindings added by
            `alpha_universe_add_constraint` could leak between invocations.)
        debug: print both trees before matching.

    Returns:
        (matched, universes): bool plus the list of binding universes
        consistent with the match (empty list on failure).
    """
    # Fix for mutable-default-argument bug: build a fresh universe per call.
    if alpha_universe is None:
        alpha_universe = [{}]

    root1, root2 = narr1[0], narr2[0]
    sign1, sign2 = root1[0], root2[0]
    type1, type2 = root1[1], root2[1]

    if debug:
        print('test_alpha_equiv')
        #for alpha in alpha_universe:
        #    alpha_prettyprint(alpha)
        rich.print('[bold green]1[/]', end=" ")
        expression.narr_prettyprint(narr1)
        rich.print('[bold red]2[/]', end=" ")
        expression.narr_prettyprint(narr2)

    if type1 == 'NUMBER':
        # numbers match only on identical type, sign and value
        return type1 == type2 and sign1 == sign2 and narr1[1] == narr2[1], alpha_universe
    elif type1 in ['VAR', 'WILDCARDS']:
        # wildcard names are distinguished from plain variables by a '*' prefix
        name1 = narr1[1] if type1 == 'VAR' else '*' + narr1[1]
        narr2 = deepcopy(narr2)

        # handle sign: fold the pattern's sign into the matched subtree
        _apply_sign(narr2, sign1)
        #print(name1, end=' --> ')
        #print(narr2)
        #print()

        # uppercase pattern such as X, Y only match variables / polynomials
        if name1.isupper() and type2 not in ['VAR', 'sup']:
            return False, []
        # same variables must match same structures
        else:
            return alpha_universe_add_constraint(alpha_universe, name1, narr2)

    # quick test of identicalness for non-wildcards pattern
    wildcards_index = expression.get_wildcards_index(narr1)
    if root1 != root2:
        return False, []
    elif len(narr1[1:]) != len(narr2[1:]) and wildcards_index is None:
        # arity mismatch is only acceptable when a wildcard can absorb it
        return False, []

    alpha_universe_new = []
    # use exact order for concrete match or permuted order for wildcards match.
    # (the latter will possibly generate multiple universes/possibilities)
    permutations = [
        narr2[1:]
    ] if wildcards_index is None else children_wildcards_permutation(narr2)

    for perm_children in permutations:
        match_perm = True  # require all children get matched.
        alpha_universe_copy = deepcopy(alpha_universe)
        for i, c1 in enumerate(narr1[1:]):
            # safe-guard for long wildcards, e.g., 'abc*' matching 'ab'
            if i >= len(perm_children):
                match_perm = False
                break

            if c1[0][1] == 'WILDCARDS':
                # wildcards match (no sign reduce here): group all remaining
                # children under a node of the parent's type
                c2 = [NarrRoot(+1, type2)] + perm_children[i:]
                # unwrap matched group if necessary
                if len(c2[1:]) == 1:
                    c2 = c2[1]
            else:
                # concrete child match
                c2 = perm_children[i]

            # test subtree
            match_any, alpha_universe_copy = test_alpha_equiv(
                c1, c2, alpha_universe=alpha_universe_copy, debug=debug)

            if match_any:
                # stop early for wildcards match
                if c1[0][1] == 'WILDCARDS':
                    break
            else:
                match_perm = False
                break

        if match_perm:
            alpha_universe_new += alpha_universe_copy

    return len(alpha_universe_new) > 0, alpha_universe_new
def mcts(narr0, all_axioms, sample_depth=4, n_sample_times=200, n_maxsteps=100, k=3,
         debug=False, nn_models=False, training=False, force_single_thread=False):
    """Monte-Carlo tree search over axiom applications starting from `narr0`.

    Parameters:
        narr0: root nested-array expression.
        all_axioms: axioms available at each step.
        sample_depth / n_sample_times: rollout depth and count per evaluation.
        n_maxsteps: maximum number of committed moves before aborting.
        k: top-k candidate steps kept by `policy_steps`.
        debug: verbose printing of candidate steps.
        nn_models: optional policy/value networks (False disables them).
        training: if True (with nn_models), record fine-tuning targets
            (the actual fine-tuning calls are currently commented out).
        force_single_thread: disable the multiprocessing manager and
            evaluate candidates sequentially.

    Returns:
        final_steps: list of (narr, axiom, axiom_idx) for the chosen path,
        post-processed by `back_off_step`.
    """
    # node layout: q  n  narr   father axiom axiomIdx children
    root = [0, 1, narr0, None, None, -1, []]
    moves = [root]
    render_steps([(narr0, None, -1)])

    # `manager` is a module-level multiprocessing manager shared by workers
    global manager
    if not force_single_thread:
        # prepare proxy structure for parallel processes
        root[6] = manager.list([])
        root = manager.list(root)
        moves = [root]
    else:
        manager = None

    node = root
    # expressions already on the path — prevents revisiting / cycles
    visited = set([expression.narr2tex(narr0)])
    final_steps = []

    while True:
        q, n, narr, father, axiom, axiom_idx, children = node

        # debug print
        if True: #if debug:
            print('\033[94m', end='')  # ANSI blue
            expr_val = state_value(narr)
            print(f'[current] step={len(moves)}, val={expr_val:.1f}:',
                  expression.narr2tex(narr), end='')
            print('\033[0m', end='\n')  # ANSI reset
            expression.narr_prettyprint(narr)

        # candidate next steps ranked by the policy (top-k)
        steps, step_probs = policy_steps(narr, all_axioms, k=k,
                                         debug=debug, nn_models=nn_models)

        if debug:
            rich.print(f'[magenta]Candidate steps: {len(steps)}[/]')
            for i, (n, a, ai) in enumerate(steps):
                val = state_value(n)
                rich.print(f'[red]#{i+1}[/]', a.name(), ':', end=' ')
                rich.print(f'val={val:.2f}', end=' ')
                print(expression.narr2tex(n), end='\n\n')

        # disabled interactive mode: manually restrict candidate steps
        if False:
            from axiom import Axiom
            render_steps([(narr, Axiom(), -1)] + steps, show_index=True)
            choices = input('Limit choices: ')
            choices = [i for i in map(lambda x: int(x), choices.split(','))]
            rich.print(choices)
            steps = [steps[i - 1] for i in choices]

        if len(steps) == 0:
            # dead end: no axiom applies
            if debug:
                print('[no more candidate steps]')
            if nn_models and training:
                policy = 0  # policy index 0 = "stop"
                #policy_fine_tuning(nn_models, expr, policy, debug=debug, all_axioms=all_axioms)
            break

        # expand/evaluate candidates (parallel when a manager is available)
        if manager and not force_single_thread:
            evaluate_parallel(node, all_axioms, steps, n_sample_times, sample_depth,
                              visited, debug=debug, nn_models=nn_models, k=k,
                              step_probs=step_probs)
        else:
            evaluate(node, all_axioms, steps, n_sample_times, sample_depth,
                     visited, debug=debug, nn_models=nn_models, k=k,
                     step_probs=step_probs)

        # selection
        # c_param=0: pure exploitation when committing a move
        move_choice, w, _ = best_child_of(node, c_param=.0, debug=debug)
        move_to_expr = expression.narr2tex(move_choice[2])

        if w == 0 or move_to_expr in visited:
            # zero-value move or cycle: give up on this branch
            print(f'[abort] best w={w:.2f}, visited: {move_to_expr in visited}')
            break
        else:
            if nn_models and training:
                policy = move_choice[5] + 1  # +1: index 0 is reserved for "stop"
                #policy_fine_tuning(nn_models, expr, policy, debug=debug, all_axioms=all_axioms)
            moves.append(move_choice)
            node = move_choice

            # construct steps to be returned
            final_steps = [(e, a, ai) for q, n, e, f, a, ai, c in moves]
            render_steps(final_steps)

            visited.add(move_to_expr)
            #if debug:
            #    print('[visited]', visited)

        if len(moves) >= n_maxsteps:
            if debug:
                print('[exceed max steps]')
            break

    if len(final_steps) > 0:
        # drop trailing non-improving steps
        final_steps = back_off_step(final_steps, debug=True)

    if nn_models and training:
        # fine-tune value network
        for i, (e, _, _) in enumerate(final_steps):
            # value target: negated remaining distance to the final step
            value = -(len(final_steps) - i - 1)
            #value_fine_tuning(nn_models, e, value, debug=debug)

    return final_steps