import numpy as np
from sympy import count_ops, preorder_traversal
from sympy.parsing.sympy_parser import parse_expr

# The remaining helpers used below (ParetoSet, Point, run_AI_all, brute_force,
# polyfit, RPN_to_eq, RPN_to_pytorch, get_symbolic_expr_error, get_number_DL,
# dimensionalAnalysis, final_gd, add_bf_on_numbers_on_pareto,
# add_snap_expr_on_pareto and add_snap_expr_on_pareto_polyfit) are assumed to
# be imported from the accompanying AI Feynman modules.


def run_aifeynman(pathdir,filename,BF_try_time,BF_ops_file_type, polyfit_deg=4, NN_epochs=4000, vars_name=[],test_percentage=20):
    # If the variable names are passed, do the dimensional analysis first
    filename_orig = filename
    try:
        if vars_name!=[]:
            dimensionalAnalysis(pathdir,filename,vars_name)
            DR_file = filename + "_dim_red_variables.txt"
            filename = filename + "_dim_red"
        else:
            DR_file = ""
    except:
        DR_file = ""

    # Split the data into train and test set
    input_data = np.loadtxt(pathdir+filename)
    sep_idx = np.random.permutation(len(input_data))

    train_data = input_data[sep_idx[0:(100-test_percentage)*len(input_data)//100]]
    # Use the remaining indices for the test set so that it is disjoint from the train set
    test_data = input_data[sep_idx[(100-test_percentage)*len(input_data)//100:len(input_data)]]

    np.savetxt(pathdir+filename+"_train",train_data)
    if test_data.size != 0:
        np.savetxt(pathdir+filename+"_test",test_data)

    PA = ParetoSet()
    # Run the code on the train data
    PA = run_AI_all(pathdir,filename+"_train",BF_try_time,BF_ops_file_type, polyfit_deg, NN_epochs, PA=PA)
    PA_list = PA.get_pareto_points()

    # Run bf snap on the resulting equations
    for i in range(len(PA_list)):
        try:
            PA = add_bf_on_numbers_on_pareto(pathdir,filename,PA,PA_list[i][-1])
        except:
            continue

    PA_list = PA.get_pareto_points()
    np.savetxt("results/solution_before_snap_%s.txt" %filename,PA_list,fmt="%s")

    # Run zero, integer and rational snap on the resulting equations
    for j in range(len(PA_list)):
        PA = add_snap_expr_on_pareto(pathdir,filename,PA_list[j][-1],PA, "")

    PA_list = PA.get_pareto_points()
    np.savetxt("results/solution_first_snap_%s.txt" %filename,PA_list,fmt="%s")

    # Run gradient descent on the data one more time
    for i in range(len(PA_list)):
        try:
            gd_update = final_gd(pathdir,filename,PA_list[i][-1])
            PA.add(Point(x=gd_update[1],y=gd_update[0],data=gd_update[2]))
        except:
            continue

    PA_list = PA.get_pareto_points()
    for j in range(len(PA_list)):
        PA = add_snap_expr_on_pareto(pathdir,filename,PA_list[j][-1],PA, DR_file)

    list_dt = np.array(PA.get_pareto_points())
    data_file_len = len(np.loadtxt(pathdir+filename))
    log_err = []
    log_err_all = []
    for i in range(len(list_dt)):
        log_err = log_err + [np.log2(float(list_dt[i][1]))]
        log_err_all = log_err_all + [data_file_len*np.log2(float(list_dt[i][1]))]
    log_err = np.array(log_err)
    log_err_all = np.array(log_err_all)

    # Try the found expressions on the test data
    if DR_file=="" and test_data.size != 0:
        test_errors = []
        for i in range(len(list_dt)):
            test_errors = test_errors + [get_symbolic_expr_error(pathdir,filename+"_test",str(list_dt[i][-1]))]
        test_errors = np.array(test_errors)
        # Save all the data to file
        save_data = np.column_stack((test_errors,log_err,log_err_all,list_dt))
    else:
        save_data = np.column_stack((log_err,log_err_all,list_dt))
    np.savetxt("results/solution_%s" %filename_orig,save_data,fmt="%s")
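
# Minimal usage sketch for run_aifeynman (hedged: "example_data/",
# "example1.txt" and "14ops.txt" are placeholder names in the style of the
# examples shipped with AI Feynman, not files this module provides). The input
# is a whitespace-separated table with the dependent variable in the last
# column; the Pareto-optimal solutions are written under results/.
#
#   run_aifeynman("example_data/", "example1.txt", BF_try_time=60,
#                 BF_ops_file_type="14ops.txt", polyfit_deg=3, NN_epochs=500)
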
def run_bf_polyfit(pathdir,pathdir_transformed,filename,BF_try_time,BF_ops_file_type, PA, polyfit_deg=4, output_type=""):
    #############################################################################################################################
    # run BF on the data (+)
    print("Checking for brute force + \n")
    brute_force(pathdir_transformed,filename,BF_try_time,BF_ops_file_type,"+")

    try:
        # load the BF output data
        bf_all_output = np.loadtxt("results.dat", dtype="str")
        express = bf_all_output[:,2]
        prefactors = bf_all_output[:,1]
        prefactors = [str(i) for i in prefactors]

        # Calculate the complexity of the bf expression the same way as for the gradient descent case
        complexity = []
        errors = []
        eqns = []
        for i in range(len(prefactors)):
            try:
                # Undo the transformation that was applied to the output before brute force was run
                if output_type=="":
                    eqn = prefactors[i] + "+" + RPN_to_eq(express[i])
                elif output_type=="acos":
                    eqn = "cos(" + prefactors[i] + "+" + RPN_to_eq(express[i]) + ")"
                elif output_type=="asin":
                    eqn = "sin(" + prefactors[i] + "+" + RPN_to_eq(express[i]) + ")"
                elif output_type=="atan":
                    eqn = "tan(" + prefactors[i] + "+" + RPN_to_eq(express[i]) + ")"
                elif output_type=="cos":
                    eqn = "acos(" + prefactors[i] + "+" + RPN_to_eq(express[i]) + ")"
                elif output_type=="exp":
                    eqn = "log(" + prefactors[i] + "+" + RPN_to_eq(express[i]) + ")"
                elif output_type=="inverse":
                    eqn = "1/(" + prefactors[i] + "+" + RPN_to_eq(express[i]) + ")"
                elif output_type=="log":
                    eqn = "exp(" + prefactors[i] + "+" + RPN_to_eq(express[i]) + ")"
                elif output_type=="sin":
                    eqn = "asin(" + prefactors[i] + "+" + RPN_to_eq(express[i]) + ")"
                elif output_type=="sqrt":
                    eqn = "(" + prefactors[i] + "+" + RPN_to_eq(express[i]) + ")**2"
                elif output_type=="squared":
                    eqn = "sqrt(" + prefactors[i] + "+" + RPN_to_eq(express[i]) + ")"
                elif output_type=="tan":
                    eqn = "atan(" + prefactors[i] + "+" + RPN_to_eq(express[i]) + ")"

                eqns = eqns + [eqn]
                errors = errors + [get_symbolic_expr_error(pathdir,filename,eqn)]
                expr = parse_expr(eqn)
                is_atomic_number = lambda expr: expr.is_Atom and expr.is_number
                numbers_expr = [subexpression for subexpression in preorder_traversal(expr) if is_atomic_number(subexpression)]
                compl = 0
                for j in numbers_expr:
                    try:
                        compl = compl + get_number_DL(float(j))
                    except:
                        compl = compl + 1000000

                # Add the complexity due to symbols
                n_variables = len(expr.free_symbols)
                n_operations = len(count_ops(expr,visual=True).free_symbols)
                if n_operations!=0 or n_variables!=0:
                    compl = compl + (n_variables+n_operations)*np.log2((n_variables+n_operations))

                complexity = complexity + [compl]
            except:
                continue

        # add the BF output to the Pareto plot
        for i in range(len(complexity)):
            PA.add(Point(x=complexity[i], y=errors[i], data=eqns[i]))

        # run gradient descent on the BF output parameters and add the results to the Pareto plot
        for i in range(len(express)):
            try:
                bf_gd_update = RPN_to_pytorch(pathdir+filename,eqns[i])
                PA.add(Point(x=bf_gd_update[1],y=bf_gd_update[0],data=bf_gd_update[2]))
            except:
                continue
    except:
        pass

    #############################################################################################################################
    # run BF on the data (*)
    print("Checking for brute force * \n")
    brute_force(pathdir_transformed,filename,BF_try_time,BF_ops_file_type,"*")

    try:
        # load the BF output data
        bf_all_output = np.loadtxt("results.dat", dtype="str")
        express = bf_all_output[:,2]
        prefactors = bf_all_output[:,1]
        prefactors = [str(i) for i in prefactors]

        # Calculate the complexity of the bf expression the same way as for the gradient descent case
        complexity = []
        errors = []
        eqns = []
        for i in range(len(prefactors)):
            try:
                # Undo the transformation that was applied to the output before brute force was run
                if output_type=="":
                    eqn = prefactors[i] + "*" + RPN_to_eq(express[i])
                elif output_type=="acos":
                    eqn = "cos(" + prefactors[i] + "*" + RPN_to_eq(express[i]) + ")"
                elif output_type=="asin":
                    eqn = "sin(" + prefactors[i] + "*" + RPN_to_eq(express[i]) + ")"
                elif output_type=="atan":
                    eqn = "tan(" + prefactors[i] + "*" + RPN_to_eq(express[i]) + ")"
                elif output_type=="cos":
                    eqn = "acos(" + prefactors[i] + "*" + RPN_to_eq(express[i]) + ")"
                elif output_type=="exp":
                    eqn = "log(" + prefactors[i] + "*" + RPN_to_eq(express[i]) + ")"
                elif output_type=="inverse":
                    eqn = "1/(" + prefactors[i] + "*" + RPN_to_eq(express[i]) + ")"
                elif output_type=="log":
                    eqn = "exp(" + prefactors[i] + "*" + RPN_to_eq(express[i]) + ")"
                elif output_type=="sin":
                    eqn = "asin(" + prefactors[i] + "*" + RPN_to_eq(express[i]) + ")"
                elif output_type=="sqrt":
                    eqn = "(" + prefactors[i] + "*" + RPN_to_eq(express[i]) + ")**2"
                elif output_type=="squared":
                    eqn = "sqrt(" + prefactors[i] + "*" + RPN_to_eq(express[i]) + ")"
                elif output_type=="tan":
                    eqn = "atan(" + prefactors[i] + "*" + RPN_to_eq(express[i]) + ")"

                eqns = eqns + [eqn]
                errors = errors + [get_symbolic_expr_error(pathdir,filename,eqn)]
                expr = parse_expr(eqn)
                is_atomic_number = lambda expr: expr.is_Atom and expr.is_number
                numbers_expr = [subexpression for subexpression in preorder_traversal(expr) if is_atomic_number(subexpression)]
                compl = 0
                for j in numbers_expr:
                    try:
                        compl = compl + get_number_DL(float(j))
                    except:
                        compl = compl + 1000000

                # Add the complexity due to symbols
                n_variables = len(expr.free_symbols)
                n_operations = len(count_ops(expr,visual=True).free_symbols)
                if n_operations!=0 or n_variables!=0:
                    compl = compl + (n_variables+n_operations)*np.log2((n_variables+n_operations))

                complexity = complexity + [compl]
            except:
                continue

        # add the BF output to the Pareto plot
        for i in range(len(complexity)):
            PA.add(Point(x=complexity[i], y=errors[i], data=eqns[i]))

        # run gradient descent on the BF output parameters and add the results to the Pareto plot
        for i in range(len(express)):
            try:
                bf_gd_update = RPN_to_pytorch(pathdir+filename,eqns[i])
                PA.add(Point(x=bf_gd_update[1],y=bf_gd_update[0],data=bf_gd_update[2]))
            except:
                continue
    except:
        pass

    #############################################################################################################################
    # run polyfit on the data
    print("Checking polyfit \n")
    polyfit_result = polyfit(polyfit_deg, pathdir_transformed+filename)
    eqn = str(polyfit_result[0])

    # Undo the transformation that was applied to the output before polyfit was run
    if output_type=="":
        eqn = eqn
    elif output_type=="acos":
        eqn = "cos(" + eqn + ")"
    elif output_type=="asin":
        eqn = "sin(" + eqn + ")"
    elif output_type=="atan":
        eqn = "tan(" + eqn + ")"
    elif output_type=="cos":
        eqn = "acos(" + eqn + ")"
    elif output_type=="exp":
        eqn = "log(" + eqn + ")"
    elif output_type=="inverse":
        eqn = "1/(" + eqn + ")"
    elif output_type=="log":
        eqn = "exp(" + eqn + ")"
    elif output_type=="sin":
        eqn = "asin(" + eqn + ")"
    elif output_type=="sqrt":
        eqn = "(" + eqn + ")**2"
    elif output_type=="squared":
        eqn = "sqrt(" + eqn + ")"
    elif output_type=="tan":
        eqn = "atan(" + eqn + ")"

    polyfit_err = get_symbolic_expr_error(pathdir,filename,eqn)
    expr = parse_expr(eqn)
    is_atomic_number = lambda expr: expr.is_Atom and expr.is_number
    numbers_expr = [subexpression for subexpression in preorder_traversal(expr) if is_atomic_number(subexpression)]

    # Calculate the complexity of the polyfit expression the same way as for the gradient descent case
    complexity = 0
    for j in numbers_expr:
        complexity = complexity + get_number_DL(float(j))
    try:
        # Add the complexity due to symbols
        n_variables = len(polyfit_result[0].free_symbols)
        n_operations = len(count_ops(polyfit_result[0],visual=True).free_symbols)
        if n_operations!=0 or n_variables!=0:
            complexity = complexity + (n_variables+n_operations)*np.log2((n_variables+n_operations))
    except:
        pass

    # run zero snap on polyfit output
    PA_poly = ParetoSet()
    PA_poly.add(Point(x=complexity, y=polyfit_err, data=str(eqn)))
    PA_poly = add_snap_expr_on_pareto_polyfit(pathdir, filename, str(eqn), PA_poly)

    for l in range(len(PA_poly.get_pareto_points())):
        PA.add(Point(PA_poly.get_pareto_points()[l][0],PA_poly.get_pareto_points()[l][1],PA_poly.get_pareto_points()[l][2]))

    print("Complexity RMSE Expression")
    for pareto_i in range(len(PA.get_pareto_points())):
        print(PA.get_pareto_points()[pareto_i])

    return PA
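
# Minimal usage sketch for run_bf_polyfit (hedged: the paths and file names
# below are placeholders). In the full pipeline this function is driven by the
# higher-level search rather than called directly: each call fits one
# transformed view of the output (selected by output_type), and the same
# ParetoSet is threaded through successive calls so every candidate expression
# competes on a single accuracy-complexity front. Note that brute_force is
# expected to leave its output in results.dat in the current working directory.
#
#   PA = ParetoSet()
#   PA = run_bf_polyfit("example_data/", "example_data/", "example1.txt",
#                       BF_try_time=60, BF_ops_file_type="14ops.txt", PA=PA,
#                       polyfit_deg=4, output_type="")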