import utils


def test(target, solufunc):
    solution, S = solufunc(target)
    valid, missing, excess, error_pieces = utils.check_solution(
        target, solution)
    if not valid:
        raise TypeError("The solution is not valid!")
    else:
        # if the solution is valid, test time performance and accuracy
        # TIME PERFORMANCE
        # There will be three different values of the parameter 'target',
        # with increasing complexity, in the real test.
        # ACCURACY
        if len(error_pieces) != 0:
            raise TypeError('Wrong shape')
        total_blocks = sum([sum(row) for row in target])
        total_error = ((100 * missing / total_blocks)
                       + (100 * excess / total_blocks))
        print('total error: {}'.format(total_error))
        return total_error
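
# A minimal usage sketch of test() above, assuming a solver callable that
# returns a (solution, score) pair in the format utils.check_solution
# expects. 'my_solver' is a hypothetical placeholder, not part of this repo.
if __name__ == '__main__':
    example_target = [[1, 0, 1, 1],
                      [1, 1, 1, 1],
                      [0, 1, 1, 1],
                      [1, 1, 0, 0]]
    total_error = test(example_target, my_solver)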
# ####################################################
from main import Tetris
import utils
import timeit

# Example target shape
target = [[1, 0, 1, 1],
          [1, 1, 1, 1],
          [0, 1, 1, 1],
          [1, 1, 0, 0]]
# NOTE: in your test, you may not use this example.
# Uncomment the following line to generate a random target shape
# target = utils.generate_target(width=10, height=10, density=0.6)
# NOTE: it is recommended to keep density below 0.8

solution = Tetris(target)
# checks if the solution is valid
valid, missing, excess, error_pieces = utils.check_solution(
    target, solution)

if not valid:
    print("The solution is not valid!")
else:
    # if the solution is valid, test time performance and accuracy
    # TIME PERFORMANCE
    # There will be three different values of the parameter 'target',
    # with increasing complexity, in the real test.
    time_set = timeit.timeit('Tetris({})'.format(target),
                             'from main import Tetris', number=1)
    if time_set > 600:
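
# (Aside on the timing harness above: timeit.timeit receives the statement
# as a string, so the target grid is embedded via its list repr and the
# import is supplied through the setup argument; number=1 runs the solver
# exactly once.)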
    # debugGrid(lambda x: x.stateid)
    ############################
    # read the solution back out of the grid: empty (state 0) and forbidden
    # (state -1) squares map to (0, 0), everything else to (state, stateid)
    solution = []
    for y, row in enumerate(ogrid):
        solution.append([])
        for sq in row:
            if sq.state in (0, -1):
                solution[y].append((0, 0))
            else:
                solution[y].append((sq.state, sq.stateid))
    return solution


# utils.visualisation(target, solution)
width = 20
height = 25
density = 0.8
target, a, TheSolution = utils.generate_target(width, height, density)
solution2 = Tetris(target, a)
valid, missing, excess, error_pieces, use_diff = utils.check_solution(
    target, solution2, a)
total_blocks = sum([sum(row) for row in target])
percent = (missing + excess) / total_blocks
print(1 - percent)  # accuracy: share of blocks placed correctly
print(a)  # piece-limit info returned by generate_target
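
# Worked example of the accuracy metric above, with made-up numbers: a
# 100-block target with 4 missing and 6 excess blocks scores
# 1 - (4 + 6) / 100 = 0.90.
missing, excess, total_blocks = 4, 6, 100
print(1 - (missing + excess) / total_blocks)  # 0.9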
from copy import deepcopy
from main import Tetris
import utils
import timeit

# Each cell of a solution is a (shapeID, pieceID) pair; (0, 0) marks an
# empty cell.
perfect_solution = [
    [(0, 0), (0, 0), (8, 1), (0, 0), (0, 0), (0, 0)],
    [(0, 0), (0, 0), (8, 1), (0, 0), (13, 2), (0, 0)],
    [(0, 0), (8, 1), (8, 1), (13, 2), (13, 2), (13, 2)],
    [(0, 0), (13, 3), (18, 4), (18, 4), (0, 0), (0, 0)],
    [(13, 3), (13, 3), (13, 3), (18, 4), (18, 4), (0, 0)]
]
# NOTE: This example is used for the mock solution from 'main.py' only.

# Generate a random target shape
# NOTE: it is recommended to keep density below 0.8
target, perfect_solution = utils.generate_target(
    width=100, height=100, density=0.7,
    forbidden_pieces=the_forbidden_pieces)

solution = Tetris(deepcopy(target))
# solution = perfect_solution

# checks if the solution is valid
valid, missing, excess, error_pieces = utils.check_solution(
    target, solution, the_forbidden_pieces)

if not valid or len(error_pieces) != 0:
    if len(error_pieces) != 0:
        print('WARNING: {} pieces have a wrong shapeID. They are labelled '
              'in the image of the solution, and their PieceIDs are: {}.'
              .format(len(error_pieces), error_pieces))
    print("Displaying solution...")
    utils.visual_perfect(perfect_solution, solution, the_forbidden_pieces)
    print("WARNING: The solution is not valid, no score will be given!")
else:
    # if the solution is valid, test time performance and accuracy
    # TIME PERFORMANCE
    # There will be three different 'target' shapes with increasing
    # complexity in the real test.
    time_set = timeit.timeit('Tetris({})'.format(target),
                             'from main import Tetris', number=1)
perfect_solution = [[(0, 0), (0, 0), (8, 1), (0, 0), (0, 0)],
                    [(0, 0), (0, 0), (8, 1), (1, 2), (1, 2)],
                    [(0, 0), (8, 1), (8, 1), (1, 2), (1, 2)],
                    [(0, 0), (13, 3), (18, 4), (18, 4), (0, 0)],
                    [(13, 3), (13, 3), (13, 3), (18, 4), (18, 4)]]
# NOTE: This example is used for the mock solution from 'main.py' only.

# Generate a random target shape
# NOTE: it is recommended to keep density below 0.8
target, limit_tetris, perfect_solution = utils.generate_target(
    width=20, height=20, density=0.8)

solution = Tetris(deepcopy(target), deepcopy(limit_tetris))
# checks if the solution is valid
valid, missing, excess, error_pieces, use_diff = utils.check_solution(
    target, solution, limit_tetris)

if not valid or len(error_pieces) != 0:
    if len(error_pieces) != 0:
        print('WARNING: {} pieces have a wrong shapeID. They are labelled '
              'in the image of the solution, and their PieceIDs are: {}.'
              .format(len(error_pieces), error_pieces))
    print("Displaying solution...")
    utils.visual_perfect(perfect_solution, solution)
    print("WARNING: The solution is not valid, no score will be given!")
else:
    # if the solution is valid, test time performance and accuracy
    # TIME PERFORMANCE
    # There will be three different 'target' shapes with increasing
    # complexity in the real test.
def solve(self):
    # build the QUBO matrix, solve it with qbsolv, then read back the
    # variable assignment and verify it against the original formula
    self.fillQ()
    answer = utils.solve_with_qbsolv(self.Q)
    assignment = [answer[i] for i in range(self.V)]
    print("Assignment: ", assignment)
    return utils.check_solution(self.formula, assignment)
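
# For context, a minimal sketch of what a helper like utils.solve_with_qbsolv
# could look like. This assumes the 'dwave_qbsolv' package and a QUBO passed
# as a {(i, j): weight} dict; it is an illustration, not the repo's actual
# implementation.
from dwave_qbsolv import QBSolv

def solve_with_qbsolv(Q):
    # Q: QUBO as a dict mapping index pairs (i, j) to weights
    response = QBSolv().sample_qubo(Q)
    # take the first returned sample: a dict {variable index: 0 or 1}
    return list(response.samples())[0]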
def main(argv):
    opt_config = -1
    benchmark = argv[0]
    # the desired error ratio is expressed as 1e-exp;
    # the input taken from the user is the exp
    trgt_error_ratio_exp = int(argv[1])
    input_set_idx = int(argv[2])
    if trgt_error_ratio_exp > max_target_error_exp:
        print("Desired error ({}) larger than the max allowed ({})".format(
            trgt_error_ratio_exp, max_target_error_exp))
        sys.exit()
    trgt_error_ratio = float('1e-' + str(trgt_error_ratio_exp))
    trgt_error_ratio_log_exp = -np.log(trgt_error_ratio)
    largeET = large_error_threshold

    before_initial_time = time.time()
    (initial_df, before_nn_time, after_nn_time, regr, mae, rmse, r2, ev, acc,
     underest_ratio, loss, max_pred_err, train_data_regr, test_data_regr,
     train_target_regr, test_target_regr, train_data_classr,
     test_data_classr, train_target_classr, test_target_classr,
     classr_needed) = regressor_creation(
         benchmark, benchmarks_home, data_set_dir, benchmark_nVar, min_nbit,
         max_nbit, value_inPlace_of_inf, errors_close_to_0_threshold, largeET,
         initial_dataset_size, initial_train_set_size,
         trgt_error_ratio_log_exp, alpha_asymm_loss, input_set_idx, True)
    print("Initial regressor with MAE {0:.3f} ({1:.3f} sec)".format(
        mae, after_nn_time - before_nn_time))

    before_cl_time = time.time()
    (classr, acc, fscore) = classifier_creation(
        classr_needed, benchmark, initial_train_set_size, train_data_classr,
        test_data_classr, train_target_classr, test_target_classr, largeET,
        True)
    after_cl_time = time.time()
    print("Initial classifier with accuracy {0:.3f} ({1:.3f} sec)".format(
        acc, after_cl_time - before_cl_time))
    after_initial_time = time.time()
    print("Initial phase (data retrieval, regr & classr train) took {0:.3f} "
          "sec".format(after_initial_time - before_initial_time))

    before_opt_time = time.time()
    # create the MP model and solve the optimization problem
    # (active learning approach)
    opt_config, n_iter = opt_model_AL(
        benchmark, initial_df, trgt_error_ratio, trgt_error_ratio_log_exp,
        regr, mae, rmse, r2, ev, underest_ratio, classr, acc,
        initial_train_set_size, largeET, input_set_idx, True,
        after_initial_time - before_initial_time)
    after_opt_time = time.time()

    # post-process and evaluate
    if opt_config == -1:
        print("Some problem happened with the optimizer")
        sys.exit()
    else:
        print("Solution for {0} with desired max error ratio 1e-{1} found in "
              "{2:.3f}s (ML) + {3:.3f}s (opt) and {4} iterations".format(
                  benchmark, trgt_error_ratio_exp,
                  after_initial_time - before_initial_time,
                  after_opt_time - before_opt_time, n_iter))
    if opt_config is None:
        opt_config = [max_nbit for i in range(benchmark_nVar[benchmark])]
    print("Exp error {0}, error {1}, log exp error {2:.3f}".format(
        trgt_error_ratio_exp, trgt_error_ratio, trgt_error_ratio_log_exp))

    # final check: actually run the benchmark with the optimal config
    print("Run benchmark with opt config (%s)" % opt_config)
    error, is_error_se_trgt, error_class = utils.check_solution(
        benchmark, opt_config, trgt_error_ratio, benchmarks_home, binary_map,
        largeET, benchmark_nVar, max_nbit, min_nbit, input_set_idx)
    print("Error {0} (log: {1:.3f}) <= target {2} (log: {3:.3f})? --> {4}"
          .format(error, -np.log(error), trgt_error_ratio,
                  -np.log(trgt_error_ratio), is_error_se_trgt))
    if not is_error_se_trgt:
        print("\tDistance from trgt: {0:.3f}".format(
            1 - trgt_error_ratio / error))
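
# A hedged example of how main() would be invoked when this file is run as a
# script; the file and benchmark names below are placeholders.
#
#   python this_script.py my_benchmark 3 0
#   --> benchmark 'my_benchmark', target error ratio 1e-3, input set 0
import sys

if __name__ == '__main__':
    main(sys.argv[1:])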
def opt_model_AL(benchmark, initial_df, trgt_error_ratio,
                 trgt_error_ratio_log_exp, regr, mae, rmse, r2, ev,
                 underest_ratio, classr, acc, train_set_size,
                 large_err_thresh, input_set_idx=-1, debug=True,
                 initial_train_time=0):
    classr_needed = classr is not None
    if debug:
        print("----- Search solution with opt model -----")
    before_firstSol_time = time.time()
    before_firstSol_solve_time = before_firstSol_time
    opt_config, mdl, bit_sum = solve_opt_model(
        benchmark, None, trgt_error_ratio_log_exp, regr, mae, rmse,
        underest_ratio, classr, acc, 0, False, train_set_size,
        large_err_thresh, 0, [], True)
    if mdl == 1:
        # callers unpack (opt_config, n_iter), so always return a pair
        return opt_config, 0
    after_firstSol_solve_time = time.time()
    if opt_config is None:
        return opt_config, 0
    before_firstSol_check_time = time.time()
    error, is_error_se_trgt, error_class = utils.check_solution(
        benchmark, opt_config, trgt_error_ratio, benchmarks_home, binary_map,
        large_err_thresh, benchmark_nVar, max_nbit, min_nbit, input_set_idx)
    after_firstSol_time = time.time()
    after_firstSol_check_time = after_firstSol_time
    errPred, classPred = get_pred_class(opt_config, regr, classr)
    if debug:
        print(" First Solution (found in {0:.3f}s): {1}; error {2} < "
              "error_target {3}? {4}".format(
                  after_firstSol_time - before_firstSol_time, opt_config,
                  error, trgt_error_ratio, is_error_se_trgt))
        print("\tTime to find sol {0:.3f}, time to check sol {1:.3f}".format(
            after_firstSol_solve_time - before_firstSol_solve_time,
            after_firstSol_check_time - before_firstSol_check_time))
    if is_error_se_trgt:
        if debug:
            print(" Solution found satisfies actual program run")
        return opt_config, 1
    if debug:
        print(" Actual error (log) {0:.5f}, predicted error (log) {1:.3f}, "
              "desired error (log) {2:.3f}; actual class {3}, predicted "
              "class {4}".format(-np.log(error), errPred,
                                 -np.log(trgt_error_ratio), error_class,
                                 classPred))
        print(" Solution found _does not_ satisfy actual program run")
        print(" Refine model and search for new solution")

    prev_sol_stats = {'config': opt_config,
                      'delta_config': 0,
                      'error': 0,
                      'error_class': 0,
                      'delta_error': 0,
                      'error_log': 0,
                      'delta_error_log': 0,
                      'error_pred': 0,
                      'error_pred_log': 0,
                      'delta_error_pred': 0,
                      'delta_error_pred_log': 0,
                      'error_capped': 0,
                      'cost': sum(opt_config)}
    sol_stats = compute_sol_stats(
        opt_config, opt_config, prev_sol_stats, error, errPred, error_class,
        acc, mae, r2, ev,
        after_firstSol_time - before_firstSol_time + initial_train_time,
        True, True, large_err_thresh)
    prev_sol_stats = sol_stats
    prev_config = opt_config
    prev_bit_sum = sol_stats['cost']

    before_refineSol_time = time.time()
    n_iter = 1
    n_small_error = 0
    n_large_error = 0
    # keep retraining the model until the error drops below the desired one
    # or a maximum number of iterations is reached
    while not is_error_se_trgt and n_iter < max_refine_iterations:
        print(">>>>>>> Iteration {} <<<<<<<<".format(n_iter))
        before_iterRefSol_time = time.time()
        before_refineSol_solve_time = time.time()
        if debug:
            print("\t Infer new examples")
        new_examples = infer_new_examples(benchmark, sol_stats)
        if debug:
            print("\t Refine ML model")
        # again, we need to ensure that the regressor NN can predict the
        # target error (err < target --> NN output > log(target))
        iter_refine_net = 0
        max_pred_err = -1
        (regr_ref, mae, rmse, r2, ev, underest_ratio, classr_ref, acc,
         max_pred_err, new_df) = refine_ML(benchmark, regr, classr,
                                           initial_df, new_examples,
                                           classr_needed)
        if debug:
            print("\tmax_pred_err {0:.3f} (trgt err ratio log exp: {1:.3f}) "
                  "- #iter needed: {2}".format(max_pred_err,
                                               trgt_error_ratio_log_exp,
                                               iter_refine_net))
            print("\t - # Examples used for training regr so far: {}".format(
                len(new_df['train_data_R'])))
            print("\t - # Examples used for training clf so far: {}".format(
                len(new_df['train_data_C'])))
            print("\t - Refined stats: regr MAE {0:.3f}, R2 {1:.3f}, EV "
                  "{2:.3f} -- Classr Acc {3:.3f}".format(mae, r2, ev, acc))
        # the refinement can happen even if the previous solution is already
        # fine (we may want to improve it). If the previous solution was
        # infeasible, we also want it to be deleted from the solution pool
        if not is_error_se_trgt:
            # if the error predictor is not very robust, we may end up with
            # many 'small' errors; after a while we increase the target
            if n_small_error != 0 and ((n_small_error % 10) == 0):
                error_increase = 1
                trgt_error_ratio_log_exp += error_increase
                if debug:
                    print("\t\t Increase error ({})".format(error_increase))
        # after every N iterations, we increase the minimum number of bits
        # of a solution
        if n_iter % increase_freq == 0 and n_iter > increase_freq_begin:
            increase_tot_nbits = True
        else:
            increase_tot_nbits = False
        wrong_config = opt_config
        if debug:
            print("\t----- Search solution with opt model -----")
        opt_config, mdl_ref, bit_sum = solve_opt_model(
            benchmark, None, trgt_error_ratio_log_exp, regr, mae, rmse,
            underest_ratio, classr, acc, prev_bit_sum, increase_tot_nbits,
            train_set_size, large_err_thresh, n_iter, wrong_config, True)
        prev_bit_sum = bit_sum
        after_refineSol_solve_time = time.time()
        before_refineSol_check_time = time.time()
        error, is_error_se_trgt, error_class = utils.check_solution(
            benchmark, opt_config, trgt_error_ratio, benchmarks_home,
            binary_map, large_err_thresh, benchmark_nVar, max_nbit, min_nbit,
            input_set_idx)
        after_refineSol_check_time = time.time()
        errPred, classPred = get_pred_class(opt_config, regr, classr)
        if debug:
            print("\t Refined solution at iter {0}: {1}; error {2} < "
                  "error_target {3}? {4}".format(n_iter, opt_config, error,
                                                 trgt_error_ratio,
                                                 is_error_se_trgt))
            print("\t Actual error (log) {0:.3f}, predicted error (log) "
                  "{1:.3f}, desired error (log) {2:.3f}; actual class {3}, "
                  "predicted class {4}".format(-np.log(error), errPred,
                                               -np.log(trgt_error_ratio),
                                               error_class, classPred))
        if not is_error_se_trgt:
            if error_class == 0:
                error_delta = errPred - (-np.log(error))
                print("\t--> Small Error - error delta (log) {0:.3f}".format(
                    error_delta))
                n_small_error += 1
            else:
                print("\t--> Large Error")
                n_large_error += 1
        print("Time to find refined sol {0:.3f}, time to check sol "
              "{1:.3f}".format(
                  after_refineSol_solve_time - before_refineSol_solve_time,
                  after_refineSol_check_time - before_refineSol_check_time))
        after_iterRefSol_time = time.time()
        sol_stats = compute_sol_stats(
            opt_config, prev_config, prev_sol_stats, error, errPred,
            error_class, acc, mae, r2, ev,
            after_iterRefSol_time - before_iterRefSol_time, True, False,
            large_err_thresh)
        prev_sol_stats = sol_stats
        prev_config = opt_config
        mdl = mdl_ref
        # update the DataFrame with new examples (for the next iteration)
        initial_df = new_df
        n_iter += 1

    after_refineSol_time = time.time()
    if debug:
        print(" Refined Sol found in {0:.3f}s and after {1} "
              "iterations".format(
                  after_refineSol_time - before_refineSol_time, n_iter))
    print("----- Found solution {} -----".format(opt_config))
    return opt_config, n_iter