def benchmark_cost_function(data_sizes):
    """Benchmark the CPU vs. GPU cost-function implementations.

    For each data-set size in *data_sizes*, times ``iterations`` runs of
    both ``acoc.cost_function`` and ``acoc.cost_function_gpu``, then plots
    the mean timings and saves them to a generated results folder.

    :param data_sizes: iterable of data-set sizes to benchmark.
    """
    # 'with' ensures the pickle file handle is closed (was leaked before).
    with open('utils/good_path_for_rectangle.pickle', 'rb') as f:
        polygon = pickle.load(f)
    save_folder = generate_folder_name()
    iterations = 10
    # results[i][j] holds (cpu_time, gpu_time) for size index i, iteration j.
    results = np.empty((len(data_sizes), iterations, 2), dtype=float)
    for i, dsize in enumerate(data_sizes):
        data = dg.generate_rectangle_set(dsize)
        print("\nRun {} with value {}".format(i + 1, dsize))
        # Compile functions and warm up GPU
        acoc.cost_function_gpu(data.T, polygon)
        for j in range(iterations):
            # Display 1-based progress (was 0-based: showed "0/10").
            utils.print_on_current_line('Iteration {}/{}'.format(j + 1, iterations))
            # time.perf_counter() replaces time.clock(), which was
            # deprecated in 3.3 and removed in Python 3.8.
            start_cpu = time.perf_counter()
            acoc.cost_function(data.T, polygon)
            end_cpu = time.perf_counter()
            results[i][j][0] = end_cpu - start_cpu

            start_gpu = time.perf_counter()
            acoc.cost_function_gpu(data.T, polygon)
            end_gpu = time.perf_counter()
            # BUG FIX: was results[i][j][2] — an out-of-bounds index on the
            # last axis of size 2. GPU timings belong in slot 1.
            results[i][j][1] = end_gpu - start_gpu
    mean_results = np.mean(results, axis=1).T
    acoc_plotter.plot_bar_chart_gpu_benchmark(mean_results, data_sizes,
                                              ['CPython', 'GPU'],
                                              save_folder, 'results')
    np.set_printoptions(precision=7, suppress=False)
    print("\nResults: \n{}".format(mean_results))
    utils.save_object(mean_results, save_folder, 'results')
def _construct_polygon(self, data, plane_string, start_time, print_string):
    """Run the ant-colony optimisation loop on one plane of the data and
    return the best polygon (list of travelled edges) found.

    :param data: sample array for this plane (pushed to the GPU below).
    :param plane_string: label used in plot-output folder names.
    :param start_time: epoch seconds; used for the elapsed-time display.
    :param print_string: extra text appended to the progress line.
    :return: edge list of the highest-scoring completed path.
    """
    # NOTE(review): indentation reconstructed from a collapsed source
    # line — block boundaries are as formatted here; confirm against VCS.
    # Push the data to the device once so GPU scoring can reuse it.
    cuda.to_device(data)
    ant_scores = []
    current_best_ant = []
    # Index into ant_scores at the last level-up or new-best event;
    # drives the multi-level convergence test below.
    last_level_up_or_best_ant = 0
    matrix = AcocMatrix(data, tau_initial=self.config.tau_init)
    current_best_score = 0
    while matrix.level <= self.config.max_level:
        # Start edge is drawn proportionally to pheromone weight.
        start_vertex = get_random_weighted(matrix.edges)
        if self.config.multi_level:
            # Refine the matrix once no improvement has happened for
            # level_convergence_rate consecutive ants.
            if (len(ant_scores) - last_level_up_or_best_ant) > self.config.level_convergence_rate:
                matrix.level_up(current_best_ant)
                last_level_up_or_best_ant = len(ant_scores)
        _ant = Ant(start_vertex)
        _ant.move_ant()
        while not _ant.at_target and not _ant.is_stuck:
            _ant.move_ant()
        if _ant.at_target:  # stuck ants are discarded without scoring
            ant_score = self._score(_ant.edges_travelled, data)
            if ant_score > current_best_score:
                current_best_ant = _ant.edges_travelled
                current_best_score = ant_score
                last_level_up_or_best_ant = len(ant_scores)
                if self.config.plot:
                    plotter.plot_path_with_data(
                        current_best_ant, data, matrix, save=True,
                        save_folder=osp.join(
                            self.save_folder,
                            'best_paths/{}/'.format(plane_string)),
                        file_name='ant' + str(len(ant_scores)))
            # Reinforce pheromone along the best path so far, then reset
            # a random subset of edges (exploration).
            self._put_pheromones(current_best_ant, current_best_score)
            self._reset_at_random(matrix)
            ant_scores.append(ant_score)
        t_elapsed = utils.seconds_to_hms(time.time() - start_time)
        utils.print_on_current_line(
            "Level {}/{}, {}, Time elapsed: {}".format(
                matrix.level, self.config.max_level, print_string, t_elapsed))
    return current_best_ant
def run(*args):
    """Train a PolyACO classifier ``number_runs`` times on the configured
    data set and return the mean wall-clock training time in seconds.

    :param args: (key, value) pairs overriding entries of the module-level
        CONFIG dict (e.g. ('data_set', 'iris')).
    :return: mean of the per-run training times (float).
    """
    config = dict(CONFIG)
    for conf in args:
        config[conf[0]] = conf[1]
    # 'with' ensures the pickle file handle is closed (was leaked before).
    # NOTE(review): assumes the pickle holds a mapping keyed by data-set
    # name — confirm against utils/data_sets.pickle.
    with open('utils/data_sets.pickle', 'rb') as f:
        data = pickle.load(f)[config['data_set']]
    number_runs = config['number_runs']
    clf = acoc.PolyACO(config)
    run_times = np.zeros(number_runs, dtype=float)
    for i in range(number_runs):
        iter_string = "Iteration: {}/{}".format(i + 1, number_runs)
        # time.perf_counter() replaces time.clock(), which was deprecated
        # in 3.3 and removed in Python 3.8.
        start = time.perf_counter()
        clf.train(data, print_string=', ' + iter_string)
        end = time.perf_counter()
        run_times[i] = end - start
        utils.print_on_current_line(iter_string)
    return np.mean(run_times)
def parameter_tester(parameter_name, values, save_folder=None):
    """Run the experiment once per candidate value of *parameter_name*
    and save a CSV-style summary of the results.

    :param parameter_name: name of the config entry to vary.
    :param values: iterable of values to try for that entry.
    :param save_folder: output folder; auto-generated when None.
    """
    if save_folder is None:
        save_folder = utils.generate_folder_name()
    print("\n\nExperiment for parameter '{}' with values {}".format(
        parameter_name, values))
    plt.clf()
    all_scores = []
    for index, v in enumerate(values):
        print("Run {} with value {}".format(index + 1, v))
        scores = run((parameter_name, v))
        all_scores.append(scores)
        utils.print_on_current_line('')
    # First line of the output file: the tested values themselves.
    header = ','.join(str(s) for s in values)
    # NOTE(review): sum(s) here and the per-element formatting in the loop
    # below require each entry of all_scores to be a *sequence* of per-run
    # scores, but the run() defined in this file returns a scalar mean —
    # confirm which run() is actually in scope, else this raises TypeError.
    # NOTE(review): divisor is CLASSIFIER_CONFIG.runs while run() reads
    # config['number_runs'] — verify these agree.
    result_str = header + '\n' + ','.join(["{:.4f}".format(sum(s) / CLASSIFIER_CONFIG.runs)
                                           for s in all_scores]) + \
        '\n\n' + 'all scores:\n'
    for a in all_scores:
        result_str += ','.join('{:.4f}'.format(s) for s in a) + '\n'
    utils.save_string_to_file(result_str,
                              parent_folder=save_folder,
                              file_name='result_' + parameter_name + '.txt')
def _construct_polygon(self, data, plane_string, start_time, print_string):
    """Run the ant-colony optimisation loop on one plane of the data and
    return the best polygon (list of travelled edges) found.

    :param data: sample array for this plane (pushed to the GPU below).
    :param plane_string: label used in plot-output folder names.
    :param start_time: epoch seconds; used for the elapsed-time display.
    :param print_string: extra text appended to the progress line.
    :return: edge list of the highest-scoring completed path.
    """
    # NOTE(review): indentation reconstructed from a collapsed source
    # line — block boundaries are as formatted here; confirm against VCS.
    # Push the data to the device once so GPU scoring can reuse it.
    cuda.to_device(data)
    ant_scores = []
    current_best_ant = []
    # Index into ant_scores at the last level-up or new-best event;
    # drives the multi-level convergence test below.
    last_level_up_or_best_ant = 0
    matrix = AcocMatrix(data, tau_initial=self.config.tau_init)
    current_best_score = 0
    while matrix.level <= self.config.max_level:
        # Start edge is drawn proportionally to pheromone weight.
        start_vertex = get_random_weighted(matrix.edges)
        if self.config.multi_level:
            # Refine the matrix once no improvement has happened for
            # level_convergence_rate consecutive ants.
            if (len(ant_scores) - last_level_up_or_best_ant) > self.config.level_convergence_rate:
                matrix.level_up(current_best_ant)
                last_level_up_or_best_ant = len(ant_scores)
        _ant = Ant(start_vertex)
        _ant.move_ant()
        while not _ant.at_target and not _ant.is_stuck:
            _ant.move_ant()
        if _ant.at_target:  # stuck ants are discarded without scoring
            ant_score = self._score(_ant.edges_travelled, data)
            if ant_score > current_best_score:
                current_best_ant = _ant.edges_travelled
                current_best_score = ant_score
                last_level_up_or_best_ant = len(ant_scores)
                if self.config.plot:
                    plotter.plot_path_with_data(
                        current_best_ant, data, matrix, save=True,
                        save_folder=osp.join(
                            self.save_folder,
                            'best_paths/{}/'.format(plane_string)),
                        file_name='ant' + str(len(ant_scores)))
            # Reinforce pheromone along the best path so far, then reset
            # a random subset of edges (exploration).
            self._put_pheromones(current_best_ant, current_best_score)
            self._reset_at_random(matrix)
            ant_scores.append(ant_score)
        t_elapsed = utils.seconds_to_hms(time.time() - start_time)
        utils.print_on_current_line(
            "Level {}/{}, {}, Time elapsed: {}".format(
                matrix.level, self.config.max_level, print_string, t_elapsed))
    return current_best_ant
def benchmark_cost_function(data_sizes):
    """Benchmark the CPU vs. GPU cost-function implementations.

    For each data-set size in *data_sizes*, times ``iterations`` runs of
    both ``acoc.cost_function`` and ``acoc.cost_function_gpu``, then plots
    the mean timings and saves them to a generated results folder.

    :param data_sizes: iterable of data-set sizes to benchmark.
    """
    # 'with' ensures the pickle file handle is closed (was leaked before).
    with open('utils/good_path_for_rectangle.pickle', 'rb') as f:
        polygon = pickle.load(f)
    save_folder = generate_folder_name()
    iterations = 10
    # results[i][j] holds (cpu_time, gpu_time) for size index i, iteration j.
    results = np.empty((len(data_sizes), iterations, 2), dtype=float)
    for i, dsize in enumerate(data_sizes):
        data = dg.generate_rectangle_set(dsize)
        print("\nRun {} with value {}".format(i + 1, dsize))
        # Compile functions and warm up GPU
        acoc.cost_function_gpu(data.T, polygon)
        for j in range(iterations):
            # Display 1-based progress (was 0-based: showed "0/10").
            utils.print_on_current_line('Iteration {}/{}'.format(j + 1, iterations))
            # time.perf_counter() replaces time.clock(), which was
            # deprecated in 3.3 and removed in Python 3.8.
            start_cpu = time.perf_counter()
            acoc.cost_function(data.T, polygon)
            end_cpu = time.perf_counter()
            results[i][j][0] = end_cpu - start_cpu

            start_gpu = time.perf_counter()
            acoc.cost_function_gpu(data.T, polygon)
            end_gpu = time.perf_counter()
            # BUG FIX: was results[i][j][2] — an out-of-bounds index on the
            # last axis of size 2. GPU timings belong in slot 1.
            results[i][j][1] = end_gpu - start_gpu
    mean_results = np.mean(results, axis=1).T
    acoc_plotter.plot_bar_chart_gpu_benchmark(mean_results, data_sizes,
                                              ['CPython', 'GPU'],
                                              save_folder, 'results')
    np.set_printoptions(precision=7, suppress=False)
    print("\nResults: \n{}".format(mean_results))
    utils.save_object(mean_results, save_folder, 'results')