def evaluate(i, trainset, testset, cp_name):
    """Retrain the i-th individual from a checkpointed population several
    times and report its train/test error statistics."""
    pop, _, _ = load_checkpoint(cp_name)
    # load the whole data
    X_train, y_train = load_data("data/" + trainset)
    X_test, y_test = load_data("data/" + testset)
    E_train, E_test = [], []  # train/test errors over repeated runs
    for _ in range(5):
        network = pop[i].createNetwork()
        network.fit(X_train, y_train,
                    batch_size=Config.batch_size,
                    nb_epoch=20,
                    verbose=0)
        yy_train = network.predict(X_train)
        E_train.append(error(yy_train, y_train))
        yy_test = network.predict(X_test)
        E_test.append(error(yy_test, y_test))
    print_stat(E_train, "train")
    print_stat(E_test, "test")
    print(pop[i].fitness.values)
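
# A minimal sketch of the `error` helper used above (hypothetical, since its
# definition is not shown here): the mean misclassification rate between the
# network's predictions and one-hot encoded targets.
import numpy as np

def error(y_pred, y_true):
    # Fraction of samples whose predicted class differs from the true class.
    return float(np.mean(np.argmax(y_pred, axis=1) != np.argmax(y_true, axis=1)))
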
def evaluate(i, cp_name, data_source, trainset, testset):
    """Evaluate the i-th individual from the Pareto front."""
    _, front, _, cfg = load_checkpoint(cp_name)
    config.global_config.update(cfg)
    if data_source is None:
        # Fall back to the dataset recorded in the checkpoint's config.
        assert trainset is None and testset is None
        data_source = config.global_config["dataset"]["source_type"]
        trainset = config.global_config["dataset"]["name"]
        test_name = config.global_config["dataset"]["test_name"]
    else:
        test_name = testset
    # load the whole data
    X_train, y_train = load_data(data_source, trainset)
    X_test, y_test = load_data(data_source, trainset, test=True, test_name=test_name)
    ind = front[i]
    E_train, E_test = eval_mean(ind, X_train, y_train, X_test, y_test)
    print(i, ": ", end="")
    print_stat(E_train, "train")
    print(i, ": ", end="")
    print_stat(E_test, "test")
    print(i, ": ", end="")
    print(ind.fitness.values)
    print(flush=True)
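
# Hypothetical usage sketch for the evaluate() variant above; the checkpoint
# file name and dataset names are illustrative, not from the source:
#
#   evaluate(0, "run1.ckpt", "csv", "train.csv", "test.csv")  # explicit data
#   evaluate(0, "run1.ckpt", None, None, None)  # dataset taken from the
#                                               # config stored in the checkpoint
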
def eval_front(trainset, testset, cp_name):
    _, front, _ = load_checkpoint(cp_name)
    # load the whole data
    X_train, y_train = load_data("data/" + trainset)
    X_test, y_test = load_data("data/" + testset)
    for i, ind in enumerate(front):
        E_train, E_test = eval_mean(ind, X_train, y_train, X_test, y_test)
        print(i, ": ", end="")
        print_stat(E_train, "train")
        print(i, ": ", end="")
        print_stat(E_test, "test")
        print(i, ": ", end="")
        print(ind.fitness.values)
        print(flush=True)
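
# A minimal sketch of the `eval_mean` helper called above, assuming it wraps
# the retrain-and-score loop from the older evaluate() at the top of this
# file (hypothetical reconstruction; the real implementation may differ).
def eval_mean(ind, X_train, y_train, X_test, y_test, n_runs=5):
    E_train, E_test = [], []
    for _ in range(n_runs):
        # Each run retrains the individual's network from scratch.
        network = ind.createNetwork()
        network.fit(X_train, y_train,
                    batch_size=Config.batch_size,
                    nb_epoch=20,
                    verbose=0)
        E_train.append(error(network.predict(X_train), y_train))
        E_test.append(error(network.predict(X_test), y_test))
    return E_train, E_test
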
def run_with_executor(self,
                      use_gpu,
                      feed=None,
                      repeat=1,
                      log_level=0,
                      check_output=False,
                      profiler="none"):
    """Run main_program with fluid.Executor and collect per-repeat wall times."""
    self.place = fluid.CUDAPlace(0) if use_gpu else fluid.CPUPlace()
    executor = fluid.Executor(self.place)
    executor.run(self.startup_program)

    if feed is None:
        feed = self._feed_random_data(use_gpu, as_lodtensor=True)

    runtimes = []
    fetches = []
    outputs = None
    with profile_context(self.name, use_gpu, profiler):
        for i in range(repeat):
            begin = time.time()
            outputs = executor.run(program=self.main_program,
                                   feed=feed,
                                   fetch_list=self.fetch_vars,
                                   use_program_cache=True,
                                   return_numpy=True)
            end = time.time()
            runtimes.append(end - begin)
            if check_output:
                fetches.append(outputs)

    if check_output:
        stable, max_diff = self._check_consistency(fetches)
        stats = {"total": runtimes, "stable": stable, "diff": max_diff}
    else:
        stats = {"total": runtimes}
    stats["framework"] = "paddle"
    stats["version"] = paddle.__version__
    stats["name"] = self.name
    stats["device"] = "GPU" if use_gpu else "CPU"
    utils.print_stat(stats, log_level=log_level)
    return outputs
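
# A minimal numpy sketch of the `_check_consistency` method used above
# (hypothetical reconstruction, intended as a method of the benchmark class):
# compare every repeat's fetched outputs against the first run and report
# whether they all agree, plus the largest absolute difference observed.
import numpy as np

def _check_consistency(self, fetches):
    baseline = fetches[0]
    max_diff = 0.0
    for outputs in fetches[1:]:
        for out, ref in zip(outputs, baseline):
            diff = np.max(np.abs(np.asarray(out) - np.asarray(ref)))
            max_diff = max(max_diff, float(diff))
    return max_diff == 0.0, max_diff
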
def run(self,
        use_gpu,
        feed=None,
        repeat=1,
        log_level=0,
        check_output=False,
        profile=False):
    """Run the TensorFlow graph `repeat` times, optionally profiling ops
    and dumping a chrome-trace timeline."""
    sess = self._init_session(use_gpu)
    # tf.debugging.set_log_device_placement(True)

    if profile:
        profiler = model_analyzer.Profiler(graph=sess.graph)
        run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
        run_metadata = tf.RunMetadata()
    else:
        profiler = None
        run_options = None
        run_metadata = None
    self.timeline_dict = None

    if feed is None:
        feed = self._feed_random_data()

    runtimes = []
    fetches = []
    outputs = None
    for i in range(repeat):
        begin = time.time()
        outputs = sess.run(fetches=self.fetch_list,
                           feed_dict=feed,
                           options=run_options,
                           run_metadata=run_metadata)
        end = time.time()
        runtimes.append(end - begin)

        if profile:
            # Update profiler
            profiler.add_step(step=i, run_meta=run_metadata)
            # Dump a chrome-trace timeline for this step
            tl = timeline.Timeline(run_metadata.step_stats)
            chrome_trace = tl.generate_chrome_trace_format()
            with open(self.name + '_tf.timeline', 'w') as trace_file:
                trace_file.write(chrome_trace)

        if check_output:
            fetches.append(outputs)

    if profile:
        # Generate profiling result: the 10 most expensive ops by time
        profile_op_builder = option_builder.ProfileOptionBuilder()
        profile_op_builder.select(['micros', 'occurrence'])
        profile_op_builder.order_by('micros')
        profile_op_builder.with_max_depth(10)
        profiler.profile_operations(profile_op_builder.build())

    stats = {
        "framework": "tensorflow",
        "version": tf.__version__,
        "name": self.name,
        "total": runtimes
    }
    stats["device"] = "GPU" if use_gpu else "CPU"
    utils.print_stat(stats, log_level=log_level)
    return outputs
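
# A minimal sketch of what `utils.print_stat` is assumed to do with the stats
# dict built above (hypothetical; the real helper lives in a separate utils
# module): summarize the per-repeat wall times into avg/min/max latency.
def print_stat(stats, log_level=0):
    runtimes = stats["total"]
    avg = sum(runtimes) / len(runtimes)
    print("[%s-%s] %s: avg=%.5fs min=%.5fs max=%.5fs (%d repeats)" %
          (stats["framework"], stats["device"], stats["name"],
           avg, min(runtimes), max(runtimes), len(runtimes)))
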
def run_with_core_executor(self,
                           use_gpu,
                           feed=None,
                           repeat=1,
                           log_level=0,
                           check_output=False,
                           profiler="none"):
    """Run main_program with the core executor, timing the feed, compute and
    fetch phases separately."""
    self.place = fluid.CUDAPlace(0) if use_gpu else fluid.CPUPlace()
    executor = fluid.Executor(self.place)
    executor.run(self.startup_program)

    # Use the core executor to run main_program
    place = fluid.core.Place()
    place.set_place(self.place)
    core_executor = fluid.core.Executor(place)

    fetch_list_str = []
    for var in self.fetch_vars:
        fetch_list_str.append(var.name)
    ctx = core_executor.prepare(self.main_program.desc, 0, fetch_list_str, False)
    core_executor.create_variables(self.main_program.desc, self.scope, 0)

    if feed is None:
        feed = self._feed_random_data(use_gpu, as_lodtensor=False)

    feed_times = []
    fetch_times = []
    compute_times = []
    runtimes = []
    fetches = []
    outputs = None
    with profile_context(self.name, use_gpu, profiler):
        for i in range(repeat):
            begin = time.time()
            self._init_feed_tensor(feed)
            feed_end = time.time()
            core_executor.run_prepared_ctx(ctx, self.scope, False, False, False)
            compute_end = time.time()
            outputs = self._get_fetch_tensor()
            fetch_end = time.time()

            # Break total latency into feed / compute / fetch phases.
            runtimes.append(fetch_end - begin)
            feed_times.append(feed_end - begin)
            compute_times.append(compute_end - feed_end)
            fetch_times.append(fetch_end - compute_end)

            if check_output:
                fetches.append(outputs)

    if check_output:
        stable, max_diff = self._check_consistency(fetches)
        stats = {
            "total": runtimes,
            "feed": feed_times,
            "compute": compute_times,
            "fetch": fetch_times,
            "stable": stable,
            "diff": max_diff
        }
    else:
        stats = {
            "total": runtimes,
            "feed": feed_times,
            "compute": compute_times,
            "fetch": fetch_times
        }
    stats["framework"] = "paddle"
    stats["version"] = paddle.__version__
    stats["name"] = self.name
    stats["device"] = "GPU" if use_gpu else "CPU"
    utils.print_stat(stats, log_level=log_level)
    return outputs
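
# Hypothetical usage sketch: `OpBenchmark` stands in for a subclass that
# builds startup_program/main_program and the feed/fetch helpers used above.
#
#   bench = OpBenchmark()
#   bench.run_with_executor(use_gpu=True, repeat=100, check_output=True)
#   bench.run_with_core_executor(use_gpu=True, repeat=100)  # also reports the
#                                                           # feed/compute/fetch split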