def test(self, test_data_dir):
    test_problems_loader = init_problems_loader(test_data_dir)
    results = []

    while test_problems_loader.has_next():
        test_problems, test_filename = test_problems_loader.get_next()

        epoch_test_cost = 0.0
        accuracy_by_vars = []
        accuracy_by_prob = []
        # epoch_test_mat = ConfusionMatrix()

        for problem in test_problems:
            d = self.build_feed_dict(problem)
            logits, cost = self.sess.run([self.logits, self.cost], feed_dict=d)
            epoch_test_cost += cost
            (ac_by_var, ac_by_pro) = self.accuracy(np.array(logits), np.array(problem.labels))
            accuracy_by_vars.append(ac_by_var)
            accuracy_by_prob.append(ac_by_pro)
            # epoch_test_mat.update(problem.is_sat, logits > 0)

        epoch_test_cost /= len(test_problems)
        av_ac_by_var = np.mean(accuracy_by_vars)
        av_ac_by_pro = np.mean(accuracy_by_prob)
        # epoch_test_mat = epoch_test_mat.get_percentages()

        results.append((test_filename, epoch_test_cost, av_ac_by_var, av_ac_by_pro))

    return results
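# --- Sketch (assumption, not from this repo): `self.accuracy(logits, labels)` is
# called above but defined elsewhere. A minimal implementation consistent with the
# call sites would threshold each logit at 0 and report (i) the fraction of
# variables predicted correctly and (ii) whether the whole problem is correct.
# The real method's semantics may differ.
import numpy as np

def accuracy(logits, labels):
    preds = (logits > 0).astype(labels.dtype)     # positive logit => variable assigned True
    per_var = float(np.mean(preds == labels))     # per-variable accuracy
    per_problem = float(np.all(preds == labels))  # 1.0 iff every variable is correct
    return per_var, per_problem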
def train_epoch(self, epoch):
    if self.train_problems_loader is None:
        self.train_problems_loader = init_problems_loader(self.opts.train_dir)

    epoch_start = time.perf_counter()  # time.clock() was removed in Python 3.8

    epoch_train_cost = 0.0
    accuracy_by_var = []
    accuracy_by_problem = []
    # epoch_train_mat = ConfusionMatrix()

    train_problems, train_filename = self.train_problems_loader.get_next()

    for problem in train_problems:
        d = self.build_feed_dict(problem)
        # one optimization step per problem batch
        _, logits, cost = self.sess.run([self.apply_gradients, self.logits, self.cost], feed_dict=d)
        epoch_train_cost += cost
        (av, ap) = self.accuracy(np.array(logits), np.array(problem.labels))
        accuracy_by_var.append(av)
        accuracy_by_problem.append(ap)
        # epoch_train_mat.update(problem.is_sat, logits > 0)

    epoch_train_cost /= len(train_problems)
    av_ac_by_var = np.mean(accuracy_by_var)
    av_ac_by_pro = np.mean(accuracy_by_problem)
    # epoch_train_mat = epoch_train_mat.get_percentages()

    epoch_end = time.perf_counter()

    learning_rate = self.sess.run(self.learning_rate)
    self.save(epoch)
    return (train_filename, epoch_train_cost, av_ac_by_var, av_ac_by_pro,
            learning_rate, epoch_end - epoch_start)
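# --- Sketch (assumption): `build_feed_dict` is defined elsewhere in the class. For a
# NeuroSAT-style model it would map the problem's sparse literal/clause structure and
# labels onto the graph's placeholders. All placeholder and field names below are
# illustrative guesses, not the repo's actual API.
def build_feed_dict(self, problem):
    return {
        self.L_unpack:  problem.L_unpack_indices,  # (literal, clause) occurrence indices
        self.n_vars:    problem.n_vars,
        self.n_clauses: problem.n_clauses,
        self.labels:    problem.labels,
        self.is_sat:    problem.is_sat,
    }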
def train_epoch(self, epoch):
    if self.train_problems_loader is None:
        self.train_problems_loader = init_problems_loader(self.opts.train_dir)

    epoch_start = time.perf_counter()  # time.clock() was removed in Python 3.8

    epoch_train_cost = 0.0
    epoch_train_mat = ConfusionMatrix()

    train_problems, train_filename = self.train_problems_loader.get_next()

    for problem in train_problems:
        d = self.build_feed_dict(problem)
        _, logits, cost = self.sess.run(
            [self.apply_gradients, self.logits, self.cost], feed_dict=d)
        epoch_train_cost += cost
        epoch_train_mat.update(problem.is_sat, logits > 0)

    epoch_train_cost /= len(train_problems)
    epoch_train_mat = epoch_train_mat.get_percentages()
    epoch_end = time.perf_counter()

    learning_rate = self.sess.run(self.learning_rate)
    self.save(epoch)
    return (train_filename, epoch_train_cost, epoch_train_mat, learning_rate,
            epoch_end - epoch_start)
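# --- Sketch (assumption): a minimal ConfusionMatrix compatible with the calls above.
# `update(is_sat, preds)` accumulates counts from boolean ground truth / predictions,
# and `get_percentages()` normalizes each cell by the total. Cell names (tp, fp, tn,
# fn) are illustrative; the real class may expose different fields.
import numpy as np

class ConfusionMatrix(object):
    def __init__(self):
        self.tp = self.fp = self.tn = self.fn = 0

    def update(self, is_sat, preds):
        is_sat = np.asarray(is_sat, dtype=bool)
        preds = np.asarray(preds, dtype=bool)
        self.tp += int(np.sum(is_sat & preds))
        self.fn += int(np.sum(is_sat & ~preds))
        self.fp += int(np.sum(~is_sat & preds))
        self.tn += int(np.sum(~is_sat & ~preds))

    def get_percentages(self):
        total = max(self.tp + self.fp + self.tn + self.fn, 1)
        return {k: 100.0 * v / total
                for k, v in [('tp', self.tp), ('fp', self.fp),
                             ('tn', self.tn), ('fn', self.fn)]}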
def train_epoch(self, epoch):
    if self.train_problems_loader is None:
        self.train_problems_loader = init_problems_loader(self.opts.train_dir)

    epoch_start = time.perf_counter()  # time.clock() was removed in Python 3.8

    epoch_train_cost = 0.0
    accuracy_by_var = []      # must be initialized: both lists are appended to below
    accuracy_by_problem = []
    epoch_train_mat = ConfusionMatrix()  # created but never updated in this variant

    train_problems, train_filename = self.train_problems_loader.get_next()

    for problem in train_problems:
        d = self.build_feed_dict(problem)
        A_policy, L_policy = self.sess.run([self.A_policy, self.L_policy], feed_dict=d)
        A_move, L_move = self.sample_moves(A_policy, L_policy)
        ## TO HERE; need to evaluate is_sat
        ## serious concern: by random, only 4 percent of games are sat (the base is biased!!!)
        _, logits, cost = self.sess.run(
            [self.apply_gradients, self.logits, self.cost], feed_dict=d)
        epoch_train_cost += cost
        (av, ap) = self.accuracy(np.array(logits), np.array(problem.labels))
        accuracy_by_var.append(av)
        accuracy_by_problem.append(ap)
        # epoch_train_mat.update(problem.is_sat, logits > 0)

    epoch_train_cost /= len(train_problems)
    av_ac_by_var = np.mean(accuracy_by_var)
    av_ac_by_pro = np.mean(accuracy_by_problem)
    # epoch_train_mat = epoch_train_mat.get_percentages()

    epoch_end = time.perf_counter()

    learning_rate = self.sess.run(self.learning_rate)
    self.save(epoch)
    return (train_filename, epoch_train_cost, av_ac_by_var, av_ac_by_pro,
            learning_rate, epoch_end - epoch_start)
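# --- Sketch (assumption): `self.sample_moves(A_policy, L_policy)` is not shown in
# this file. A simple version samples one index from each policy's probability
# distribution, so training explores moves stochastically instead of always taking
# the argmax. It assumes each policy is a 1-D probability vector; shapes and naming
# in the real code may differ.
import numpy as np

def sample_moves(A_policy, L_policy, rng=np.random):
    A_policy = np.asarray(A_policy, dtype=np.float64)
    L_policy = np.asarray(L_policy, dtype=np.float64)
    # renormalize defensively before sampling
    A_move = rng.choice(len(A_policy), p=A_policy / A_policy.sum())
    L_move = rng.choice(len(L_policy), p=L_policy / L_policy.sum())
    return A_move, L_move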
def test(self, test_data_dir):
    test_problems_loader = init_problems_loader(test_data_dir)
    results = []

    while test_problems_loader.has_next():
        test_problems, test_filename = test_problems_loader.get_next()

        epoch_test_cost = 0.0
        epoch_test_mat = ConfusionMatrix()

        for problem in test_problems:
            d = self.build_feed_dict(problem)
            logits, cost = self.sess.run([self.logits, self.cost], feed_dict=d)
            epoch_test_cost += cost
            epoch_test_mat.update(problem.is_sat, logits > 0)

        epoch_test_cost /= len(test_problems)
        epoch_test_mat = epoch_test_mat.get_percentages()

        results.append((test_filename, epoch_test_cost, epoch_test_mat))

    return results
def train_epoch(self, epoch):
    if self.train_problems_loader is None:
        self.train_problems_loader = init_problems_loader(self.opts.train_dir)

    epoch_start = time.perf_counter()  # time.clock() was removed in Python 3.8

    epoch_train_cost = 0.0

    train_problems, train_filename = self.train_problems_loader.get_next()

    for problem in train_problems:
        samples = [sampler.sample(self.opts.list_size) for sampler in problem.sampler]
        candidates = [sample[0] for sample in samples]
        labels = [sample[1] for sample in samples]
        d = self.build_feed_dict(problem, candidates, labels)
        # self.grades holds the ranking scores for the sampled candidates
        _, logits, cost = self.sess.run([self.apply_gradients, self.grades, self.cost], feed_dict=d)
        epoch_train_cost += cost

    epoch_train_cost /= len(train_problems)
    epoch_end = time.perf_counter()

    learning_rate = self.sess.run(self.learning_rate)
    self.save(epoch)
    return (train_filename, epoch_train_cost, learning_rate, epoch_end - epoch_start)
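# --- Sketch (assumption): each element of `problem.sampler` is expected to return a
# (candidates, labels) pair of length `list_size`, which build_feed_dict feeds to the
# listwise ranking head (`self.grades`). A toy sampler drawing from a pool of
# pre-scored candidates might look like this; the real sampler is not shown here.
import random

class CandidateSampler(object):
    def __init__(self, pool):
        # pool: list of (candidate, label) pairs gathered for one problem
        self.pool = pool

    def sample(self, list_size):
        batch = random.sample(self.pool, min(list_size, len(self.pool)))
        candidates = [c for c, _ in batch]
        labels = [l for _, l in batch]
        return candidates, labels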
def train_epoch(self, epoch):
    if self.train_problems_loader is None:
        self.train_problems_loader = init_problems_loader(self.opts.train_dir)

    epoch_start = time.perf_counter()  # time.clock() was removed in Python 3.8

    epoch_train_cost = 0.0
    epoch_train_mat = ConfusionMatrix()

    train_problems, train_filename = self.train_problems_loader.get_next()

    for problem in train_problems:
        d = self.build_feed_dict(problem)
        self.batchnum = problem.n_vars // len(problem.is_sat)  # variables per problem in this batch
        acc, feature_map_out, _, logits, cost, image, issat, XX = self.sess.run(
            [self.accuracy, self.all_votes, self.apply_gradients, self.logits,
             self.cost, self.image, self.is_sat, self.merged],
            feed_dict=d)
        epoch_train_cost += cost
        epoch_train_mat.update(problem.is_sat, logits > 0)
        # print(acc)

    epoch_train_cost /= len(train_problems)
    epoch_train_mat = epoch_train_mat.get_percentages()
    epoch_end = time.perf_counter()

    learning_rate = self.sess.run(self.learning_rate)
    self.save(epoch)
    self.train_writer.add_summary(XX, epoch)
    return (train_filename, epoch_train_cost, epoch_train_mat, learning_rate,
            epoch_end - epoch_start)
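# --- Sketch (assumption): the `self.merged` / `self.train_writer` pair used above is
# standard TF1 summary plumbing. Somewhere in model construction one would expect
# roughly the following; the summary names and log directory are illustrative.
import tensorflow as tf  # TF1-style API, matching the sess.run usage above

def build_summaries(self):
    tf.summary.scalar('cost', self.cost)
    tf.summary.image('image', self.image)
    self.merged = tf.summary.merge_all()
    # per epoch, the training loop then calls: self.train_writer.add_summary(XX, epoch)
    self.train_writer = tf.summary.FileWriter('logs/train', self.sess.graph)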
                    default=100000, help='Number of epochs through data')
parser.add_argument('--n_saves_to_keep', action='store', dest='n_saves_to_keep',
                    type=int, default=4, help='Number of saved models to keep')

opts = parser.parse_args()

setattr(opts, 'commit', subprocess.check_output(['git', 'rev-parse', 'HEAD']).strip())
setattr(opts, 'hostname', subprocess.check_output(['hostname']).strip())

if opts.run_id is None:
    opts.run_id = random.randrange(sys.maxsize)

print(opts)

if not os.path.exists("snapshots/"):
    os.mkdir("snapshots")

problem_loader = init_problems_loader(opts.train_dir)
problem = problem_loader.get_next()[0][0]  # first problem of the first file

nn_filter = NeuroSAT(opts)
envs = [env(abs_filename, i, keep_history=True) for i in range(20)]

for i in range(10):
    one_batch_eval(envs, problem, nn_filter)
    for j in range(i + 1):
        one_batch_train(envs, problem, nn_filter, 40)
    list(map(lambda x: x.reset_state(), envs))
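# --- Sketch (assumption): `init_problems_loader` is used throughout but defined
# elsewhere. A minimal loader over a directory of pickled problem batches, with the
# has_next()/get_next() interface the loops above rely on, could look like this.
# File format and the reset convention are guesses; a training loop that outlives
# one pass would call reset() (or recreate the loader) when exhausted.
import os
import pickle

class ProblemsLoader(object):
    def __init__(self, filenames):
        assert filenames, "no problem files found"
        self.filenames = filenames
        self.next_file_num = 0

    def has_next(self):
        return self.next_file_num < len(self.filenames)

    def get_next(self):
        filename = self.filenames[self.next_file_num]
        with open(filename, 'rb') as f:
            problems = pickle.load(f)
        self.next_file_num += 1
        return problems, filename

    def reset(self):
        self.next_file_num = 0

def init_problems_loader(dirname):
    filenames = [os.path.join(dirname, f) for f in sorted(os.listdir(dirname))]
    return ProblemsLoader(filenames)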