def main():
    """
    Loads the config file, creates the library and controller, and starts the
    training loop.
    """

    # Load the config file
    config_filename = 'config.json'
    with open(config_filename, encoding='utf-8') as f:
        config = json.load(f)
    config_dataset = config["dataset"]          # Problem specification hyperparameters
    config_training = config["training"]        # Training hyperparameters
    config_controller = config["controller"]    # Controller hyperparameters

    # Define the dataset and library
    dataset = Dataset(**config_dataset)
    Program.set_training_data(dataset)
    Program.set_library(dataset.function_set, dataset.n_input_var)
    print("Ground truth expression:\n{}".format(indent(dataset.pretty(), '\t')))

    with tf.Session() as sess:
        # Instantiate the controller
        controller = Controller(sess,
                                debug=config_training["debug"],
                                summary=config_training["summary"],
                                **config_controller)
        learn(sess, controller, **config_training)
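# A standard entry-point guard (not shown in the snippet above) lets the script
# be executed directly while remaining importable as a module:
if __name__ == "__main__":
    main()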
def setup(self, seed=0):
    # Clear the cache, reset the compute graph, and set the seed
    Program.clear_cache()
    tf.reset_default_graph()
    self.seed(seed)  # Must be called _after_ resetting graph

    self.pool = self.make_pool()
    self.prior = self.make_prior()

    # if self.config_task['enforce_sum']:
    #     n_tensors = 0
    #     for input in self.config_task['dataset_info']['input']:
    #         # count the number of tensors in the inputs, if any:
    #         if input in ['T1', 'T2', 'T3', 'T4', 'T5', 'T6', 'T7', 'T8', 'T9', 'T10']:
    #             n_tensors += 1
    # else:
    #     n_tensors = 1
    # for ii in range(n_tensors):

    graph = tf.Graph()
    with graph.as_default():
        self.seed(seed)
        self.sess = tf.Session()
        new_controller = Controller(self.sess,
                                    self.prior,
                                    self.config_task['enforce_sum'],
                                    seed,
                                    **self.config_controller)
        new_controller.sess.run(tf.global_variables_initializer())  # initializer should be part of the graph
    self.controller = new_controller
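# The per-seed reset pattern used by setup() (build a fresh graph, create the
# session inside it, then run the initializer in that same graph) can be
# sketched in isolation. This is a minimal TF1-style illustration under that
# assumption, not the project's actual classes:
import tensorflow as tf

def fresh_session(seed):
    """Build a new graph, seed it, and return a session bound to that graph."""
    graph = tf.Graph()
    with graph.as_default():
        tf.set_random_seed(seed)  # must be set inside the new graph
        sess = tf.Session(graph=graph)
        # ... build the model (e.g., the controller) here ...
        sess.run(tf.global_variables_initializer())  # initializer op lives in this graph
    return sess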
def make_controller(self):
    # Multi-session variant: build one controller per session in self.sess
    controller = []
    for sess in self.sess:
        controller.append(Controller(sess, self.prior, **self.config_controller))
    return controller
def fit(self, X, y):
    # Define the dataset and library
    dataset = Dataset(X, y)
    Program.clear_cache()
    Program.set_training_data(dataset)
    Program.set_library(
        ['add', 'sub', 'mul', 'div', 'sin', 'cos', 'exp', 'log'],
        X.shape[1])

    tf.reset_default_graph()
    # Fix the TensorFlow seed for reproducibility
    tf.set_random_seed(0)

    with tf.Session() as sess:
        # Instantiate the controller
        controller = Controller(sess,
                                debug=False,
                                summary=False,
                                **self.config_controller)

        # Train the controller; learn() returns the resulting Program
        result: Program = learn(sess,
                                controller,
                                **self.config_training,
                                return_estimator=True)  # r, base_r, expression, traversal
    self.result = result
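# Minimal usage sketch for the scikit-learn-style fit() above. The estimator
# class name used here (DeepSymbolicRegressor) is an assumption for
# illustration; substitute whatever class actually defines this fit() method.
import numpy as np

X = np.random.uniform(-1.0, 1.0, size=(100, 2))
y = np.sin(X[:, 0]) + X[:, 1] ** 2

model = DeepSymbolicRegressor()   # hypothetical wrapper exposing fit()
model.fit(X, y)
print(model.result)               # Program returned by learn(..., return_estimator=True)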
def train_dsr(name_and_seed, config_dataset, config_controller, config_training):
    """Trains DSR and returns dict of reward, expression, and traversal"""

    name, seed = name_and_seed

    try:
        import tensorflow as tf
        from dsr.controller import Controller
        from dsr.train import learn

        # Ignore TensorFlow warnings
        os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
        tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)
    except Exception:
        # Silently skip TF-specific setup if the imports or logging calls fail
        pass

    start = time.time()

    # Rename the output file
    config_training["output_file"] = "dsr_{}_{}.csv".format(name, seed)

    # Define the dataset and library
    dataset = get_dataset(name, config_dataset)
    Program.clear_cache()
    Program.set_training_data(dataset)
    Program.set_library(dataset.function_set, dataset.n_input_var)

    tf.reset_default_graph()

    # Shift actual seed by checksum to ensure it's different across different benchmarks
    tf.set_random_seed(seed + zlib.adler32(name.encode("utf-8")))

    with tf.Session() as sess:
        # Instantiate the controller
        controller = Controller(sess,
                                debug=config_training["debug"],
                                summary=config_training["summary"],
                                **config_controller)

        # Train the controller
        result = learn(sess, controller, **config_training)  # r, base_r, expression, traversal

    result["name"] = name
    result["t"] = time.time() - start
    result["seed"] = seed

    return result
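# Sketch of fanning train_dsr() out over several benchmark/seed pairs with a
# process pool. The benchmark names and the three config dicts are placeholders
# assumed to match whatever config.json defines; they are not from the source.
from functools import partial
from itertools import product
from multiprocessing import Pool

work = partial(train_dsr,
               config_dataset=config_dataset,
               config_controller=config_controller,
               config_training=config_training)
names_and_seeds = list(product(["Nguyen-1", "Nguyen-2"], range(2)))

with Pool(processes=4) as pool:
    results = pool.map(work, names_and_seeds)   # one result dict per (name, seed)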
def make_controller(self):
    controller = Controller(self.sess, self.prior, **self.config_controller)
    return controller