def main():
    # Loading the config file.
    cfg = ut.get_config()
    # Creating the searcher.
    searcher = RandomSearcher(ss.search_space_fn)
    # Creating the search folder for logging information.
    sl.create_search_folderpath(cfg['folderpath'],
                                cfg['search_name'],
                                abort_if_exists=cfg["abort_if_exists"],
                                delete_if_exists=cfg['delete_if_exists'],
                                create_parent_folders=True)
    # Search loop.
    for evaluation_id in range(cfg['num_samples']):
        logger = sl.EvaluationLogger(cfg["folderpath"], cfg["search_name"],
                                     evaluation_id)
        if not logger.config_exists():
            (inputs, outputs, hyperp_value_lst,
             searcher_eval_token) = searcher.sample()
            # Logging the sampled configuration (including the graph).
            logger.log_config(hyperp_value_lst, searcher_eval_token)
            vi.draw_graph(
                outputs,
                draw_module_hyperparameter_info=False,
                print_to_screen=False,
                out_folderpath=logger.get_evaluation_data_folderpath())
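For concreteness, this snippet assumes a configuration with at least the keys accessed above. A minimal sketch follows; the values are illustrative, and how `ut.get_config()` locates and parses the file (typically a JSON config) is an assumption about `deep_architect.utils` rather than something shown here.

# Illustrative config; the keys match the cfg[...] accesses in the loop above.
cfg = {
    "folderpath": "logs",            # root folder for all search logs
    "search_name": "random_search",  # subfolder for this particular search
    "abort_if_exists": False,        # fail fast if the folder already exists
    "delete_if_exists": True,        # wipe a previous search of the same name
    "num_samples": 16,               # number of architectures to sample
}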
def main():
    # Loading the config file.
    cfg = ut.get_config()
    num_classes = 10
    num_samples = cfg['num_samples']
    # Loading the data.
    (Xtrain, ytrain, Xval, yval, Xtest, ytest) = load_mnist('data/mnist')
    train_dataset = InMemoryDataset(Xtrain, ytrain, True)
    val_dataset = InMemoryDataset(Xval, yval, False)
    test_dataset = InMemoryDataset(Xtest, ytest, False)
    # Creating the evaluator.
    evaluator = SimpleClassifierEvaluator(
        train_dataset,
        val_dataset,
        num_classes,
        './temp',
        max_eval_time_in_minutes=cfg['max_eval_time_in_minutes'],
        log_output_to_terminal=True,
        test_dataset=test_dataset)
    # Creating the search space.
    search_space_fn = lambda: css_dnn.dnn_net(num_classes)
    search_space_factory = mo.SearchSpaceFactory(search_space_fn)
    # Creating the search folder for logging information.
    sl.create_search_folderpath(cfg["folderpath"],
                                cfg["search_name"],
                                delete_if_exists=cfg['delete_if_exists'],
                                abort_if_exists=False,
                                create_parent_folders=True)
    # Creating the searcher.
    searcher = RandomSearcher(search_space_factory.get_search_space)
    # Search loop.
    for evaluation_id in range(num_samples):
        eval_logger = sl.EvaluationLogger(cfg["folderpath"],
                                          cfg["search_name"], evaluation_id)
        if not eval_logger.config_exists():
            inputs, outputs, hyperp_value_lst, eval_token = searcher.sample()
            results = evaluator.eval(inputs, outputs)
            # Logging the configuration and results (including the graph).
            eval_logger.log_config(hyperp_value_lst, eval_token)
            eval_logger.log_results(results)
            vi.draw_graph(
                outputs,
                draw_module_hyperparameter_info=True,
                print_to_screen=False,
                out_folderpath=eval_logger.get_evaluation_data_folderpath())
            # Updating the searcher with the results of the evaluation.
            searcher.update(results['validation_accuracy'], eval_token)
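Both logging examples assume a set of aliased imports. The paths below follow the layout of the deep_architect repository; the contrib paths in particular may shift between versions, so treat them as assumptions (the first snippet's `ss` alias points at a user-defined search space module and is not reproduced here).

import deep_architect.utils as ut
import deep_architect.modules as mo
import deep_architect.search_logging as sl
import deep_architect.visualization as vi
from deep_architect.searchers.random import RandomSearcher
# The contrib paths below may differ across versions of the repository.
from deep_architect.contrib.misc.datasets.loaders import load_mnist
from deep_architect.contrib.misc.datasets.dataset import InMemoryDataset
import deep_architect.contrib.misc.search_spaces.tensorflow.dnn as css_dnn
from deep_architect.contrib.misc.evaluators.tensorflow.classification import SimpleClassifierEvaluator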
def main():
    num_classes = 10
    num_samples = 8
    (Xtrain, ytrain, Xval, yval, Xtest, ytest) = load_mnist('data/mnist')
    train_dataset = InMemoryDataset(Xtrain, ytrain, True)
    val_dataset = InMemoryDataset(Xval, yval, False)
    test_dataset = InMemoryDataset(Xtest, ytest, False)
    evaluator = SimpleClassifierEvaluator(train_dataset,
                                          val_dataset,
                                          num_classes,
                                          './temp',
                                          max_eval_time_in_minutes=1.0,
                                          log_output_to_terminal=True)
    search_space_fn = lambda: css_dnn.dnn_net(num_classes)
    searcher = RandomSearcher(search_space_fn)
    for _ in range(num_samples):
        # sample() returns (inputs, outputs, hyperp_value_lst, searcher_eval_token);
        # the token, not the hyperparameter value list, is what update() expects back.
        inputs, outputs, _, searcher_eval_token = searcher.sample()
        val_acc = evaluator.eval(inputs, outputs)['validation_accuracy']
        searcher.update(val_acc, searcher_eval_token)
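Because `update` is part of the searcher interface, this bare-bones loop is searcher-agnostic: a random searcher simply ignores the reported score, while a model-based one uses it to bias future samples. As a sketch, swapping in the SMBO searcher from the table below changes only the construction (constructor arguments as used there):

from deep_architect.searchers.smbo_random import SMBOSearcher
from deep_architect.surrogates.hashing import HashingSurrogate

# Same sample/update loop as above; only the searcher construction changes.
searcher = SMBOSearcher(search_space_fn, HashingSurrogate(2048, 1), 256, 0.1)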
from deep_architect.searchers.random import RandomSearcher
from deep_architect.searchers.mcts import MCTSSearcher
from deep_architect.searchers.smbo_random import SMBOSearcher
from deep_architect.searchers.smbo_mcts import SMBOSearcherWithMCTSOptimizer
from deep_architect.surrogates.hashing import HashingSurrogate

name_to_get_searcher_fn = {
    'random':
    lambda ssf: RandomSearcher(ssf),
    'smbo_rand_256':
    lambda ssf: SMBOSearcher(ssf, HashingSurrogate(2048, 1), 256, 0.1),
    'smbo_rand_512':
    lambda ssf: SMBOSearcher(ssf, HashingSurrogate(2048, 1), 512, 0.1),
    'smbo_mcts_256':
    lambda ssf: SMBOSearcherWithMCTSOptimizer(ssf, HashingSurrogate(2048, 1),
                                              256, 0.1, 1),
    'smbo_mcts_512':
    lambda ssf: SMBOSearcherWithMCTSOptimizer(ssf, HashingSurrogate(2048, 1),
                                              512, 0.1, 1)
}
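A table like this turns the choice of searcher into configuration rather than code. A minimal usage sketch, assuming a search space factory `ssf` as in the surrounding examples (`cfg['searcher']` is a hypothetical config key):

# Look up the constructor by name and build the searcher from the factory.
searcher_name = cfg.get('searcher', 'random')  # hypothetical config key
searcher = name_to_get_searcher_fn[searcher_name](ssf.get_search_space)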
# Total number of models to be evaluated during the search.
num_total_models = 25

# Now we set up the datasets and the search space factory.
X_train, y_train, X_val, y_val, _, _ = load_mnist('data/mnist',
                                                  normalize_range=True)
train_dataset = InMemoryDataset(X_train, y_train, True)
val_dataset = InMemoryDataset(X_val, y_val, False)
ssf = mo.SearchSpaceFactory(lambda: dnn.dnn_net(10))

# Each process should have a unique rank. The process with rank 0 acts as the
# master process in charge of the searcher. Every other process acts as a
# worker that evaluates the architectures sent to it.
if comm.get_rank() == 0:
    searcher = RandomSearcher(ssf.get_search_space)

    models_sampled = 0
    killed = 0
    finished = 0

    # This process keeps going as long as we have not received results for all
    # sampled models and not all worker processes have been killed. Kill
    # signals start being sent out once the searcher has sampled the number of
    # models specified by `num_total_models`.
    while finished < models_sampled or killed < comm.num_workers:
        if models_sampled < num_total_models:
            # Check the communicator to see if the worker queue is ready for a
            # new architecture. If so, publish an architecture to the queue.
            if comm.is_ready_to_publish_architecture():
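The excerpt stops at the publish check, but the bookkeeping already tells the story: `models_sampled` counts architectures handed out, `finished` counts results received back, and `killed` counts workers told to shut down, so the loop exits only once every sampled model has been scored and every worker dismissed. Below is a minimal sketch of how the rest of the master loop might be organized; `publish`, `kill_worker`, and `receive_results` are hypothetical stand-ins, not the actual communicator API.

# Sketch only: publish(), kill_worker(), and receive_results() are hypothetical
# stand-ins for the real communicator calls.
eval_tokens = {}
while finished < models_sampled or killed < comm.num_workers:
    if models_sampled < num_total_models:
        if comm.is_ready_to_publish_architecture():
            _, _, hyperp_value_lst, eval_token = searcher.sample()
            publish(hyperp_value_lst, models_sampled)  # send work to a worker
            eval_tokens[models_sampled] = eval_token   # master keeps the token
            models_sampled += 1
    elif comm.is_ready_to_publish_architecture() and killed < comm.num_workers:
        kill_worker()  # no work left: tell one worker to exit
        killed += 1
    for evaluation_id, results in receive_results():  # drain finished evaluations
        searcher.update(results['validation_accuracy'], eval_tokens[evaluation_id])
        finished += 1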