def log_config(self, hyperp_value_lst, searcher_evaluation_token):
    """Logs a config JSON describing the evaluation to be done.

    The config JSON contains the ordered sequence of hyperparameter values
    that allows the same evaluation to be replicated given the same search
    space, and the searcher evaluation token, which can be given back to the
    same searcher, allowing it to update its state.

    The searcher evaluation token is returned by the searcher when a new
    architecture to evaluate is sampled. See, for example,
    :meth:`deep_architect.searchers.MCTSSearcher.sample`. The format of the
    searcher evaluation token is searcher dependent, but it should be JSON
    serializable in all cases.

    Creates ``config.json`` in the evaluation log folder.

    Args:
        hyperp_value_lst (list[object]): List with the sequence of JSON
            serializable hyperparameter values that define the architecture
            to evaluate.
        searcher_evaluation_token (dict[str, object]): JSON serializable
            dictionary that is sufficient, when given back to the searcher
            along with the results, for the searcher to update its state
            appropriately.
    """
    assert not ut.file_exists(self.config_filepath)
    config_d = {
        'hyperp_value_lst': hyperp_value_lst,
        'searcher_evaluation_token': searcher_evaluation_token
    }
    ut.write_jsonfile(config_d, self.config_filepath)
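# Illustrative sketch (hypothetical values, not from the source): the
# resulting ``config.json`` pairs the ordered hyperparameter values with the
# searcher token, which is everything needed to replay the evaluation under
# the same search space.
import json

example_config_d = {
    'hyperp_value_lst': [32, 'relu', 0.5],  # ordered hyperparameter values
    'searcher_evaluation_token': {'node_id': 7}  # searcher dependent
}
print(json.dumps(example_config_d, indent=2))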
def log_results(self, results):
    """Logs the results of evaluating an architecture.

    The results dictionary contains metrics and information about the
    evaluated architecture. In machine learning, evaluation often involves
    training the model on a training set and evaluating it on a validation
    set. In domains other than machine learning, other forms of evaluation
    may make sense.

    Creates ``results.json`` in the evaluation log folder.

    Args:
        results (dict[str, object]): Dictionary of JSON serializable metrics
            and information about the evaluated architecture.
    """
    assert not ut.file_exists(self.results_filepath)
    assert ut.file_exists(self.config_filepath)
    assert isinstance(results, dict)
    ut.write_jsonfile(results, self.results_filepath)
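# Illustrative sketch (hypothetical metric names): any JSON serializable
# dictionary is accepted, e.g. validation accuracy and training time for a
# typical machine learning evaluation.
example_results = {
    'validation_accuracy': 0.91,
    'num_parameters': 1200000,
    'training_time_in_seconds': 341.7
}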
def load_state(self, folder_name):
    """Restores the evolution searcher state saved under ``folder_name``."""
    filepath = join_paths([folder_name, 'evolution_searcher.json'])
    if not file_exists(filepath):
        raise RuntimeError("Load file does not exist")

    state = read_jsonfile(filepath)
    self.P = state['P']
    self.S = state['S']
    self.regularized = state['regularized']
    self.population = deque(state['population'])
    self.initializing = state['initializing']
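# A sketch of the symmetric ``save_state`` implied by the fields read above
# (the project's actual implementation may differ): it serializes the same
# keys so that ``load_state`` can restore them.
def save_state(self, folder_name):
    state = {
        'P': self.P,
        'S': self.S,
        'regularized': self.regularized,
        'population': list(self.population),  # deque is not JSON serializable
        'initializing': self.initializing
    }
    write_jsonfile(state, join_paths([folder_name, 'evolution_searcher.json']))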
def process_config_and_args():
    parser = argparse.ArgumentParser("MPI Job for architecture search")
    parser.add_argument('--config',
                        '-c',
                        action='store',
                        dest='config_name',
                        default='normal')
    parser.add_argument(
        '--config-file',
        action='store',
        dest='config_file',
        default='/deep_architect/kubernetes/mongo_communicator/train_best_config.json')
    parser.add_argument('--bucket',
                        '-b',
                        action='store',
                        dest='bucket',
                        default=BUCKET_NAME)

    # Other arguments
    parser.add_argument('--resume',
                        '-r',
                        action='store_true',
                        dest='resume',
                        default=False)
    parser.add_argument('--mongo-host',
                        '-m',
                        action='store',
                        dest='mongo_host',
                        default='127.0.0.1')
    parser.add_argument('--mongo-port',
                        '-p',
                        action='store',
                        dest='mongo_port',
                        type=int,
                        default=27017)
    parser.add_argument('--repetition', default=0)
    options = parser.parse_args()

    configs = ut.read_jsonfile(options.config_file)
    config = configs[options.config_name]
    config['bucket'] = options.bucket

    comm = MongoCommunicator(host=options.mongo_host,
                             port=options.mongo_port,
                             data_refresher=True)

    # SET UP GOOGLE STORE FOLDER
    config['search_name'] = config['search_name'] + '_' + str(
        options.repetition)
    search_logger = sl.SearchLogger(config['search_folder'],
                                    config['search_name'])
    search_data_folder = search_logger.get_search_data_folderpath()
    config['save_filepath'] = ut.join_paths(
        (search_data_folder, config['searcher_file_name']))
    config['eval_path'] = sl.get_all_evaluations_folderpath(
        config['search_folder'], config['search_name'])
    config['full_search_folder'] = sl.get_search_folderpath(
        config['search_folder'], config['search_name'])
    config['results_file'] = os.path.join(
        config['results_prefix'] + '_' + str(options.repetition),
        config['results_file'])

    state = {'finished': 0, 'best_accuracy': 0.0}
    if options.resume:
        try:
            download_folder(search_data_folder, config['full_search_folder'],
                            config['bucket'])
            if ut.file_exists(config['save_filepath']):
                old_state = ut.read_jsonfile(config['save_filepath'])
                state['finished'] = old_state['finished']
                state['best_accuracy'] = old_state['best_accuracy']
        except Exception:
            # Fall back to a fresh state if the previous one cannot be
            # downloaded.
            pass
    return comm, search_logger, state, config
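# Illustrative sketch (hypothetical values): the state restored on --resume is
# the flat JSON dictionary that the training loop is assumed to save
# periodically to config['save_filepath'].
example_saved_state = {'finished': 12, 'best_accuracy': 0.87}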
def results_exist(self):
    return ut.file_exists(self.results_filepath)
def read_results(self):
    assert ut.file_exists(self.results_filepath)
    return ut.read_jsonfile(self.results_filepath)
def config_exists(self):
    return ut.file_exists(self.config_filepath)
def read_config(self):
    assert ut.file_exists(self.config_filepath)
    return ut.read_jsonfile(self.config_filepath)
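# Usage sketch (``logger`` is a hypothetical instance of the class defining
# these methods): the ``*_exists`` / ``read_*`` pairs make it easy to skip
# incomplete evaluations when post-processing a search log.
if logger.config_exists() and logger.results_exist():
    config_d = logger.read_config()
    results_d = logger.read_results()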
def process_config_and_args():
    parser = argparse.ArgumentParser("MPI Job for architecture search")
    parser.add_argument('--config',
                        '-c',
                        action='store',
                        dest='config_name',
                        default='normal')
    parser.add_argument(
        '--config-file',
        action='store',
        dest='config_file',
        default='/deep_architect/examples/contrib/kubernetes/experiment_config.json')
    parser.add_argument('--bucket',
                        '-b',
                        action='store',
                        dest='bucket',
                        default=BUCKET_NAME)

    # Other arguments
    parser.add_argument('--resume',
                        '-r',
                        action='store_true',
                        dest='resume',
                        default=False)
    parser.add_argument('--mongo-host',
                        '-m',
                        action='store',
                        dest='mongo_host',
                        default='127.0.0.1')
    parser.add_argument('--mongo-port',
                        '-p',
                        action='store',
                        dest='mongo_port',
                        type=int,
                        default=27017)
    parser.add_argument('--log',
                        choices=['debug', 'info', 'warning', 'error'],
                        default='info')
    parser.add_argument('--repetition', default=0)
    options = parser.parse_args()

    numeric_level = getattr(logging, options.log.upper(), None)
    if not isinstance(numeric_level, int):
        raise ValueError('Invalid log level: %s' % options.log)
    logging.getLogger().setLevel(numeric_level)

    configs = ut.read_jsonfile(options.config_file)
    config = configs[options.config_name]
    config['bucket'] = options.bucket

    comm = MongoCommunicator(host=options.mongo_host,
                             port=options.mongo_port,
                             data_refresher=True,
                             refresh_period=10)

    datasets = {
        'cifar10': ('data/cifar10/', 10),
    }
    _, num_classes = datasets[config['dataset']]
    search_space_factory = name_to_search_space_factory_fn[
        config['search_space']](num_classes)

    config['save_every'] = config.get('save_every', 1)
    searcher = name_to_searcher_fn[config['searcher']](
        search_space_factory.get_search_space)
    config['num_epochs'] = config.get('epochs', -1)
    config['num_samples'] = config.get('samples', -1)

    # SET UP GOOGLE STORE FOLDER
    config['search_name'] = config['search_name'] + '_' + str(
        options.repetition)
    search_logger = sl.SearchLogger(config['search_folder'],
                                    config['search_name'])
    search_data_folder = search_logger.get_search_data_folderpath()
    config['save_filepath'] = ut.join_paths(
        (search_data_folder, config['searcher_file_name']))
    config['eval_path'] = sl.get_all_evaluations_folderpath(
        config['search_folder'], config['search_name'])
    config['full_search_folder'] = sl.get_search_folderpath(
        config['search_folder'], config['search_name'])
    config['eval_hparams'] = config.get('eval_hparams', {})

    state = {
        'epochs': 0,
        'models_sampled': 0,
        'finished': 0,
        'best_accuracy': 0.0
    }
    if options.resume:
        try:
            download_folder(search_data_folder, config['full_search_folder'],
                            config['bucket'])
            searcher.load_state(search_data_folder)
            if ut.file_exists(config['save_filepath']):
                old_state = ut.read_jsonfile(config['save_filepath'])
                state['epochs'] = old_state['epochs']
                state['models_sampled'] = old_state['models_sampled']
                state['finished'] = old_state['finished']
                state['best_accuracy'] = old_state['best_accuracy']
        except Exception:
            # Fall back to a fresh state if the previous one cannot be
            # downloaded.
            pass
    return comm, search_logger, searcher, state, config
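# Illustrative sketch of one named entry in the experiment config file read
# above. The keys follow the accesses in process_config_and_args; the values
# and placeholder names are hypothetical.
example_configs = {
    'normal': {
        'dataset': 'cifar10',
        'search_space': '<key of name_to_search_space_factory_fn>',
        'searcher': '<key of name_to_searcher_fn>',
        'search_folder': 'logs',
        'search_name': 'cifar10_search',
        'searcher_file_name': 'searcher_state.json',
        'epochs': 25,  # optional; config['num_epochs'] defaults to -1
        'samples': 128,  # optional; config['num_samples'] defaults to -1
        'save_every': 1,  # optional
        'eval_hparams': {}  # optional
    }
}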