def init_model(version=0.9, download_dir="./301model"):
    """Load the NAS-Bench-301 surrogate models for a given benchmark version.

    Downloads the model files into ``download_dir`` if they are not already
    present, then loads the xgb performance surrogate and the lgb runtime
    surrogate.

    Args:
        version: Benchmark version, ``0.9`` (default) or ``1.0``. Accepts a
            float or string; it is normalised to a string internally.
        download_dir: Directory the models are (or will be) stored in.

    Returns:
        Tuple ``(performance_model, runtime_model)``.
    """
    # BUG FIX: the original compared the float default 0.9 against the
    # string '0.9', so the default always selected the 1.0 models.
    # Normalise to a string so 0.9, '0.9', 1.0 and '1.0' all work.
    version_str = '0.9' if str(version) == '0.9' else '1.0'
    models_dir = os.path.join(download_dir, 'nb_models_{}'.format(version_str))
    model_paths = {
        model_name: os.path.join(models_dir, '{}_v{}'.format(model_name, version_str))
        for model_name in ['xgb', 'gnn_gin', 'lgb_runtime']
    }

    # If the models are not available at the paths, automatically download them.
    # Note: If you would like to provide your own model locations, comment this out.
    if not all(os.path.exists(model) for model in model_paths.values()):
        nb.download_models(version=version, delete_zip=True,
                           download_dir=download_dir)

    # Load the performance surrogate model.
    # NOTE: Loading the ensemble will set the seed to the same as used during
    # training (logged in the model_configs.json).
    performance_model = nb.load_ensemble(model_paths['xgb'])

    # Load the runtime surrogate model.
    runtime_model = nb.load_ensemble(model_paths['lgb_runtime'])

    return performance_model, runtime_model
def __init__(self, data_folder=default_data_folder):
    """Set up the NAS-Bench-301 search-space wrapper.

    Loads the v0.9 xgb performance surrogate and the v0.9 lgb runtime
    surrogate from ``data_folder`` and stores them as ``self.nasbench``.

    Args:
        data_folder: Root folder containing the ``nb_models`` directory.
            NOTE(review): appears to be expected to end with a path
            separator, since it is joined by string concatenation — confirm.
    """
    self.dataset = 'cifar10'
    self.search_space = 'nasbench_301'
    self.index_hash = None

    # Performance surrogate first, runtime surrogate second — consumers
    # index self.nasbench by that order.
    perf_dir = os.path.expanduser(data_folder + 'nb_models/xgb_v0.9')
    runtime_dir = os.path.expanduser(data_folder + 'nb_models/lgb_runtime_v0.9')
    self.nasbench = [nb.load_ensemble(perf_dir), nb.load_ensemble(runtime_dir)]
def get_darts_api(dataset=None):
    """Load the nb301 training data (which contains full learning curves)
    and the nb301 surrogate models.

    Args:
        dataset: Unused; kept for interface compatibility with sibling APIs.

    Returns:
        Dict with keys ``nb301_data`` (pickled training data),
        ``nb301_arches`` (list of architecture keys) and ``nb301_model``
        (``[performance_model, runtime_model]``).
    """
    import nasbench301

    data_folder = os.path.join(get_project_root(), 'data/')
    with open(os.path.join(data_folder, 'nb301_full_training.pickle'), 'rb') as f:
        nb301_data = pickle.load(f)
    nb301_arches = list(nb301_data.keys())

    # FIX: os.path.join was called with a single pre-concatenated argument
    # (data_folder + '...'), which makes the join a no-op. Pass the
    # components separately; the resulting path is identical because
    # data_folder ends with a separator.
    performance_model = nasbench301.load_ensemble(
        os.path.join(data_folder, 'nb301_models/xgb_v1.0'))
    runtime_model = nasbench301.load_ensemble(
        os.path.join(data_folder, 'nb301_models/lgb_runtime_v1.0'))
    nb301_model = [performance_model, runtime_model]

    return {'nb301_data': nb301_data,
            'nb301_arches': nb301_arches,
            'nb301_model': nb301_model}
def __init__(
    self,
    dataset,
    weights_manager,
    objective,
    rollout_type="nb301",
    with_noise=True,
    path=None,
    schedule_cfg=None,
):
    """Evaluator backed by the NB301 performance surrogate.

    Args:
        dataset / weights_manager / objective: forwarded to the base class.
        rollout_type: rollout type handled by this evaluator.
        with_noise: whether predictions should include surrogate noise.
        path: directory of the NB301 ensemble; required.
        schedule_cfg: unused here; accepted for config compatibility.
    """
    super(NB301Evaluator, self).__init__(
        dataset, weights_manager, objective, rollout_type)

    assert path is not None, "must specify benchmark path"
    self.with_noise = with_noise
    self.path = path
    # Template for DARTS-style genotypes consumed by the surrogate.
    self.genotype_type = namedtuple(
        "Genotype", "normal normal_concat reduce reduce_concat")
    self.perf_model = nb.load_ensemble(path)
from collections import namedtuple

from ConfigSpace.read_and_write import json as cs_json

import nasbench301 as nb

# ---------------------------------------------------------------------------
# Surrogate models.
# NOTE: Loading the ensemble will set the seed to the same as used during
# training (logged in the model_configs.json).
# ---------------------------------------------------------------------------
print("==> Loading performance surrogate model...")
ensemble_dir_performance = "/path/to/nb_models/gnn_gin_v0.9"
performance_model = nb.load_ensemble(ensemble_dir_performance)

print("==> Loading runtime surrogate model...")
ensemble_dir_runtime = "/path/to/nb_models/lgb_runtime_v0.9"
runtime_model = nb.load_ensemble(ensemble_dir_runtime)

# ---------------------------------------------------------------------------
# Option 1: Create a DARTS genotype.
# ---------------------------------------------------------------------------
print("==> Creating test configs...")
Genotype = namedtuple('Genotype', 'normal normal_concat reduce reduce_concat')
genotype_config = Genotype(
    normal=[
        ('sep_conv_3x3', 0), ('sep_conv_3x3', 1),
        ('sep_conv_3x3', 0), ('sep_conv_3x3', 1),
        ('sep_conv_3x3', 1), ('skip_connect', 0),
        ('skip_connect', 0), ('dil_conv_3x3', 2),
    ],
    normal_concat=[2, 3, 4, 5],
    reduce=[
        ('max_pool_3x3', 0), ('max_pool_3x3', 1),
        ('skip_connect', 2), ('max_pool_3x3', 1),
        ('max_pool_3x3', 0), ('skip_connect', 2),
        ('skip_connect', 2), ('max_pool_3x3', 1),
    ],
    reduce_concat=[2, 3, 4, 5],
)
def _add_surrogate_predictions(results, models):
    """Append one prediction list per surrogate model to ``results``.

    For each (name, model) pair, predicts once per genotype in
    ``results['genotype']`` and stores the list under ``results[name]``.
    """
    for name, model in models.items():
        results[name] = [
            model.predict(config=genome, representation='genotype',
                          with_noise=True)
            for genome in results['genotype']
        ]


def main(args):
    """Score two parsed result files with the NB301 surrogates and plot them.

    Reads the two result paths from ``sys.argv[1:3]``, attaches surrogate
    predictions to each result set, dumps the combined results to
    ``results.json`` next to the first file, and plots train/valid curves
    alongside the surrogate predictions.

    Args:
        args: unused; kept for interface compatibility with the caller.
    """
    path1 = os.path.abspath(sys.argv[1])
    path2 = os.path.abspath(sys.argv[2])
    resultspath = os.path.join(os.path.dirname(path1), 'results.json')

    results1 = parse(path1)
    results2 = parse(path2)

    print('Loading nasbench models')
    current_dir = os.path.dirname(os.path.abspath(__file__))
    model_dir = os.path.join(current_dir, 'nb_models')
    # Insertion order matters only for reproducible key order in the output.
    models = {
        'xgb_performance': nb.load_ensemble(os.path.join(model_dir, 'xgb_v1.0')),
        'gnngin_performance': nb.load_ensemble(os.path.join(model_dir, 'gnn_gin_v1.0')),
        'lgb_runtime': nb.load_ensemble(os.path.join(model_dir, 'lgb_runtime_v1.0')),
    }

    # The original duplicated six near-identical list comprehensions; the
    # helper applies every surrogate to both result sets instead.
    _add_surrogate_predictions(results1, models)
    _add_surrogate_predictions(results2, models)

    with open(resultspath, 'w') as resultsfile:
        # Genotypes are namedtuples; stringify them so they are JSON-safe.
        results1['genotype'] = list(map(str, results1['genotype']))
        results2['genotype'] = list(map(str, results2['genotype']))
        results = {'darts': results1, 'fft': results2}
        json.dump(results, resultsfile)

    # Plot the results: ax1 = measured accuracies, ax2 = surrogate scores.
    fig, (ax1, ax2) = plt.subplots(1, 2)
    for t in ['darts', 'fft']:
        ax1.plot(results[t]['epoch'], results[t]['train'], label=f'{t}_train')
        ax1.plot(results[t]['epoch'], results[t]['valid'], label=f'{t}_valid')
        ax2.plot(results[t]['epoch'], results[t]['xgb_performance'],
                 label=f'{t}_xgb')
        ax2.plot(results[t]['epoch'], results[t]['gnngin_performance'],
                 label=f'{t}_gnngin')
    ax1.legend()
    ax2.legend()
    plt.show()