def pre_compute(self, xtrain, xtest):
    """Pre-compute normalized zero-cost scores for the train and test sets.

    All of this computation could go into fit() and query(), but we do it
    here to save time, so that we don't have to re-compute Jacobian
    covariances for all train_sizes when running experiment_types that vary
    train size or fidelity.

    Args:
        xtrain: architectures forming the training set.
        xtest: architectures forming the test set.

    Side effects:
        Populates ``self.xtrain_zc_info`` and ``self.xtest_zc_info`` with
        one ``'<method>_scores'`` entry per zero-cost method; also sets
        ``self.train_loader`` when any zero-cost method is configured.
    """
    self.xtrain_zc_info = {}
    self.xtest_zc_info = {}

    if len(self.zero_cost) > 0:
        self.train_loader, _, _, _, _ = utils.get_train_val_loaders(
            self.config, mode='train')

        for method_name in self.zero_cost:
            zc_method = ZeroCostEstimators(self.config, batch_size=64,
                                           method_type=method_name)
            # Each method gets its own loader copy so iteration state does
            # not leak between methods.
            zc_method.train_loader = copy.deepcopy(self.train_loader)

            # Convert to ndarrays once instead of re-wrapping the same list
            # on every statistic/normalization step.
            train_scores = np.array(zc_method.query(xtrain))
            test_scores = np.array(zc_method.query(xtest))

            train_mean = np.mean(train_scores)
            train_std = np.std(train_scores)
            # Guard against zero std (all scores identical), which would
            # otherwise propagate NaN/inf through the normalized features.
            if train_std == 0:
                train_std = 1.0

            # Test scores are normalized with the *train* statistics to
            # avoid leaking test-set information.
            self.xtrain_zc_info[f'{method_name}_scores'] = (train_scores - train_mean) / train_std
            self.xtest_zc_info[f'{method_name}_scores'] = (test_scores - train_mean) / train_std
def adapt_search_space(self, search_space, scope=None, dataset_api=None):
    """Attach a queryable search space (and optional dataset API) to BANANAS.

    Args:
        search_space: search space object; must be queryable (benchmark).
        scope: optimizer scope; falls back to the search space's
            OPTIMIZER_SCOPE when falsy.
        dataset_api: benchmark/dataset API handle, stored as-is.
    """
    assert search_space.QUERYABLE, "Bananas is currently only implemented for benchmarks."
    self.search_space = search_space.clone()
    if scope:
        self.scope = scope
    else:
        self.scope = search_space.OPTIMIZER_SCOPE
    self.dataset_api = dataset_api
    # Zero-cost features need real image batches, so load data up front.
    if self.zc:
        loaders = get_train_val_loaders(self.config, mode='train')
        self.train_loader = loaders[0]
def _prepare_dataloaders(self, config, mode='train'):
    """Build and store the train, validation, and test dataloaders.

    Uses the splits defined in the config.

    Args:
        config (AttrDict): config from config file.
        mode (str): split mode forwarded to the loader factory.
    """
    loaders = utils.get_train_val_loaders(config, mode)
    # Only the first three return values (queues) are kept.
    self.train_queue = loaders[0]
    self.valid_queue = loaders[1]
    self.test_queue = loaders[2]
def pre_compute(self, xtrain, xtest=None, unlabeled=None):
    """Pre-compute zero-cost scores for train, test, and synthetic sets.

    All of this computation could go into fit() and query(), but doing it
    here saves time: Jacobian covariances don't have to be re-computed for
    every train_size when experiment_types vary train size or fidelity.
    Zero-cost info is computed for the train set, the test set, and the
    synthetic (unlabeled) set when one is supplied; the synthetic
    architectures themselves are also stored.
    """
    self.xtrain_zc_info = {}
    self.xtest_zc_info = {}
    self.unlabeled_zc_info = {}
    self.unlabeled = unlabeled

    if len(self.zero_cost) == 0:
        return

    self.train_loader, _, _, _, _ = utils.get_train_val_loaders(
        self.config, mode='train')

    for method_name in self.zero_cost:
        # nasbench101/darts need the V2 estimator; everything else uses V1.
        if self.ss_type in ['nasbench101', 'darts']:
            estimator_cls = ZeroCostV2
        else:
            estimator_cls = ZeroCostV1
        zc_method = estimator_cls(self.config, batch_size=64,
                                  method_type=method_name)
        zc_method.train_loader = copy.deepcopy(self.train_loader)

        # Keep the raw scores: bucketing depends on the train set size.
        key = f'{method_name}_scores'
        self.xtrain_zc_info[key] = zc_method.query(xtrain)
        self.xtest_zc_info[key] = zc_method.query(xtest)
        if unlabeled is not None:
            self.unlabeled_zc_info[key] = zc_method.query(unlabeled)
def build_eval_dataloaders(config):
    """Return (train, valid, test) dataloaders for the evaluation phase.

    Loaders are built in 'val' mode from the given config.
    """
    loaders = utils.get_train_val_loaders(config, mode='val')
    # Only the three queues are needed; the trailing return values are dropped.
    return loaders[0], loaders[1], loaders[2]
def build_search_dataloaders(config):
    """Return (train, valid, None) dataloaders for the search phase.

    The test queue is not used during search, so ``None`` is returned in its
    place. (The previous version returned ``_``, which at that point held the
    leftover *fifth* value from tuple unpacking — an unrelated object leaked
    to callers, not the test queue.)
    """
    train_queue, valid_queue, _test_queue, _, _ = utils.get_train_val_loaders(
        config, mode='train')
    # test_queue is not used in search currently
    return train_queue, valid_queue, None
def pre_process(self):
    """Load and cache the training dataloader for later queries."""
    loaders = get_train_val_loaders(self.config, mode='train')
    # Only the train loader is needed; the remaining values are discarded.
    self.train_loader = loaders[0]
def pre_process(self):
    """Pre-load image training data when zero-cost methods are enabled.

    No-op unless ``self.include_zero_cost`` is set.
    """
    if not self.include_zero_cost:
        return
    # pre load image training data for zero-cost methods
    from naslib.utils import utils
    self.train_loader, _, _, _, _ = utils.get_train_val_loaders(
        self.config, mode='train')