Example #1
File: autodl.py Project: VolcanoML/soln-ml
    def score(self, test_data: DLDataset, mode='test', metric_func=None):
        if metric_func is None:
            metric_func = self.metric
        preds = self.predict(test_data, mode=mode)
        labels = test_data.get_labels(mode=mode)
        # TODO: support AUC
        return metric_func._score_func(preds, labels)
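A minimal usage sketch, assuming a fitted AutoDL-style estimator `clf` and a prepared DLDataset `test_set` (both names are hypothetical); with metric_func left as None, the estimator falls back to its own metric:

# Hypothetical usage of score(); `clf` and `test_set` are illustrative names.
perf = clf.score(test_set, mode='test')
print('test performance: %.4f' % perf)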
Example #2
File: autodl.py Project: VolcanoML/soln-ml
    def load_predict_data(self, test_data: DLDataset):
        if self.task_type == IMG_CLS:
            test_transforms = get_test_transforms(self.best_algo_config,
                                                  image_size=self.image_size)
            test_data.load_test_data(test_transforms)
            test_data.load_data(test_transforms, test_transforms)
        else:
            test_data.load_test_data()
            test_data.load_data()
Example #3
    def predict(self, test_data: DLDataset, mode='test'):
        model_pred_list = list()
        final_pred = list()

        model_cnt = 0
        for algo_id in self.stats["include_algorithms"]:
            model_configs = self.stats[algo_id]['model_configs']
            for idx, config in enumerate(model_configs):
                if self.task_type == IMG_CLS:
                    test_transforms = get_test_transforms(config, image_size=self.image_size)
                    test_data.load_test_data(test_transforms)
                    test_data.load_data(test_transforms, test_transforms)
                else:
                    test_data.load_test_data()
                    test_data.load_data()

                if mode == 'test':
                    dataset = test_data.test_dataset
                else:
                    if test_data.subset_sampler_used:
                        dataset = test_data.train_dataset
                    else:
                        dataset = test_data.val_dataset
                estimator = get_estimator_with_parameters(self.task_type, config, self.max_epoch,
                                                          dataset, self.timestamp, device=self.device)
                if self.task_type in CLS_TASKS:
                    if mode == 'test':
                        model_pred_list.append(estimator.predict_proba(test_data.test_dataset))
                    else:
                        if test_data.subset_sampler_used:
                            model_pred_list.append(
                                estimator.predict_proba(test_data.train_dataset, sampler=test_data.val_sampler))
                        else:
                            model_pred_list.append(estimator.predict_proba(test_data.val_dataset))
                else:
                    if mode == 'test':
                        model_pred_list.append(estimator.predict(test_data.test_dataset))
                    else:
                        if test_data.subset_sampler_used:
                            model_pred_list.append(
                                estimator.predict(test_data.train_dataset, sampler=test_data.val_sampler))
                        else:
                            model_pred_list.append(estimator.predict(test_data.val_dataset))
                model_cnt += 1

        # Calculate the average of predictions
        for i in range(len(model_pred_list[0])):
            sample_pred_list = [model_pred[i] for model_pred in model_pred_list]
            pred_average = reduce(lambda x, y: x + y, sample_pred_list) / len(sample_pred_list)
            final_pred.append(pred_average)

        return np.array(final_pred)
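The averaging loop above computes the element-wise mean of the per-model predictions. Assuming every entry in model_pred_list has the same shape, the same result can be written directly with NumPy (a sketch, not the project's code):

import numpy as np
# Element-wise mean over the per-model prediction arrays; equivalent to the
# reduce-based loop above when all arrays share the same shape.
final_pred = np.mean(np.asarray(model_pred_list), axis=0)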
Example #4
File: autodl.py Project: VolcanoML/soln-ml
    def refit(self, dataset: DLDataset):
        if self.es is None:
            config_dict = self.best_algo_config.get_dictionary().copy()
            # TODO: Specify model dir
            model_dir = './data/dl_models'
            model_path = os.path.join(
                model_dir, '%s_%s.pt' %
                (self.timestamp,
                 TopKModelSaver.get_configuration_id(self.best_algo_config)))
            # Remove the old models.
            if os.path.exists(model_path):
                os.remove(model_path)

            mode = 'refit'
            if self.task_type == IMG_CLS:
                train_transforms = get_transforms(self.best_algo_config,
                                                  image_size=self.image_size)
                dataset.load_data(train_transforms['train'],
                                  train_transforms['val'])
                if dataset.test_data_path is not None:
                    test_transforms = get_test_transforms(
                        self.best_algo_config, image_size=self.image_size)
                    dataset.load_test_data(test_transforms)
                    mode = 'refit_test'

            else:
                dataset.load_data()
                if dataset.test_data_path is not None:
                    dataset.load_test_data()
                    mode = 'refit_test'

            # Refit the models.
            _, estimator = get_estimator(self.task_type,
                                         config_dict,
                                         self.max_epoch,
                                         device=self.device)
            estimator.fit(dataset, mode=mode)
            # Save to the disk.
            state = {
                'model': estimator.model.state_dict(),
                'optimizer': estimator.optimizer_.state_dict(),
                'scheduler': estimator.scheduler.state_dict(),
                'epoch_num': estimator.epoch_num,
                'early_stop': estimator.early_stop
            }
            torch.save(state, model_path)
        else:
            self.es.refit(dataset)
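The checkpoint written by refit() can later be restored with torch.load; a minimal sketch mirroring the keys saved above (the rebuilt `estimator` object and `model_path` are assumed to exist already):

import torch
# Hypothetical restore of the state dictionary saved by refit().
state = torch.load(model_path, map_location='cpu')
estimator.model.load_state_dict(state['model'])
estimator.optimizer_.load_state_dict(state['optimizer'])
estimator.scheduler.load_state_dict(state['scheduler'])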
Example #5
    def predict(self, test_data: DLDataset, mode='test'):
        predictions = []
        cur_idx = 0
        num_samples = 0

        for algo_id in self.stats["include_algorithms"]:
            model_configs = self.stats[algo_id]['model_configs']
            for idx, config in enumerate(model_configs):
                if self.task_type == IMG_CLS:
                    test_transforms = get_test_transforms(
                        config, image_size=self.image_size)
                    test_data.load_test_data(test_transforms)
                    test_data.load_data(test_transforms, test_transforms)
                else:
                    test_data.load_test_data()
                    test_data.load_data()

                if num_samples == 0:
                    if mode == 'test':
                        dataset = test_data.test_dataset
                        loader = DataLoader(dataset)
                        num_samples = len(loader)
                    else:
                        if test_data.subset_sampler_used:
                            dataset = test_data.train_dataset
                            num_samples = len(test_data.val_sampler)
                        else:
                            dataset = test_data.val_dataset
                            loader = DataLoader(dataset)
                            num_samples = len(loader)

                estimator = get_estimator_with_parameters(self.task_type,
                                                          config,
                                                          self.max_epoch,
                                                          dataset,
                                                          self.timestamp,
                                                          device=self.device)
                if cur_idx in self.model_idx:
                    if self.task_type in CLS_TASKS:
                        if mode == 'test':
                            predictions.append(
                                estimator.predict_proba(
                                    test_data.test_dataset))
                        else:
                            if test_data.subset_sampler_used:
                                predictions.append(
                                    estimator.predict_proba(
                                        test_data.train_dataset,
                                        sampler=test_data.val_sampler))
                            else:
                                predictions.append(
                                    estimator.predict_proba(
                                        test_data.val_dataset))
                    else:
                        if mode == 'test':
                            predictions.append(
                                estimator.predict(test_data.test_dataset))
                        else:
                            if test_data.subset_sampler_used:
                                predictions.append(
                                    estimator.predict(
                                        test_data.train_dataset,
                                        sampler=test_data.val_sampler))
                            else:
                                predictions.append(
                                    estimator.predict(test_data.val_dataset))
                else:
                    if len(self.shape) == 1:
                        predictions.append(np.zeros(num_samples))
                    else:
                        predictions.append(
                            np.zeros((num_samples, self.shape[1])))
                cur_idx += 1

        predictions = np.asarray(predictions)

        # if predictions.shape[0] == len(self.weights_),
        # predictions include those of zero-weight models.
        if predictions.shape[0] == len(self.weights_):
            return np.average(predictions, axis=0, weights=self.weights_)

        # If predictions.shape[0] == len(non_null_weights),
        # predictions do not include those of zero-weight models.
        elif predictions.shape[0] == np.count_nonzero(self.weights_):
            non_null_weights = [w for w in self.weights_ if w > 0]
            return np.average(predictions, axis=0, weights=non_null_weights)

        # If none of the above applies, then something must have gone wrong.
        else:
            raise ValueError("The dimensions of ensemble predictions"
                             " and ensemble weights do not match!")
Example #6
File: autodl.py Project: VolcanoML/soln-ml
    def fit(self, train_data: DLDataset, **kwargs):
        _start_time = time.time()
        if 'opt_method' in kwargs:
            self.optalgo = kwargs['opt_method']
        else:
            self.optalgo = 'see'

        if self.task_type == IMG_CLS:
            self.image_size = kwargs['image_size']

        if self.config_file_path is not None:
            config_parser = ConfigParser(logger=self.logger)
            self.update_cs = config_parser.read(self.config_file_path)

        # TODO: For first-time users, download pretrained params here!
        algorithm_candidates = self.include_algorithms.copy()
        num_train_samples = train_data.get_train_samples_num()
        if self.optalgo == 'hpo':
            self._fit_in_hpo_way(algorithm_candidates, train_data, **kwargs)
            return

        # Initialize solver for each architecture.
        for estimator_id in self.include_algorithms:
            cs = self.get_model_config_space(estimator_id)
            default_config = cs.get_default_configuration()
            cs.seed(self.seed)

            hpo_evaluator = DLEvaluator(default_config,
                                        self.task_type,
                                        max_epoch=self.max_epoch,
                                        scorer=self.metric,
                                        dataset=train_data,
                                        device=self.device,
                                        seed=self.seed,
                                        timestamp=self.timestamp,
                                        **kwargs)
            optimizer = build_hpo_optimizer(self.evaluation_type,
                                            hpo_evaluator,
                                            cs,
                                            output_dir=self.output_dir,
                                            per_run_time_limit=100000,
                                            timestamp=self.timestamp,
                                            seed=self.seed,
                                            n_jobs=self.n_jobs)
            self.solvers[estimator_id] = optimizer
            self.evaluators[estimator_id] = hpo_evaluator

        # Execute profiling procedure.
        if not self.skip_profile:
            algorithm_candidates = self.profile_models(num_train_samples)
            if len(algorithm_candidates) == 0:
                raise ValueError(
                    'After profiling, no arch is in the candidates!')
            else:
                self.logger.info('After profiling, arch candidates={%s}' %
                                 ','.join(algorithm_candidates))

        # Execute neural architecture selection.
        self.logger.info('Before NAS, arch candidates={%s}' %
                         ','.join(algorithm_candidates))

        dl_evaluator = DLEvaluator(None,
                                   self.task_type,
                                   max_epoch=self.max_epoch,
                                   scorer=self.metric,
                                   dataset=train_data,
                                   device=self.device,
                                   seed=self.seed,
                                   timestamp=self.timestamp,
                                   **kwargs)
        if self.optalgo == 'see':
            from solnml.components.optimizers.cashp_optimizer import CashpOptimizer
            self.see_optimizer = CashpOptimizer(self.task_type,
                                                algorithm_candidates,
                                                self.time_limit,
                                                n_jobs=self.n_jobs)
            inc_config, inc_perf = self.see_optimizer.run(dl_evaluator)
            self.best_algo_config = inc_config
            self.best_algo_id = inc_config['estimator']
            return

        algorithm_candidates = self.select_network_architectures(
            algorithm_candidates, dl_evaluator, num_arch=1, **kwargs)
        self.logger.info('After NAS, arch candidates={%s}' %
                         ','.join(algorithm_candidates))
        # Control flow via round robin.
        n_algorithm = len(algorithm_candidates)
        if self.trial_num is None:
            algo_id = 0
            while True:
                _time_elapsed = time.time() - _start_time
                if _time_elapsed >= self.time_limit:
                    break
                _budget_left = self.time_limit - _time_elapsed
                self.solvers[algorithm_candidates[algo_id]].iterate(
                    budget=_budget_left)
                algo_id = (algo_id + 1) % n_algorithm
        else:
            for id in range(self.trial_num):
                self.solvers[algorithm_candidates[id % n_algorithm]].iterate()

        # Best architecture id.
        best_scores_ = list()
        for estimator_id in algorithm_candidates:
            if estimator_id in self.solvers:
                solver_ = self.solvers[estimator_id]
                if len(solver_.perfs) > 0:
                    best_scores_.append(np.max(solver_.perfs))
                else:
                    best_scores_.append(-np.inf)
            else:
                best_scores_.append(-np.inf)
        print(algorithm_candidates, best_scores_)
        assert len(algorithm_candidates) > 0

        if len(best_scores_) > 1 and (np.array(best_scores_) > -np.inf).any():
            self.best_algo_id = algorithm_candidates[np.argmax(best_scores_)]
            # Best model configuration.
            solver_ = self.solvers[self.best_algo_id]
            inc_idx = np.argmax(solver_.perfs)
            self.best_algo_config = solver_.configs[inc_idx]
        else:
            self.best_algo_id = algorithm_candidates[0]
            rs = list(self.eval_hist_perfs.keys())
            set_flag = False
            if len(rs) > 0:
                max_resource = np.max(rs)
                if max_resource in self.eval_hist_configs:
                    idxs = [
                        idx for (idx, config) in enumerate(
                            self.eval_hist_configs[max_resource])
                        if config['estimator'] == self.best_algo_id
                    ]
                    best_idx = np.argmax([
                        self.eval_hist_perfs[max_resource][idx] for idx in idxs
                    ])
                    self.best_algo_config = self.eval_hist_configs[
                        max_resource][best_idx]
                    set_flag = True
            if not set_flag:
                solver_ = self.solvers[self.best_algo_id]
                inc_idx = np.argmax(solver_.perfs)
                self.best_algo_config = solver_.configs[inc_idx]

        print(self.best_algo_config)
        # Skip Ensemble
        if self.task_type == OBJECT_DET:
            return

        if self.ensemble_method is not None:
            stats = self.fetch_ensemble_members(algorithm_candidates)

            # Ensemble all intermediate/final models found in the optimization process above.
            self.es = EnsembleBuilder(stats=stats,
                                      ensemble_method=self.ensemble_method,
                                      ensemble_size=self.ensemble_size,
                                      task_type=self.task_type,
                                      max_epoch=self.max_epoch,
                                      metric=self.metric,
                                      timestamp=self.timestamp,
                                      device=self.device,
                                      output_dir=self.output_dir,
                                      **kwargs)
            self.es.fit(data=train_data)
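A hedged call sketch based only on the keyword arguments inspected inside fit() above ('opt_method' and 'image_size'); the estimator and dataset objects (`clf`, `train_set`, `test_set`) are illustrative names assumed to exist already:

# Hypothetical usage: 'see' (the default) or 'hpo' selects the optimization strategy;
# 'image_size' is required when task_type == IMG_CLS.
clf.fit(train_set, opt_method='see', image_size=32)
perf = clf.score(test_set, mode='test')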