Example #1
    def test_get_name_error(self):
        dataset_properties = {'task_type': 'tabular_classification'}
        names = ['root_mean_sqaured_error', 'average_precision']
        try:
            get_metrics(dataset_properties, names)
        except ValueError as msg:
            self.assertRegex(str(msg), r"Invalid name entered for task [a-z]+_[a-z]+, "
                                       r"currently supported metrics for task include .*")
Example #2
def test_get_name_error():
    dataset_properties = {
        'task_type': TASK_TYPES_TO_STRING[TABULAR_CLASSIFICATION],
        'output_type': OUTPUT_TYPES_TO_STRING[BINARY]
    }
    names = ['root_mean_sqaured_error', 'average_precision']
    with pytest.raises(ValueError,
                       match=r"Invalid name entered for task [a-z]+_[a-z]+, "):
        get_metrics(dataset_properties, names)
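For contrast, a minimal happy-path sketch (reusing the dataset_properties defined above): when only supported metric names are requested, get_metrics returns autoPyTorchMetric objects instead of raising.

# Hedged sketch: 'average_precision' is a supported classification metric,
# so no ValueError is raised here.
metrics = get_metrics(dataset_properties, ['average_precision'])
assert metrics[0].name.lower() == 'average_precision'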
Example #3
    def score(self,
              X: np.ndarray,
              y: np.ndarray,
              batch_size: Optional[int] = None,
              metric_name: str = 'accuracy') -> float:
        """Scores the fitted estimator on (X, y)

        Args:
            X (np.ndarray):
                input to the pipeline, from which to guess targets
            batch_size (Optional[int]):
                batch_size controls whether the pipeline
                will be called on small chunks of the data.
                Useful when calling the predict method on
                the whole array X results in a MemoryError.
            y (np.ndarray):
                Ground Truth labels
            metric_name (str: default = 'accuracy'):
                 name of the metric to be calculated
        Returns:
            float: score based on the metric name
        """
        from autoPyTorch.pipeline.components.training.metrics.utils import get_metrics, calculate_score
        metrics = get_metrics(self.dataset_properties, [metric_name])
        y_pred = self.predict(X, batch_size=batch_size)
        score = calculate_score(y,
                                y_pred,
                                task_type=STRING_TO_TASK_TYPES[str(
                                    self.dataset_properties['task_type'])],
                                metrics=metrics)[metric_name]
        return score
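A minimal usage sketch of the score method above on an already fitted pipeline; pipeline, X_test, y_test and the 'balanced_accuracy' name are placeholders chosen for illustration, not part of the original snippet.

# Hypothetical usage of score(); any metric name supported for the task should work.
acc = pipeline.score(X_test, y_test, metric_name='accuracy')
bal_acc = pipeline.score(X_test, y_test, batch_size=512, metric_name='balanced_accuracy')
print(f"accuracy={acc:.3f}, balanced_accuracy={bal_acc:.3f}")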
Example #4
    def test_get_name(self):
        dataset_properties = {'task_type': 'tabular_classification'}
        names = ['accuracy', 'average_precision']
        metrics = get_metrics(dataset_properties, names)
        for i in range(len(metrics)):
            self.assertTrue(isinstance(metrics[i], autoPyTorchMetric))
            self.assertEqual(metrics[i].name.lower(), names[i].lower())
Example #5
def test_get_no_name_regression(output_type):
    dataset_properties = {
        'task_type': 'tabular_regression',
        'output_type': output_type
    }
    metrics = get_metrics(dataset_properties)
    for metric in metrics:
        assert isinstance(metric, autoPyTorchMetric)
Example #6
def test_get_name(metric):
    dataset_properties = {
        'task_type': TASK_TYPES_TO_STRING[TABULAR_CLASSIFICATION],
        'output_type': OUTPUT_TYPES_TO_STRING[BINARY]
    }
    metrics = get_metrics(dataset_properties, [metric])
    for i in range(len(metrics)):
        assert isinstance(metrics[i], autoPyTorchMetric)
        assert metrics[i].name.lower() == metric.lower()
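The two tests above take output_type and metric as arguments, so they are presumably driven by pytest parametrization; the decorators below are an assumed sketch of what feeds them, with illustrative values only.

# Assumed parametrization for the tests above (values are illustrative).
@pytest.mark.parametrize('output_type', ['continuous', 'continuous-multioutput'])
def test_get_no_name_regression(output_type):
    ...

@pytest.mark.parametrize('metric', ['accuracy', 'average_precision', 'f1', 'roc_auc'])
def test_get_name(metric):
    ...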
Example #7
    def test_metrics(self):
        # test of all classification metrics
        dataset_properties = {'task_type': 'tabular_classification'}
        y_target = np.array([0, 1, 0, 1])
        y_pred = np.array([0, 0, 0, 1])
        metrics = get_metrics(dataset_properties=dataset_properties, all_supported_metrics=True)
        score_dict = calculate_score(y_pred, y_target, STRING_TO_TASK_TYPES[dataset_properties['task_type']], metrics)
        self.assertIsInstance(score_dict, dict)
        for name, score in score_dict.items():
            self.assertIsInstance(name, str)
            self.assertIsInstance(score, float)

        # test of all regression metrics
        dataset_properties = {'task_type': 'tabular_regression'}
        y_target = np.array([0.1, 0.6, 0.7, 0.4])
        y_pred = np.array([0.6, 0.7, 0.4, 1])
        metrics = get_metrics(dataset_properties=dataset_properties, all_supported_metrics=True)
        score_dict = calculate_score(y_pred, y_target, STRING_TO_TASK_TYPES[dataset_properties['task_type']], metrics)

        self.assertIsInstance(score_dict, dict)
        for name, score in score_dict.items():
            self.assertIsInstance(name, str)
            self.assertIsInstance(score, float)
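Because calculate_score returns a dict keyed by metric name (as the loops above check), individual scores can be read out directly. A small sketch using the same classification data as in this test; requesting 'accuracy' by name is an assumption, though Example #4 also requests it.

# Sketch: restrict get_metrics to specific names and index the score dict directly.
dataset_properties = {'task_type': 'tabular_classification'}
y_target = np.array([0, 1, 0, 1])
y_pred = np.array([0, 0, 0, 1])
metrics = get_metrics(dataset_properties, names=['accuracy'])
scores = calculate_score(y_pred, y_target,
                         STRING_TO_TASK_TYPES[dataset_properties['task_type']],
                         metrics)
print(scores['accuracy'])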
Example #8
def test_classification_metrics():
    # test of all classification metrics
    dataset_properties = {
        'task_type': TASK_TYPES_TO_STRING[TABULAR_CLASSIFICATION],
        'output_type': OUTPUT_TYPES_TO_STRING[BINARY]
    }
    y_target = np.array([0, 1, 0, 1])
    y_pred = np.array([0, 0, 0, 1])
    metrics = get_metrics(dataset_properties=dataset_properties,
                          all_supported_metrics=True)
    score_dict = calculate_score(
        y_pred, y_target,
        STRING_TO_TASK_TYPES[dataset_properties['task_type']], metrics)
    assert isinstance(score_dict, dict)
    for name, score in score_dict.items():
        assert isinstance(name, str)
        assert isinstance(score, float)
Example #9
def test_regression_metrics():
    # test of all regression metrics
    dataset_properties = {
        'task_type': TASK_TYPES_TO_STRING[TABULAR_REGRESSION],
        'output_type': OUTPUT_TYPES_TO_STRING[CONTINUOUS]
    }
    y_target = np.array([0.1, 0.6, 0.7, 0.4])
    y_pred = np.array([0.6, 0.7, 0.4, 1])
    metrics = get_metrics(dataset_properties=dataset_properties,
                          all_supported_metrics=True)
    score_dict = calculate_score(
        y_pred, y_target,
        STRING_TO_TASK_TYPES[dataset_properties['task_type']], metrics)

    assert isinstance(score_dict, dict)
    for name, score in score_dict.items():
        assert isinstance(name, str)
        assert isinstance(score, float)
Example #10
    def setUp(self):
        # Data
        self.X, self.y = make_classification(n_samples=5000,
                                             n_features=4,
                                             n_informative=3,
                                             n_redundant=1,
                                             n_repeated=0,
                                             n_classes=2,
                                             n_clusters_per_class=2,
                                             shuffle=True,
                                             random_state=0)
        self.X = torch.FloatTensor(self.X)
        self.y = torch.LongTensor(self.y)
        self.dataset = torch.utils.data.TensorDataset(self.X, self.y)
        self.loader = torch.utils.data.DataLoader(self.dataset, batch_size=20)
        self.dataset_properties = {
            'task_type': 'tabular_classification',
            'output_type': 'binary'
        }

        # training requirements
        layers = []
        layers.append(torch.nn.Linear(4, 4))
        layers.append(torch.nn.Sigmoid())
        layers.append(torch.nn.Linear(4, 2))
        self.model = torch.nn.Sequential(*layers)
        self.criterion = torch.nn.CrossEntropyLoss()
        self.optimizer = torch.optim.SGD(self.model.parameters(), lr=0.01)
        self.device = torch.device('cpu')
        self.logger = logging.getLogger('test')
        self.metrics = get_metrics(self.dataset_properties)
        self.epochs = 20
        self.budget_tracker = BudgetTracker(
            budget_type='epochs',
            max_epochs=self.epochs,
        )
        self.task_type = STRING_TO_TASK_TYPES[
            self.dataset_properties['task_type']]
Example #11
    def score(self,
              X: np.ndarray,
              y: np.ndarray,
              batch_size: Optional[int] = None) -> float:
        """Scores the fitted estimator on (X, y) using the R^2 metric.

        Args:
            X (np.ndarray):
                Input to the pipeline, from which to predict targets.
            y (np.ndarray):
                Ground-truth targets.
            batch_size (Optional[int]):
                Controls whether the pipeline is called on small chunks
                of the data. Useful when calling the predict method on
                the whole array X results in a MemoryError.

        Returns:
            float: coefficient of determination R^2 of the prediction.
        """
        from autoPyTorch.pipeline.components.training.metrics.utils import get_metrics, calculate_score
        metrics = get_metrics(self.dataset_properties, ['r2'])
        y_pred = self.predict(X, batch_size=batch_size)
        r2 = calculate_score(y,
                             y_pred,
                             task_type=STRING_TO_TASK_TYPES[
                                 self.dataset_properties['task_type']],
                             metrics=metrics)['r2']
        return r2
Example #12
    def __init__(self,
                 task_type: str,
                 output_type: str,
                 optimize_metric: Optional[str] = None,
                 logger_port: int = logging.handlers.DEFAULT_TCP_LOGGING_PORT,
                 random_state: Optional[np.random.RandomState] = None,
                 name: Optional[str] = None):

        self.model: Optional[Union[CatBoost, BaseEstimator]] = None

        self.name = name if name is not None else self.__class__.__name__
        self.logger_port = logger_port
        self.logger = get_named_client_logger(
            name=self.name,
            host='localhost',
            port=logger_port,
        )

        if random_state is None:
            self.random_state = check_random_state(1)
        else:
            self.random_state = check_random_state(random_state)
        self.config = self.get_config()

        self.all_nan: Optional[np.ndarray] = None
        self.num_classes: Optional[int] = None

        self.is_classification = STRING_TO_TASK_TYPES[
            task_type] not in REGRESSION_TASKS

        self.metric = get_metrics(
            dataset_properties={'task_type': task_type,
                                'output_type': output_type},
            names=[optimize_metric] if optimize_metric is not None else None
        )[0]
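The pattern above relies on get_metrics returning a list: with names=None it falls back to the task defaults, while a single-element names list plus indexing [0] yields the one metric to optimize. A minimal sketch of both modes; the dataset properties and the 'accuracy' name are illustrative.

# Default metrics for the task when no names are given.
default_metrics = get_metrics({'task_type': 'tabular_classification',
                               'output_type': 'binary'})

# A single, explicitly requested metric, as in the __init__ above.
optimize_metric = 'accuracy'
metric = get_metrics({'task_type': 'tabular_classification',
                      'output_type': 'binary'},
                     names=[optimize_metric])[0]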
Example #13
    def __init__(
        self,
        backend: Backend,
        queue: Queue,
        metric: autoPyTorchMetric,
        budget: float,
        configuration: Union[int, str, Configuration],
        budget_type: Optional[str] = None,
        pipeline_config: Optional[Dict[str, Any]] = None,
        seed: int = 1,
        output_y_hat_optimization: bool = True,
        num_run: Optional[int] = None,
        include: Optional[Dict[str, Any]] = None,
        exclude: Optional[Dict[str, Any]] = None,
        disable_file_output: Union[bool, List[str]] = False,
        init_params: Optional[Dict[str, Any]] = None,
        logger_port: Optional[int] = None,
        all_supported_metrics: bool = True,
        search_space_updates: Optional[HyperparameterSearchSpaceUpdates] = None
    ) -> None:

        self.starttime = time.time()

        self.configuration = configuration
        self.backend: Backend = backend
        self.queue = queue

        self.datamanager: BaseDataset = self.backend.load_datamanager()

        assert self.datamanager.task_type is not None, \
            "Expected dataset {} to have task_type got None".format(self.datamanager.__class__.__name__)
        self.task_type = STRING_TO_TASK_TYPES[self.datamanager.task_type]
        self.output_type = STRING_TO_OUTPUT_TYPES[self.datamanager.output_type]
        self.issparse = self.datamanager.issparse

        self.include = include
        self.exclude = exclude
        self.search_space_updates = search_space_updates

        self.X_train, self.y_train = self.datamanager.train_tensors

        if self.datamanager.val_tensors is not None:
            self.X_valid, self.y_valid = self.datamanager.val_tensors
        else:
            self.X_valid, self.y_valid = None, None

        if self.datamanager.test_tensors is not None:
            self.X_test, self.y_test = self.datamanager.test_tensors
        else:
            self.X_test, self.y_test = None, None

        self.metric = metric

        self.seed = seed

        # Flag to save target for ensemble
        self.output_y_hat_optimization = output_y_hat_optimization

        if isinstance(disable_file_output, bool):
            self.disable_file_output: bool = disable_file_output
        elif isinstance(disable_file_output, List):
            self.disabled_file_outputs: List[str] = disable_file_output
        else:
            raise ValueError(
                'disable_file_output should be either a bool or a list')

        self.pipeline_class: Optional[Union[BaseEstimator,
                                            BasePipeline]] = None
        if self.task_type in REGRESSION_TASKS:
            if isinstance(self.configuration, int):
                self.pipeline_class = DummyRegressionPipeline
            elif isinstance(self.configuration, str):
                self.pipeline_class = MyTraditionalTabularRegressionPipeline
            elif isinstance(self.configuration, Configuration):
                self.pipeline_class = autoPyTorch.pipeline.tabular_regression.TabularRegressionPipeline
            else:
                raise ValueError('task {} not available'.format(
                    self.task_type))
            self.predict_function = self._predict_regression
        else:
            if isinstance(self.configuration, int):
                self.pipeline_class = DummyClassificationPipeline
            elif isinstance(self.configuration, str):
                if self.task_type in TABULAR_TASKS:
                    self.pipeline_class = MyTraditionalTabularClassificationPipeline
                else:
                    raise ValueError(
                        "Only tabular tasks are currently supported with traditional methods"
                    )
            elif isinstance(self.configuration, Configuration):
                if self.task_type in TABULAR_TASKS:
                    self.pipeline_class = autoPyTorch.pipeline.tabular_classification.TabularClassificationPipeline
                elif self.task_type in IMAGE_TASKS:
                    self.pipeline_class = autoPyTorch.pipeline.image_classification.ImageClassificationPipeline
                else:
                    raise ValueError('task {} not available'.format(
                        self.task_type))
            self.predict_function = self._predict_proba
        self.dataset_properties = self.datamanager.get_dataset_properties(
            get_dataset_requirements(
                info=self.datamanager.get_required_dataset_info(),
                include=self.include,
                exclude=self.exclude,
                search_space_updates=self.search_space_updates))

        self.additional_metrics: Optional[List[autoPyTorchMetric]] = None
        metrics_dict: Optional[Dict[str, List[str]]] = None
        if all_supported_metrics:
            self.additional_metrics = get_metrics(
                dataset_properties=self.dataset_properties,
                all_supported_metrics=all_supported_metrics)
            # Update fit dictionary with metrics passed to the evaluator
            metrics_dict = {'additional_metrics': []}
            metrics_dict['additional_metrics'].append(self.metric.name)
            for metric in self.additional_metrics:
                metrics_dict['additional_metrics'].append(metric.name)

        self._init_params = init_params

        assert self.pipeline_class is not None, "Could not infer pipeline class"
        pipeline_config = pipeline_config if pipeline_config is not None \
            else self.pipeline_class.get_default_pipeline_options()
        self.budget_type = pipeline_config[
            'budget_type'] if budget_type is None else budget_type
        self.budget = pipeline_config[
            self.budget_type] if budget == 0 else budget

        self.num_run = 0 if num_run is None else num_run

        logger_name = '%s(%d)' % (self.__class__.__name__.split('.')[-1],
                                  self.seed)
        if logger_port is None:
            logger_port = logging.handlers.DEFAULT_TCP_LOGGING_PORT
        self.logger = get_named_client_logger(
            name=logger_name,
            port=logger_port,
        )

        self._init_fit_dictionary(logger_port=logger_port,
                                  pipeline_config=pipeline_config,
                                  metrics_dict=metrics_dict)
        self.Y_optimization: Optional[np.ndarray] = None
        self.Y_actual_train: Optional[np.ndarray] = None
        self.pipelines: Optional[List[BaseEstimator]] = None
        self.pipeline: Optional[BaseEstimator] = None
        self.logger.debug("Fit dictionary in Abstract evaluator: {}".format(
            dict_repr(self.fit_dictionary)))
        self.logger.debug("Search space updates :{}".format(
            self.search_space_updates))
Example #14
    def test_get_no_name(self):
        dataset_properties = {'task_type': 'tabular_classification'}
        metrics = get_metrics(dataset_properties)
        for metric in metrics:
            self.assertTrue(isinstance(metric, autoPyTorchMetric))
Example #15
    def _fit(self, X: Dict[str, Any], y: Any = None, **kwargs: Any) -> torch.nn.Module:
        """
        Fits a component by using an input dictionary with pre-requisites

        Args:
            X (X: Dict[str, Any]): Dependencies needed by current component to perform fit
            y (Any): not used. To comply with sklearn API

        Returns:
            A instance of self
        """

        # Comply with mypy
        # Notice that choice here stands for the component choice framework,
        # where we dynamically build the configuration space by selecting the available
        # component choices. In this case, is what trainer choices are available
        assert self.choice is not None

        # Setup a Logger and other logging support
        # Writer is not pickable -- make sure it is not saved in self
        writer = None
        if 'use_tensorboard_logger' in X and X['use_tensorboard_logger']:
            writer = SummaryWriter(log_dir=X['backend'].temporary_directory)

        if X["torch_num_threads"] > 0:
            torch.set_num_threads(X["torch_num_threads"])

        budget_tracker = BudgetTracker(
            budget_type=X['budget_type'],
            max_runtime=X['runtime'] if 'runtime' in X else None,
            max_epochs=X['epochs'] if 'epochs' in X else None,
        )

        # Support additional user metrics
        additional_metrics = X['additional_metrics'] if 'additional_metrics' in X else None
        additional_losses = X['additional_losses'] if 'additional_losses' in X else None
        self.choice.prepare(
            model=X['network'],
            metrics=get_metrics(dataset_properties=X['dataset_properties'],
                                names=additional_metrics),
            criterion=get_loss_instance(X['dataset_properties'],
                                        name=additional_losses),
            budget_tracker=budget_tracker,
            optimizer=X['optimizer'],
            device=self.get_device(X),
            metrics_during_training=X['metrics_during_training'],
            scheduler=X['lr_scheduler'],
            task_type=STRING_TO_TASK_TYPES[X['dataset_properties']['task_type']]
        )
        total_parameter_count, trainable_parameter_count = self.count_parameters(X['network'])
        self.run_summary = RunSummary(
            total_parameter_count,
            trainable_parameter_count,
        )

        epoch = 1

        while True:

            # prepare epoch
            start_time = time.time()

            self.choice.on_epoch_start(X=X, epoch=epoch)

            # training
            train_loss, train_metrics = self.choice.train_epoch(
                train_loader=X['train_data_loader'],
                epoch=epoch,
                logger=self.logger,
                writer=writer,
            )

            val_loss, val_metrics, test_loss, test_metrics = None, {}, None, {}
            if self.eval_valid_each_epoch(X):
                val_loss, val_metrics = self.choice.evaluate(X['val_data_loader'], epoch, writer)
                if 'test_data_loader' in X and X['test_data_loader']:
                    test_loss, test_metrics = self.choice.evaluate(X['test_data_loader'], epoch, writer)

            # Save training information
            self.run_summary.add_performance(
                epoch=epoch,
                start_time=start_time,
                end_time=time.time(),
                train_loss=train_loss,
                val_loss=val_loss,
                test_loss=test_loss,
                train_metrics=train_metrics,
                val_metrics=val_metrics,
                test_metrics=test_metrics,
            )

            # Save the weights of the best model and, if patience
            # exhausted break training
            if self.early_stop_handler(X):
                break

            if self.choice.on_epoch_end(X=X, epoch=epoch):
                break

            self.logger.debug(self.run_summary.repr_last_epoch())

            # Reached max epoch on next iter, don't even go there
            if budget_tracker.is_max_epoch_reached(epoch + 1):
                break

            epoch += 1

            torch.cuda.empty_cache()

        # wrap up -- add score if not evaluating every epoch
        if not self.eval_valid_each_epoch(X):
            val_loss, val_metrics = self.choice.evaluate(X['val_data_loader'])
            if 'test_data_loader' in X and X['test_data_loader']:
                test_loss, test_metrics = self.choice.evaluate(X['test_data_loader'])
            self.run_summary.add_performance(
                epoch=epoch,
                start_time=start_time,
                end_time=time.time(),
                train_loss=train_loss,
                val_loss=val_loss,
                test_loss=test_loss,
                train_metrics=train_metrics,
                val_metrics=val_metrics,
                test_metrics=test_metrics,
            )
            self.logger.debug(self.run_summary.repr_last_epoch())
            self.save_model_for_ensemble()

        self.logger.info(f"Finished training with {self.run_summary.repr_last_epoch()}")

        # Tag as fitted
        self.fitted_ = True

        return X['network'].state_dict()
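The _fit method above is driven entirely by the fit dictionary X. The sketch below lists the keys it reads; all values are placeholders, not a working configuration, and optional keys such as 'runtime', 'test_data_loader' or 'use_tensorboard_logger' may simply be absent.

# Keys consumed by _fit above (placeholder values only).
X = {
    'network': model,                          # torch.nn.Module to train
    'optimizer': optimizer,                    # torch.optim optimizer
    'lr_scheduler': scheduler,                 # learning-rate scheduler (or None)
    'train_data_loader': train_loader,         # DataLoader for training
    'val_data_loader': val_loader,             # DataLoader for validation
    'budget_type': 'epochs',                   # or 'runtime'
    'epochs': 20,                              # max epochs for the BudgetTracker
    'torch_num_threads': 1,                    # passed to torch.set_num_threads
    'metrics_during_training': True,
    'dataset_properties': dataset_properties,  # must include 'task_type'
    'additional_metrics': None,                # optional list of metric names
    'additional_losses': None,                 # optional loss name
    'backend': backend,                        # provides temporary_directory for tensorboard
}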
Example #16
    def prepare_trainer(self,
                        n_samples: int,
                        trainer: BaseTrainerComponent,
                        task_type: int,
                        epochs: int = 50):
        # make this test reproducible
        torch.manual_seed(1)
        if task_type in CLASSIFICATION_TASKS:
            X, y = make_classification(n_samples=n_samples,
                                       n_features=4,
                                       n_informative=3,
                                       n_redundant=1,
                                       n_repeated=0,
                                       n_classes=2,
                                       n_clusters_per_class=2,
                                       class_sep=3.0,
                                       shuffle=True,
                                       random_state=0)
            X = StandardScaler().fit_transform(X)
            X = torch.tensor(X, dtype=torch.float)
            y = torch.tensor(y, dtype=torch.long)
            output_type = BINARY
            num_outputs = 2
            criterion = torch.nn.CrossEntropyLoss

        elif task_type in REGRESSION_TASKS:
            X, y = make_regression(n_samples=n_samples,
                                   n_features=4,
                                   n_informative=3,
                                   n_targets=1,
                                   shuffle=True,
                                   random_state=0)
            X = StandardScaler().fit_transform(X)
            X = torch.tensor(X, dtype=torch.float)
            y = torch.tensor(y, dtype=torch.float)
            # normalize targets for regression since NNs are better when predicting small outputs
            y = ((y - y.mean()) / y.std()).unsqueeze(1)
            output_type = CONTINUOUS
            num_outputs = 1
            criterion = torch.nn.MSELoss

        else:
            raise ValueError(
                f"task type {task_type} not supported for standard trainer test"
            )

        dataset = torch.utils.data.TensorDataset(X, y)
        loader = torch.utils.data.DataLoader(dataset, batch_size=20)
        dataset_properties = {
            'task_type': TASK_TYPES_TO_STRING[task_type],
            'output_type': OUTPUT_TYPES_TO_STRING[output_type]
        }

        # training requirements
        model = torch.nn.Sequential(torch.nn.Linear(4, 4), torch.nn.Sigmoid(),
                                    torch.nn.Linear(4, num_outputs))

        optimizer = torch.optim.SGD(model.parameters(), lr=0.01)
        device = torch.device('cpu')
        logger = logging.getLogger('StandardTrainer - test')
        metrics = get_metrics(dataset_properties)
        budget_tracker = BudgetTracker(
            budget_type='epochs',
            max_epochs=epochs,
        )

        trainer.prepare(scheduler=None,
                        model=model,
                        metrics=metrics,
                        criterion=criterion,
                        budget_tracker=budget_tracker,
                        optimizer=optimizer,
                        device=device,
                        metrics_during_training=True,
                        task_type=task_type,
                        labels=y)
        return trainer, model, optimizer, loader, criterion, epochs, logger
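A minimal sketch of how prepare_trainer might be used from a test; StandardTrainer, TABULAR_CLASSIFICATION and the surrounding test class are assumptions based on the other examples, and train_epoch is called with the same keyword arguments as in the _fit example above.

# Hypothetical usage of prepare_trainer with a concrete trainer component.
trainer, model, optimizer, loader, criterion, epochs, logger = self.prepare_trainer(
    n_samples=200,
    trainer=StandardTrainer(),
    task_type=TABULAR_CLASSIFICATION,
    epochs=5,
)
for epoch in range(epochs):
    trainer.train_epoch(train_loader=loader, epoch=epoch, logger=logger, writer=None)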