Пример #1
0
    def predict(self, pipeline_config, network, predict_loader):
        """Run the trained network over the prediction data and return its outputs.

        Arguments:
            pipeline_config {dict} -- The user specified configuration of the pipeline.
            network {BaseNet} -- The trained neural network.
            predict_loader {DataLoader} -- The data to predict the labels for.

        Returns:
            dict -- The predictions under key 'Y' as a numpy array.
        """
        # Honor the user-requested cap on CPU threads, if any was set.
        num_threads = pipeline_config["torch_num_threads"]
        if num_threads > 0:
            torch.set_num_threads(num_threads)

        predictions = predict(network, predict_loader,
                              Trainer.get_device(pipeline_config))
        return {'Y': predictions.detach().cpu().numpy()}
Пример #2
0
    def predict(self, pipeline_config, network, predict_loader):
        """Compute predictions for the given data using a trained network.

        Arguments:
            pipeline_config {dict} -- The user specified configuration of the pipeline
            network {BaseNet} -- The trained neural network.
            predict_loader {DataLoader} -- The data to predict the labels for.

        Returns:
            dict -- The predicted labels in a dict.
        """
        # Limit torch's CPU thread pool when the user configured a positive cap.
        threads = pipeline_config["torch_num_threads"]
        if threads > 0:
            torch.set_num_threads(threads)

        device = Trainer.get_device(pipeline_config)
        result = predict(network, predict_loader, device)
        # Move the result off the device and strip the autograd graph.
        return {'Y': result.detach().cpu().numpy()}
Пример #3
0
    def fit(self, hyperparameter_config, pipeline_config, train_loader,
            valid_loader, network, optimizer, optimize_metric,
            additional_metrics, log_functions, budget, loss_function,
            training_techniques, fit_start_time, refit,
            hyperparameter_config_id):
        """Train the network.

        Arguments:
            hyperparameter_config {dict} -- The sampled hyperparameter config.
            pipeline_config {dict} -- The user specified configuration of the pipeline
            train_loader {DataLoader} -- Data for training.
            valid_loader {DataLoader} -- Data for validation.
            network {BaseNet} -- The neural network to be trained.
            optimizer {AutoNetOptimizerBase} -- The selected optimizer.
            optimize_metric {AutoNetMetric} -- The selected metric to optimize
            additional_metrics {list} -- List of metrics, that should be logged
            log_functions {list} -- List of AutoNetLogFunctions that can log additional stuff like test performance
            budget {float} -- The budget for training
            loss_function {_Loss} -- The selected PyTorch loss module
            training_techniques {list} -- List of objects inheriting from BaseTrainingTechnique.
            fit_start_time {float} -- Start time of fit
            refit {bool} -- Whether training for refit or not.
            hyperparameter_config_id -- Id of the sampled config, stored on self for later reference.

        Returns:
            dict -- loss and info reported to bohb
        """
        self.hyperparameter_config_id = hyperparameter_config_id
        self.pipeline_config = pipeline_config
        self.budget = budget
        # Scope the sampled hyperparameters to this pipeline node's namespace.
        hyperparameter_config = ConfigWrapper(self.get_name(),
                                              hyperparameter_config)
        logger = logging.getLogger('autonet')
        logger.debug("Start train. Budget: " + str(budget))

        # Honor a user-imposed cap on the CPU threads torch may use.
        if pipeline_config["torch_num_threads"] > 0:
            torch.set_num_threads(pipeline_config["torch_num_threads"])

        trainer = Trainer(
            model=network,
            loss_computation=self.batch_loss_computation_techniques[
                hyperparameter_config["batch_loss_computation_technique"]](),
            metrics=[optimize_metric] + additional_metrics,
            log_functions=log_functions,
            criterion=loss_function,
            budget=budget,
            optimizer=optimizer,
            training_techniques=training_techniques,
            device=Trainer.get_device(pipeline_config),
            logger=logger,
            full_eval_each_epoch=pipeline_config["full_eval_each_epoch"])
        trainer.prepare(pipeline_config, hyperparameter_config, fit_start_time)

        model_params = self.count_parameters(network)

        # Resume epoch counting / log history from the model (supports refit).
        logs = trainer.model.logs
        epoch = trainer.model.epochs_trained
        training_start_time = time.time()

        # Fixed: leftover debug print to stdout replaced with a logger call.
        logger.debug("Result logger dir: " +
                     str(pipeline_config['result_logger_dir']))

        # Instantiate the SummaryWriter once if tensorboard logging is activated.
        # NOTE(review): no logdir is passed, so SummaryWriter writes to its
        # default ./runs directory rather than result_logger_dir -- confirm intended.
        if 'use_tensorboard_logger' in pipeline_config and pipeline_config[
                'use_tensorboard_logger']:
            writer = SummaryWriter()

        while True:
            # prepare epoch
            log = dict()
            trainer.on_epoch_start(log=log, epoch=epoch)

            # training
            optimize_metric_results, train_loss, stop_training = trainer.train(
                epoch + 1, train_loader)
            if valid_loader is not None and trainer.eval_valid_each_epoch:
                valid_metric_results = trainer.evaluate(valid_loader)

            # evaluate: collect this epoch's numbers into the log dict
            log['loss'] = train_loss
            log['model_parameters'] = model_params
            for i, metric in enumerate(trainer.metrics):
                log['train_' + metric.name] = optimize_metric_results[i]

                # valid_metric_results is only bound under the same guard above.
                if valid_loader is not None and trainer.eval_valid_each_epoch:
                    log['val_' + metric.name] = valid_metric_results[i]
            if trainer.eval_additional_logs_each_epoch:
                for additional_log in trainer.log_functions:
                    log[additional_log.name] = additional_log(
                        trainer.model, epoch)

            # wrap up epoch; a training technique may also request stopping
            stop_training = trainer.on_epoch_end(log=log,
                                                 epoch=epoch) or stop_training

            # handle logs: keep the full log in the history, but strip ndarray
            # values before debug/tensorboard output to keep it readable
            logs.append(log)
            log = {
                key: value
                for key, value in log.items()
                if not isinstance(value, np.ndarray)
            }
            logger.debug("Epoch: " + str(epoch) + " : " + str(log))

            if 'use_tensorboard_logger' in pipeline_config and pipeline_config[
                    'use_tensorboard_logger']:
                self.tensorboard_log(
                    writer,
                    budget=budget,
                    epoch=epoch,
                    log=log,
                    logdir=pipeline_config["result_logger_dir"])
            if stop_training:
                break

            epoch += 1
            # Release cached GPU memory between epochs.
            torch.cuda.empty_cache()

        # wrap up
        loss, final_log = self.wrap_up_training(
            trainer=trainer,
            logs=logs,
            epoch=epoch,
            train_loader=train_loader,
            valid_loader=valid_loader,
            budget=budget,
            training_start_time=training_start_time,
            fit_start_time=fit_start_time,
            best_over_epochs=pipeline_config['best_over_epochs'],
            refit=refit,
            logger=logger)

        return {'loss': loss, 'info': final_log}
Пример #4
0
    def fit(self, hyperparameter_config, pipeline_config,
            train_loader, valid_loader,
            network, optimizer,
            train_metric, additional_metrics,
            log_functions,
            budget,
            loss_function,
            training_techniques,
            fit_start_time,
            refit):
        """Train the network and report the final loss/info dict.

        Arguments:
            hyperparameter_config {dict} -- The sampled hyperparameter config.
            pipeline_config {dict} -- The user specified configuration of the pipeline.
            train_loader {DataLoader} -- Data for training.
            valid_loader {DataLoader} -- Data for validation (may be None).
            network {BaseNet} -- The neural network to be trained.
            optimizer -- The selected optimizer.
            train_metric -- The metric to optimize (logged first).
            additional_metrics {list} -- Further metrics to evaluate each epoch.
            log_functions {list} -- Callables logging additional values (e.g. test performance).
            budget {float} -- The budget for training.
            loss_function {_Loss} -- The selected PyTorch loss module.
            training_techniques {list} -- Objects inheriting from BaseTrainingTechnique.
            fit_start_time {float} -- Start time of fit.
            refit {bool} -- Whether training for refit or not.

        Returns:
            dict -- 'loss' and 'info' as reported to the optimizer.
        """
        # Scope the sampled hyperparameters to this pipeline node's namespace.
        hyperparameter_config = ConfigWrapper(self.get_name(), hyperparameter_config) 
        logger = logging.getLogger('autonet')
        logger.debug("Start train. Budget: " + str(budget))

        # Honor a user-imposed cap on the CPU threads torch may use.
        if pipeline_config["torch_num_threads"] > 0:
            torch.set_num_threads(pipeline_config["torch_num_threads"])

        trainer = Trainer(
            model=network,
            loss_computation=self.batch_loss_computation_techniques[hyperparameter_config["batch_loss_computation_technique"]](),
            metrics=[train_metric] + additional_metrics,
            log_functions=log_functions,
            criterion=loss_function,
            budget=budget,
            optimizer=optimizer,
            training_techniques=training_techniques,
            device=Trainer.get_device(pipeline_config),
            logger=logger,
            full_eval_each_epoch=pipeline_config["full_eval_each_epoch"])
        trainer.prepare(pipeline_config, hyperparameter_config, fit_start_time)

        # Resume epoch counting / log history from the model (supports refit).
        logs = trainer.model.logs
        epoch = trainer.model.epochs_trained
        training_start_time = time.time()
        while True:
            # prepare epoch
            log = dict()
            trainer.on_epoch_start(log=log, epoch=epoch)
            
            # training
            train_metric_results, train_loss, stop_training = trainer.train(epoch + 1, train_loader)
            if valid_loader is not None and trainer.eval_valid_each_epoch:
                valid_metric_results = trainer.evaluate(valid_loader)

            # evaluate: collect this epoch's numbers into the log dict.
            # Metrics are labeled by their __name__ here (plain functions).
            log['loss'] = train_loss
            for i, metric in enumerate(trainer.metrics):
                log['train_' + metric.__name__] = train_metric_results[i]

                # valid_metric_results is only bound under the same guard above.
                if valid_loader is not None and trainer.eval_valid_each_epoch:
                    log['val_' + metric.__name__] = valid_metric_results[i]
            if trainer.eval_additional_logs_each_epoch:
                for additional_log in trainer.log_functions:
                    log[additional_log.__name__] = additional_log(trainer.model, epoch)

            # wrap up epoch; a training technique may also request stopping
            stop_training = trainer.on_epoch_end(log=log, epoch=epoch) or stop_training

            # handle logs: keep full log in history, strip ndarray values for output
            logs.append(log)
            log = {key: value for key, value in log.items() if not isinstance(value, np.ndarray)}
            logger.debug("Epoch: " + str(epoch) + " : " + str(log))
            if 'use_tensorboard_logger' in pipeline_config and pipeline_config['use_tensorboard_logger']:
                self.tensorboard_log(budget=budget, epoch=epoch, log=log)

            if stop_training:
                break
            
            epoch += 1
            # Release cached GPU memory between epochs.
            torch.cuda.empty_cache()

        # wrap up
        loss, final_log = self.wrap_up_training(trainer=trainer, logs=logs, epoch=epoch, minimize=pipeline_config['minimize'],
            train_loader=train_loader, valid_loader=valid_loader, budget=budget, training_start_time=training_start_time, fit_start_time=fit_start_time,
            best_over_epochs=pipeline_config['best_over_epochs'], refit=refit, logger=logger)
    
        return {'loss': loss, 'info': final_log}