def hyperopt_cli(
        config: dict,
        config_file: str = None,
        dataset: str = None,
        training_set: str = None,
        validation_set: str = None,
        test_set: str = None,
        training_set_metadata: str = None,
        data_format: str = None,
        experiment_name: str = 'experiment',
        model_name: str = 'run',
        # model_load_path=None,
        # model_resume_path=None,
        skip_save_training_description: bool = False,
        skip_save_training_statistics: bool = False,
        skip_save_model: bool = False,
        skip_save_progress: bool = False,
        skip_save_log: bool = False,
        skip_save_processed_input: bool = False,
        skip_save_unprocessed_output: bool = False,
        skip_save_predictions: bool = False,
        skip_save_eval_stats: bool = False,
        skip_save_hyperopt_statistics: bool = False,
        output_directory: str = 'results',
        gpus: Union[str, int, List[int]] = None,
        gpu_memory_limit: int = None,
        allow_parallel_threads: bool = True,
        use_horovod: bool = None,
        random_seed: int = default_random_seed,
        debug: bool = False,
        **kwargs,
):
    """Searches for optimal hyperparameters.

    # Inputs

    :param config: (dict) config which defines the different parameters of
        the model, features, preprocessing and training.
    :param config_file: (str, default: `None`) the filepath string that
        specifies the config. It is a yaml file.
    :param dataset: (Union[str, dict, pandas.DataFrame], default: `None`)
        source containing the entire dataset to be used for training.
        If it has a split column, it will be used for splitting (0 for train,
        1 for validation, 2 for test), otherwise the dataset will be
        randomly split.
    :param training_set: (Union[str, dict, pandas.DataFrame], default: `None`)
        source containing training data.
    :param validation_set: (Union[str, dict, pandas.DataFrame], default:
        `None`) source containing validation data.
    :param test_set: (Union[str, dict, pandas.DataFrame], default: `None`)
        source containing test data.
    :param training_set_metadata: (Union[str, dict], default: `None`)
        metadata JSON file or loaded metadata. Intermediate preprocessed
        structure containing the mappings of the input dataset, created the
        first time an input file is used, in the same directory with the same
        name and a '.meta.json' extension.
    :param data_format: (str, default: `None`) format to interpret data
        sources. Will be inferred automatically if not specified. Valid
        formats are `'auto'`, `'csv'`, `'excel'`, `'feather'`, `'fwf'`,
        `'hdf5'` (cache file produced during previous training), `'html'`
        (file containing a single HTML `<table>`), `'json'`, `'jsonl'`,
        `'parquet'`, `'pickle'` (pickled Pandas DataFrame), `'sas'`, `'spss'`,
        `'stata'`, `'tsv'`.
    :param experiment_name: (str, default: `'experiment'`) name for the
        experiment.
    :param model_name: (str, default: `'run'`) name of the model that is
        being used.
    :param skip_save_training_description: (bool, default: `False`) disables
        saving the description JSON file.
    :param skip_save_training_statistics: (bool, default: `False`) disables
        saving the training statistics JSON file.
    :param skip_save_model: (bool, default: `False`) disables saving model
        weights and hyperparameters each time the model improves. By default
        Ludwig saves model weights after each epoch in which the validation
        metric improves, but if the model is really big that can be time
        consuming. If you do not want to keep the weights and just want to
        find out what performance a model can get with a set of
        hyperparameters, use this parameter to skip it, but the model will
        not be loadable later on and the returned model will have the weights
        obtained at the end of training, instead of the weights of the epoch
        with the best validation performance.
    :param skip_save_progress: (bool, default: `False`) disables saving
        progress each epoch. By default Ludwig saves weights and stats after
        each epoch to enable resuming of training, but if the model is really
        big that can be time consuming and will use twice as much disk space.
        Use this parameter to skip it, but training cannot be resumed later
        on.
    :param skip_save_log: (bool, default: `False`) disables saving TensorBoard
        logs. By default Ludwig saves logs for TensorBoard, but if they are
        not needed, turning this off can slightly increase the overall speed.
    :param skip_save_processed_input: (bool, default: `False`) if an input
        dataset is provided it is preprocessed and cached by saving HDF5 and
        JSON files, to avoid running the preprocessing again. If this
        parameter is `True`, the HDF5 and JSON files are not saved.
    :param skip_save_unprocessed_output: (bool, default: `False`) by default
        predictions and their probabilities are saved both in raw unprocessed
        numpy files containing tensors and as postprocessed CSV files (one
        for each output feature). If this parameter is `True`, only the CSV
        files are saved and the numpy files are skipped.
    :param skip_save_predictions: (bool, default: `False`) skips saving test
        predictions CSV files.
    :param skip_save_eval_stats: (bool, default: `False`) skips saving the
        test statistics JSON file.
    :param skip_save_hyperopt_statistics: (bool, default: `False`) skips
        saving the hyperopt stats file.
    :param output_directory: (str, default: `'results'`) the directory that
        will contain the training statistics, TensorBoard logs, the saved
        model and the training progress files.
    :param gpus: (list, default: `None`) list of GPUs that are available for
        training.
    :param gpu_memory_limit: (int, default: `None`) maximum memory in MB to
        allocate per GPU device.
    :param allow_parallel_threads: (bool, default: `True`) allow TensorFlow
        to use multithreading parallelism to improve performance at the cost
        of determinism.
    :param use_horovod: (bool, default: `None`) flag for using horovod.
    :param random_seed: (int, default: 42) random seed used for weights
        initialization, splits and any other random function.
    :param debug: (bool, default: `False`) if `True` turns on `tfdbg` with
        `inf_or_nan` checks.
    :param kwargs: additional keyword arguments forwarded to `hyperopt`.

    # Return

    :return: (`None`)
    """
    config = check_which_config(config, config_file)

    return hyperopt(
        config=config,
        dataset=dataset,
        training_set=training_set,
        validation_set=validation_set,
        test_set=test_set,
        training_set_metadata=training_set_metadata,
        data_format=data_format,
        experiment_name=experiment_name,
        model_name=model_name,
        # model_load_path=model_load_path,
        # model_resume_path=model_resume_path,
        skip_save_training_description=skip_save_training_description,
        skip_save_training_statistics=skip_save_training_statistics,
        skip_save_model=skip_save_model,
        skip_save_progress=skip_save_progress,
        skip_save_log=skip_save_log,
        skip_save_processed_input=skip_save_processed_input,
        skip_save_unprocessed_output=skip_save_unprocessed_output,
        skip_save_predictions=skip_save_predictions,
        skip_save_eval_stats=skip_save_eval_stats,
        skip_save_hyperopt_statistics=skip_save_hyperopt_statistics,
        output_directory=output_directory,
        gpus=gpus,
        gpu_memory_limit=gpu_memory_limit,
        allow_parallel_threads=allow_parallel_threads,
        use_horovod=use_horovod,
        random_seed=random_seed,
        debug=debug,
        **kwargs,
    )
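# A minimal usage sketch for `hyperopt_cli` (not part of the library API).
# The feature names, dataset path and search space below are hypothetical
# placeholders, and the exact search-space keys in the `hyperopt` section
# vary between Ludwig versions; consult the hyperparameter optimization docs
# for the schema of your release. Wrapped in a function so importing this
# module stays side-effect free.
def _example_hyperopt_cli_usage():
    config = {
        'input_features': [{'name': 'review', 'type': 'text'}],
        'output_features': [{'name': 'sentiment', 'type': 'category'}],
        'hyperopt': {
            'goal': 'minimize',
            'metric': 'loss',
            'output_feature': 'sentiment',
            'parameters': {
                # Hypothetical search space: a log-scaled learning rate
                # range and a categorical batch size.
                'training.learning_rate': {
                    'type': 'float',
                    'low': 0.0001,
                    'high': 0.1,
                    'space': 'log',
                },
                'training.batch_size': {
                    'type': 'category',
                    'values': [64, 128, 256],
                },
            },
            'sampler': {'type': 'random', 'num_samples': 10},
            'executor': {'type': 'serial'},
        },
    }
    # Runs the search; per-trial statistics land under `results/` unless
    # `skip_save_hyperopt_statistics=True`.
    hyperopt_cli(config=config, dataset='reviews.csv')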
def experiment_cli(
        config: dict,
        config_file: str = None,
        dataset: Union[str, dict, pd.DataFrame] = None,
        training_set: Union[str, dict, pd.DataFrame] = None,
        validation_set: Union[str, dict, pd.DataFrame] = None,
        test_set: Union[str, dict, pd.DataFrame] = None,
        training_set_metadata: Union[str, dict] = None,
        data_format: str = None,
        experiment_name: str = 'experiment',
        model_name: str = 'run',
        model_load_path: str = None,
        model_resume_path: str = None,
        eval_split: str = TEST,
        skip_save_training_description: bool = False,
        skip_save_training_statistics: bool = False,
        skip_save_model: bool = False,
        skip_save_progress: bool = False,
        skip_save_log: bool = False,
        skip_save_processed_input: bool = False,
        skip_save_unprocessed_output: bool = False,
        skip_save_predictions: bool = False,
        skip_save_eval_stats: bool = False,
        skip_collect_predictions: bool = False,
        skip_collect_overall_stats: bool = False,
        output_directory: str = 'results',
        gpus: Union[str, int, List[int]] = None,
        gpu_memory_limit: int = None,
        allow_parallel_threads: bool = True,
        backend: Union[Backend, str] = None,
        random_seed: int = default_random_seed,
        debug: bool = False,
        logging_level: int = logging.INFO,
        **kwargs
):
    """Trains a model on a dataset's training and validation splits and uses
    it to predict on the test split. It saves the trained model and the
    statistics of training and testing.

    # Inputs

    :param config: (dict) config which defines the different parameters of
        the model, features, preprocessing and training.
    :param config_file: (str, default: `None`) the filepath string that
        specifies the config. It is a yaml file.
    :param dataset: (Union[str, dict, pandas.DataFrame], default: `None`)
        source containing the entire dataset to be used in the experiment.
        If it has a split column, it will be used for splitting (0 for train,
        1 for validation, 2 for test), otherwise the dataset will be
        randomly split.
    :param training_set: (Union[str, dict, pandas.DataFrame], default: `None`)
        source containing training data.
    :param validation_set: (Union[str, dict, pandas.DataFrame], default:
        `None`) source containing validation data.
    :param test_set: (Union[str, dict, pandas.DataFrame], default: `None`)
        source containing test data.
    :param training_set_metadata: (Union[str, dict], default: `None`)
        metadata JSON file or loaded metadata. Intermediate preprocessed
        structure containing the mappings of the input dataset, created the
        first time an input file is used, in the same directory with the same
        name and a '.meta.json' extension.
    :param data_format: (str, default: `None`) format to interpret data
        sources. Will be inferred automatically if not specified. Valid
        formats are `'auto'`, `'csv'`, `'excel'`, `'feather'`, `'fwf'`,
        `'hdf5'` (cache file produced during previous training), `'html'`
        (file containing a single HTML `<table>`), `'json'`, `'jsonl'`,
        `'parquet'`, `'pickle'` (pickled Pandas DataFrame), `'sas'`, `'spss'`,
        `'stata'`, `'tsv'`.
    :param experiment_name: (str, default: `'experiment'`) name for the
        experiment.
    :param model_name: (str, default: `'run'`) name of the model that is
        being used.
    :param model_load_path: (str, default: `None`) if this is specified the
        loaded model will be used as initialization (useful for transfer
        learning).
    :param model_resume_path: (str, default: `None`) resumes training of the
        model from the path specified. The config is restored. In addition to
        the config, the training statistics, the loss for each epoch and the
        state of the optimizer are restored such that training can be
        effectively continued from a previously interrupted training process.
    :param eval_split: (str, default: `test`) split on which to perform
        evaluation. Valid values are `training`, `validation` and `test`.
    :param skip_save_training_description: (bool, default: `False`) disables
        saving the description JSON file.
    :param skip_save_training_statistics: (bool, default: `False`) disables
        saving the training statistics JSON file.
    :param skip_save_model: (bool, default: `False`) disables saving model
        weights and hyperparameters each time the model improves. By default
        Ludwig saves model weights after each epoch in which the validation
        metric improves, but if the model is really big that can be time
        consuming. If you do not want to keep the weights and just want to
        find out what performance a model can get with a set of
        hyperparameters, use this parameter to skip it, but the model will
        not be loadable later on and the returned model will have the weights
        obtained at the end of training, instead of the weights of the epoch
        with the best validation performance.
    :param skip_save_progress: (bool, default: `False`) disables saving
        progress each epoch. By default Ludwig saves weights and stats after
        each epoch to enable resuming of training, but if the model is really
        big that can be time consuming and will use twice as much disk space.
        Use this parameter to skip it, but training cannot be resumed later
        on.
    :param skip_save_log: (bool, default: `False`) disables saving TensorBoard
        logs. By default Ludwig saves logs for TensorBoard, but if they are
        not needed, turning this off can slightly increase the overall speed.
    :param skip_save_processed_input: (bool, default: `False`) if an input
        dataset is provided it is preprocessed and cached by saving HDF5 and
        JSON files, to avoid running the preprocessing again. If this
        parameter is `True`, the HDF5 and JSON files are not saved.
    :param skip_save_unprocessed_output: (bool, default: `False`) by default
        predictions and their probabilities are saved both in raw unprocessed
        numpy files containing tensors and as postprocessed CSV files (one
        for each output feature). If this parameter is `True`, only the CSV
        files are saved and the numpy files are skipped.
    :param skip_save_predictions: (bool, default: `False`) skips saving test
        predictions CSV files.
    :param skip_save_eval_stats: (bool, default: `False`) skips saving the
        test statistics JSON file.
    :param skip_collect_predictions: (bool, default: `False`) skips
        collecting post-processed predictions during eval.
    :param skip_collect_overall_stats: (bool, default: `False`) skips
        collecting overall stats during eval.
    :param output_directory: (str, default: `'results'`) the directory that
        will contain the training statistics, TensorBoard logs, the saved
        model and the training progress files.
    :param gpus: (list, default: `None`) list of GPUs that are available for
        training.
    :param gpu_memory_limit: (int, default: `None`) maximum memory in MB to
        allocate per GPU device.
    :param allow_parallel_threads: (bool, default: `True`) allow TensorFlow
        to use multithreading parallelism to improve performance at the cost
        of determinism.
    :param backend: (Union[Backend, str]) `Backend` or string name of the
        backend to use to execute preprocessing / training steps.
    :param random_seed: (int, default: 42) random seed used for weights
        initialization, splits and any other random function.
    :param debug: (bool, default: `False`) if `True` turns on `tfdbg` with
        `inf_or_nan` checks.
    :param logging_level: (int) Log level that will be sent to stderr.

    # Return

    :return: (Tuple[LudwigModel, dict, dict, tuple, str])
        `(model, evaluation_statistics, training_statistics,
        preprocessed_data, output_directory)`. `model` is the trained
        LudwigModel instance, `evaluation_statistics` is a dictionary with
        evaluation performance statistics on the test set,
        `training_statistics` is a dictionary of training statistics for each
        output feature containing loss and metric values for each epoch,
        `preprocessed_data` is a tuple containing the preprocessed
        `(training_set, validation_set, test_set)`, and `output_directory`
        is the filepath string to where results are stored.
    """
    backend = initialize_backend(backend)
    config = check_which_config(config, config_file)

    if model_load_path:
        model = LudwigModel.load(model_load_path)
    else:
        model = LudwigModel(
            config=config,
            logging_level=logging_level,
            backend=backend,
            gpus=gpus,
            gpu_memory_limit=gpu_memory_limit,
            allow_parallel_threads=allow_parallel_threads,
        )

    (eval_stats,
     train_stats,
     preprocessed_data,
     output_directory) = model.experiment(
        dataset=dataset,
        training_set=training_set,
        validation_set=validation_set,
        test_set=test_set,
        training_set_metadata=training_set_metadata,
        data_format=data_format,
        experiment_name=experiment_name,
        model_name=model_name,
        model_resume_path=model_resume_path,
        eval_split=eval_split,
        skip_save_training_description=skip_save_training_description,
        skip_save_training_statistics=skip_save_training_statistics,
        skip_save_model=skip_save_model,
        skip_save_progress=skip_save_progress,
        skip_save_log=skip_save_log,
        skip_save_processed_input=skip_save_processed_input,
        skip_save_unprocessed_output=skip_save_unprocessed_output,
        skip_save_predictions=skip_save_predictions,
        skip_save_eval_stats=skip_save_eval_stats,
        skip_collect_predictions=skip_collect_predictions,
        skip_collect_overall_stats=skip_collect_overall_stats,
        output_directory=output_directory,
        random_seed=random_seed,
        debug=debug,
    )

    return model, eval_stats, train_stats, preprocessed_data, output_directory
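# A minimal usage sketch for `experiment_cli` (not part of the library API).
# The config and CSV path are hypothetical placeholders. The call trains on
# the training split, evaluates on the split selected by `eval_split`
# (`test` by default) and unpacks the five return values documented above.
def _example_experiment_cli_usage():
    config = {
        'input_features': [{'name': 'review', 'type': 'text'}],
        'output_features': [{'name': 'sentiment', 'type': 'category'}],
    }
    (model, eval_stats, train_stats,
     preprocessed_data, output_dir) = experiment_cli(
        config=config,
        dataset='reviews.csv',
        experiment_name='sentiment_experiment',
        model_name='baseline',
    )
    # `output_dir` points at the experiment directory holding the saved
    # model, statistics and TensorBoard logs.
    print('results stored in:', output_dir)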
def preprocess_cli(
        preprocessing_config: dict = None,
        preprocessing_config_file: str = None,
        dataset: Union[str, dict, pd.DataFrame] = None,
        training_set: Union[str, dict, pd.DataFrame] = None,
        validation_set: Union[str, dict, pd.DataFrame] = None,
        test_set: Union[str, dict, pd.DataFrame] = None,
        training_set_metadata: Union[str, dict] = None,
        data_format: str = None,
        random_seed: int = default_random_seed,
        logging_level: int = logging.INFO,
        debug: bool = False,
        **kwargs
) -> None:
    """Preprocesses the provided dataset. Builds a LudwigModel from the
    preprocessing config and runs its preprocessing step on the given data
    sources, caching the processed inputs so they can be reused for training.

    # Inputs

    :param preprocessing_config: (dict) config which defines the features of
        the dataset and how to preprocess them.
    :param preprocessing_config_file: (str, default: `None`) the filepath
        string that specifies the config. It is a yaml file.
    :param dataset: (Union[str, dict, pandas.DataFrame], default: `None`)
        source containing the entire dataset to be used for training.
        If it has a split column, it will be used for splitting (0 for train,
        1 for validation, 2 for test), otherwise the dataset will be
        randomly split.
    :param training_set: (Union[str, dict, pandas.DataFrame], default: `None`)
        source containing training data.
    :param validation_set: (Union[str, dict, pandas.DataFrame], default:
        `None`) source containing validation data.
    :param test_set: (Union[str, dict, pandas.DataFrame], default: `None`)
        source containing test data.
    :param training_set_metadata: (Union[str, dict], default: `None`)
        metadata JSON file or loaded metadata. Intermediate preprocessed
        structure containing the mappings of the input dataset, created the
        first time an input file is used, in the same directory with the same
        name and a '.meta.json' extension.
    :param data_format: (str, default: `None`) format to interpret data
        sources. Will be inferred automatically if not specified. Valid
        formats are `'auto'`, `'csv'`, `'excel'`, `'feather'`, `'fwf'`,
        `'hdf5'` (cache file produced during previous training), `'html'`
        (file containing a single HTML `<table>`), `'json'`, `'jsonl'`,
        `'parquet'`, `'pickle'` (pickled Pandas DataFrame), `'sas'`, `'spss'`,
        `'stata'`, `'tsv'`.
    :param random_seed: (int, default: 42) random seed used for weights
        initialization, splits and any other random function.
    :param logging_level: (int) Log level that will be sent to stderr.
    :param debug: (bool, default: `False`) if `True` turns on `tfdbg` with
        `inf_or_nan` checks.

    # Return

    :return: (`None`)
    """
    preprocessing_config = check_which_config(
        preprocessing_config,
        preprocessing_config_file
    )

    model = LudwigModel(
        config=preprocessing_config,
        logging_level=logging_level,
    )
    model.preprocess(
        dataset=dataset,
        training_set=training_set,
        validation_set=validation_set,
        test_set=test_set,
        training_set_metadata=training_set_metadata,
        data_format=data_format,
        skip_save_processed_input=False,
        random_seed=random_seed,
        debug=debug,
    )
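# A minimal usage sketch for `preprocess_cli` (not part of the library API).
# The feature definitions and dataset path are hypothetical placeholders.
# The call preprocesses and caches the dataset (HDF5 plus '.meta.json'
# metadata) without training a model, so a later training run can reuse it.
def _example_preprocess_cli_usage():
    preprocess_cli(
        preprocessing_config={
            'input_features': [{'name': 'review', 'type': 'text'}],
            'output_features': [{'name': 'sentiment', 'type': 'category'}],
        },
        dataset='reviews.csv',
    )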