def full_train(
        model_definition,
        model_definition_file=None,
        data_df=None,
        data_train_df=None,
        data_validation_df=None,
        data_test_df=None,
        data_csv=None,
        data_train_csv=None,
        data_validation_csv=None,
        data_test_csv=None,
        data_hdf5=None,
        data_train_hdf5=None,
        data_validation_hdf5=None,
        data_test_hdf5=None,
        train_set_metadata_json=None,
        experiment_name='experiment',
        model_name='run',
        model_load_path=None,
        model_resume_path=None,
        skip_save_model=False,
        skip_save_progress=False,
        skip_save_log=False,
        skip_save_processed_input=False,
        output_directory='results',
        should_close_session=True,
        gpus=None,
        gpu_fraction=1.0,
        use_horovod=False,
        random_seed=42,
        debug=False,
        **kwargs
):
    """*full_train* defines the entire training procedure used by Ludwig's
    internals. It requires most of the parameters that are taken into the
    model, builds a full Ludwig model and performs the training.

    :param model_definition: Model definition which defines the different
           parameters of the model, features, preprocessing and training.
    :type model_definition: Dictionary
    :param model_definition_file: The file that specifies the model
           definition. It is a yaml file.
    :type model_definition_file: filepath (str)
    :param data_df: A Pandas DataFrame containing the input data used to
           train, validate and test a model. It either contains a split
           column or will be split.
    :type data_df: DataFrame
    :param data_train_df: A Pandas DataFrame containing the input data used
           to train a model.
    :type data_train_df: DataFrame
    :param data_validation_df: A Pandas DataFrame containing the input data
           used to validate a model.
    :type data_validation_df: DataFrame
    :param data_test_df: A Pandas DataFrame containing the input data used
           to test a model.
    :type data_test_df: DataFrame
    :param data_csv: A CSV file containing the input data used to train,
           validate and test a model. The CSV either contains a split column
           or will be split.
    :type data_csv: filepath (str)
    :param data_train_csv: A CSV file containing the input data used to
           train a model.
    :type data_train_csv: filepath (str)
    :param data_validation_csv: A CSV file containing the input data used to
           validate a model.
    :type data_validation_csv: filepath (str)
    :param data_test_csv: A CSV file containing the input data used to test
           a model.
    :type data_test_csv: filepath (str)
    :param data_hdf5: If the dataset is in the hdf5 format, this is used
           instead of the csv file.
    :type data_hdf5: filepath (str)
    :param data_train_hdf5: If the training set is in the hdf5 format, this
           is used instead of the csv file.
    :type data_train_hdf5: filepath (str)
    :param data_validation_hdf5: If the validation set is in the hdf5
           format, this is used instead of the csv file.
    :type data_validation_hdf5: filepath (str)
    :param data_test_hdf5: If the test set is in the hdf5 format, this is
           used instead of the csv file.
    :type data_test_hdf5: filepath (str)
    :param train_set_metadata_json: If the dataset is in hdf5 format, this
           is the associated json file containing metadata.
    :type train_set_metadata_json: filepath (str)
    :param experiment_name: The name for the experiment.
    :type experiment_name: Str
    :param model_name: Name of the model that is being used.
    :type model_name: Str
    :param model_load_path: If this is specified the loaded model will be
           used as initialization (useful for transfer learning).
    :type model_load_path: filepath (str)
    :param model_resume_path: Resumes training of the model from the path
           specified. The difference with model_load_path is that training
           statistics like the current epoch and the loss and performance so
           far are also resumed, effectively continuing a previously
           interrupted training process.
    :type model_resume_path: filepath (str)
    :param skip_save_model: Disables saving model weights and hyperparameters
           each time the model improves. By default Ludwig saves model
           weights after each epoch the validation measure improves, but for
           a really big model that can be time consuming. If you do not want
           to keep the weights and just want to find out what performance a
           model can get with a set of hyperparameters, use this parameter
           to skip saving, but the model will not be loadable later on.
    :type skip_save_model: Boolean
    :param skip_save_progress: Disables saving progress each epoch. By
           default Ludwig saves weights and stats after each epoch to enable
           resuming of training, but for a really big model that can be time
           consuming and will use twice as much storage space. Use this
           parameter to skip saving, but training cannot be resumed later on.
    :type skip_save_progress: Boolean
    :param skip_save_processed_input: If a CSV dataset is provided it is
           preprocessed and then saved as an hdf5 and json to avoid running
           the preprocessing again. If this parameter is True, the hdf5 and
           json files are not saved.
    :type skip_save_processed_input: Boolean
    :param skip_save_log: Disables saving TensorBoard logs. By default
           Ludwig saves logs for TensorBoard, but if they are not needed
           turning them off can slightly increase the overall speed.
    :type skip_save_log: Boolean
    :param output_directory: The directory that will contain the training
           statistics, the saved model and the training progress files.
    :type output_directory: filepath (str)
    :param should_close_session: Whether to close the TensorFlow session
           after training.
    :type should_close_session: Boolean
    :param gpus: List of GPUs that are available for training.
    :type gpus: List
    :param gpu_fraction: Fraction of the memory of each GPU to use at the
           beginning of the training. The memory may grow elastically.
    :type gpu_fraction: Float
    :param use_horovod: Flag for using horovod for distributed training.
    :type use_horovod: Boolean
    :param random_seed: Random seed used for weights initialization, splits
           and any other random function.
    :type random_seed: Integer
    :param debug: If true turns on tfdbg with inf_or_nan checks.
    :type debug: Boolean
    :returns: A tuple of (model, preprocessed_data, experiment_dir_name,
              train_stats, model_definition).
    """
    # set input features defaults
    if model_definition_file is not None:
        with open(model_definition_file, 'r') as def_file:
            model_definition = merge_with_defaults(yaml.safe_load(def_file))
    else:
        model_definition = merge_with_defaults(model_definition)

    # setup directories and file names
    experiment_dir_name = None
    if model_resume_path is not None:
        if os.path.exists(model_resume_path):
            experiment_dir_name = model_resume_path
        else:
            if is_on_master():
                logger.info('Model resume path does not exist, '
                            'starting training from scratch')
            model_resume_path = None

    if model_resume_path is None:
        if is_on_master():
            experiment_dir_name = get_experiment_dir_name(
                output_directory,
                experiment_name,
                model_name
            )
        else:
            experiment_dir_name = '.'

    # if model_load_path is not None, load its train_set_metadata
    if model_load_path is not None:
        train_set_metadata_json = os.path.join(
            model_load_path,
            TRAIN_SET_METADATA_FILE_NAME
        )

    description_fn, training_stats_fn, model_dir = get_file_names(
        experiment_dir_name)

    # save description
    description = get_experiment_description(
        model_definition,
        data_csv=data_csv,
        data_train_csv=data_train_csv,
        data_validation_csv=data_validation_csv,
        data_test_csv=data_test_csv,
        data_hdf5=data_hdf5,
        data_train_hdf5=data_train_hdf5,
        data_validation_hdf5=data_validation_hdf5,
        data_test_hdf5=data_test_hdf5,
        metadata_json=train_set_metadata_json,
        random_seed=random_seed
    )
    if is_on_master():
        save_json(description_fn, description)

        # print description
        logger.info('Experiment name: {}'.format(experiment_name))
        logger.info('Model name: {}'.format(model_name))
        logger.info('Output path: {}'.format(experiment_dir_name))
        logger.info('\n')
        for key, value in description.items():
            logger.info('{}: {}'.format(key, pformat(value, indent=4)))
        logger.info('\n')

    # preprocess
    preprocessed_data = preprocess_for_training(
        model_definition,
        data_df=data_df,
        data_train_df=data_train_df,
        data_validation_df=data_validation_df,
        data_test_df=data_test_df,
        data_csv=data_csv,
        data_train_csv=data_train_csv,
        data_validation_csv=data_validation_csv,
        data_test_csv=data_test_csv,
        data_hdf5=data_hdf5,
        data_train_hdf5=data_train_hdf5,
        data_validation_hdf5=data_validation_hdf5,
        data_test_hdf5=data_test_hdf5,
        train_set_metadata_json=train_set_metadata_json,
        skip_save_processed_input=skip_save_processed_input,
        preprocessing_params=model_definition['preprocessing'],
        random_seed=random_seed
    )

    (training_set,
     validation_set,
     test_set,
     train_set_metadata) = preprocessed_data

    if is_on_master():
        logger.info('Training set: {0}'.format(training_set.size))
        if validation_set is not None:
            logger.info('Validation set: {0}'.format(validation_set.size))
        if test_set is not None:
            logger.info('Test set: {0}'.format(test_set.size))

    # update model definition with metadata properties
    update_model_definition_with_metadata(
        model_definition,
        train_set_metadata
    )

    if is_on_master():
        if not skip_save_model:
            # save train set metadata
            os.makedirs(model_dir, exist_ok=True)
            save_json(
                os.path.join(model_dir, TRAIN_SET_METADATA_FILE_NAME),
                train_set_metadata
            )

    # run the experiment
    model, result = train(
        training_set=training_set,
        validation_set=validation_set,
        test_set=test_set,
        model_definition=model_definition,
        save_path=model_dir,
        model_load_path=model_load_path,
        resume=model_resume_path is not None,
        skip_save_model=skip_save_model,
        skip_save_progress=skip_save_progress,
        skip_save_log=skip_save_log,
        gpus=gpus,
        gpu_fraction=gpu_fraction,
        use_horovod=use_horovod,
        random_seed=random_seed,
        debug=debug
    )

    train_trainset_stats, train_valiset_stats, train_testset_stats = result
    train_stats = {
        'train': train_trainset_stats,
        'validation': train_valiset_stats,
        'test': train_testset_stats
    }

    if should_close_session:
        model.close_session()

    if is_on_master():
        # save training and test statistics
        save_json(training_stats_fn, train_stats)

    # grab the results of the model with highest validation test performance
    validation_field = model_definition['training']['validation_field']
    validation_measure = model_definition['training']['validation_measure']
    validation_field_result = train_valiset_stats[validation_field]
    best_function = get_best_function(validation_measure)

    # results of the model with highest validation test performance
    if is_on_master() and validation_set is not None:
        epoch_best_vali_measure, best_vali_measure = best_function(
            enumerate(validation_field_result[validation_measure]),
            key=lambda pair: pair[1]
        )
        logger.info('Best validation model epoch: {0}'.format(
            epoch_best_vali_measure + 1))
        logger.info(
            'Best validation model {0} on validation set {1}: {2}'.format(
                validation_measure, validation_field, best_vali_measure
            ))
        if test_set is not None:
            best_vali_measure_epoch_test_measure = train_testset_stats[
                validation_field][validation_measure][epoch_best_vali_measure]
            logger.info(
                'Best validation model {0} on test set {1}: {2}'.format(
                    validation_measure,
                    validation_field,
                    best_vali_measure_epoch_test_measure
                ))

    if is_on_master():
        logger.info('\nFinished: {0}_{1}'.format(experiment_name, model_name))
        logger.info('Saved to: {0}'.format(experiment_dir_name))

    contrib_command("train_save", experiment_dir_name)

    return (
        model,
        preprocessed_data,
        experiment_dir_name,
        train_stats,
        model_definition
    )
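# --- Illustrative usage sketch (not part of the original module). ---
# A minimal, hedged example of invoking full_train programmatically, as
# described in the docstring above. The helper name, feature names and
# CSV path below are hypothetical placeholders.
def _example_full_train():
    model_definition = {
        'input_features': [{'name': 'text', 'type': 'text'}],
        'output_features': [{'name': 'class', 'type': 'category'}]
    }
    # Returns the trained model, the preprocessed data, the experiment
    # directory, the training statistics and the merged model definition.
    return full_train(
        model_definition,
        data_csv='my_dataset.csv',  # hypothetical path
        experiment_name='demo_experiment',
        model_name='demo_run'
    )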
        help='port for server (default: 8000)',
        default=8000,
        type=int,
    )
    parser.add_argument(
        '-H',
        '--host',
        help='host for server (default: 0.0.0.0)',
        default='0.0.0.0'
    )

    args = parser.parse_args(sys_argv)

    args.logging_level = logging_level_registry[args.logging_level]
    logging.getLogger('ludwig').setLevel(
        args.logging_level
    )
    global logger
    logger = logging.getLogger('ludwig.serve')

    print_ludwig('Serve', LUDWIG_VERSION)

    run_server(args.model_path, args.host, args.port)


if __name__ == '__main__':
    contrib_import()
    contrib_command("serve", *sys.argv)
    cli(sys.argv[1:])
        action='store_true',
        default=False,
        help='uses horovod for distributed training')
    parser.add_argument(
        '-dbg',
        '--debug',
        action='store_true',
        default=False,
        help='enables debugging mode')
    parser.add_argument(
        '-l',
        '--logging_level',
        default='info',
        help='the level of logging to use',
        choices=['critical', 'error', 'warning', 'info', 'debug', 'notset'])

    args = parser.parse_args(sys_argv)

    logging.getLogger('ludwig').setLevel(
        logging_level_registry[args.logging_level])

    set_on_master(args.use_horovod)

    if is_on_master():
        print_ludwig('Train', LUDWIG_VERSION)

    full_train(**vars(args))


if __name__ == '__main__':
    contrib_command("train", *sys.argv)
    cli(sys.argv[1:])
def train(
        training_set,
        validation_set,
        test_set,
        model_definition,
        save_path='model',
        model_load_path=None,
        resume=False,
        skip_save_model=False,
        skip_save_progress=False,
        skip_save_log=False,
        gpus=None,
        gpu_fraction=1.0,
        use_horovod=False,
        random_seed=default_random_seed,
        debug=False
):
    """
    :param training_set: Dataset containing training data.
    :type training_set: Dataset
    :param validation_set: Dataset containing validation data.
    :type validation_set: Dataset
    :param test_set: Dataset containing test data.
    :type test_set: Dataset
    :param model_definition: Model definition which defines the different
           parameters of the model, features, preprocessing and training.
    :type model_definition: Dictionary
    :param save_path: The path to save the model to.
    :type save_path: filepath (str)
    :param model_load_path: If this is specified the loaded model will be
           used as initialization (useful for transfer learning).
    :type model_load_path: filepath (str)
    :param resume: If true, continues training from the model saved in
           save_path, restoring training statistics as well.
    :type resume: Boolean
    :param skip_save_model: Disables saving model weights and hyperparameters
           each time the model improves. By default Ludwig saves model
           weights after each epoch the validation measure improves, but for
           a really big model that can be time consuming. If you do not want
           to keep the weights and just want to find out what performance a
           model can get with a set of hyperparameters, use this parameter
           to skip saving, but the model will not be loadable later on.
    :type skip_save_model: Boolean
    :param skip_save_progress: Disables saving progress each epoch. By
           default Ludwig saves weights and stats after each epoch to enable
           resuming of training, but for a really big model that can be time
           consuming and will use twice as much storage space. Use this
           parameter to skip saving, but training cannot be resumed later on.
    :type skip_save_progress: Boolean
    :param skip_save_log: Disables saving TensorBoard logs. By default
           Ludwig saves logs for TensorBoard, but if they are not needed
           turning them off can slightly increase the overall speed.
    :type skip_save_log: Boolean
    :param gpus: List of GPUs that are available for training.
    :type gpus: List
    :param gpu_fraction: Fraction of the memory of each GPU to use at the
           beginning of the training. The memory may grow elastically.
    :type gpu_fraction: Float
    :param use_horovod: Flag for using horovod for distributed training.
    :type use_horovod: Boolean
    :param random_seed: Random seed used for weights initialization, splits
           and any other random function.
    :type random_seed: Integer
    :param debug: If true turns on tfdbg with inf_or_nan checks.
    :type debug: Boolean
    :returns: A tuple of the trained model and the training statistics
              returned by model.train.
    """
    if model_load_path is not None:
        # Load model
        if is_on_master():
            print_boxed('LOADING MODEL')
            logger.info('Loading model: {}\n'.format(model_load_path))
        model, _ = load_model_and_definition(model_load_path)
    else:
        # Build model
        if is_on_master():
            print_boxed('BUILDING MODEL', print_fun=logger.debug)

        model = Model(
            model_definition['input_features'],
            model_definition['output_features'],
            model_definition['combiner'],
            model_definition['training'],
            model_definition['preprocessing'],
            use_horovod=use_horovod,
            random_seed=random_seed,
            debug=debug
        )

    contrib_command("train_model", model, model_definition, model_load_path)

    # Train model
    if is_on_master():
        print_boxed('TRAINING')

    return model, model.train(
        training_set,
        validation_set=validation_set,
        test_set=test_set,
        save_path=save_path,
        resume=resume,
        skip_save_model=skip_save_model,
        skip_save_progress=skip_save_progress,
        skip_save_log=skip_save_log,
        gpus=gpus,
        gpu_fraction=gpu_fraction,
        random_seed=random_seed,
        **model_definition['training']
    )
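# --- Illustrative usage sketch (not part of the original module). ---
# A hedged example of transfer learning with train(): the weights of a
# previously saved model are used as initialization via model_load_path,
# as the docstring describes. The helper name and both paths below are
# hypothetical; the dataset arguments are Dataset objects such as those
# produced by preprocess_for_training.
def _example_transfer_learning(training_set, validation_set, test_set,
                               model_definition):
    model, train_stats = train(
        training_set,
        validation_set,
        test_set,
        model_definition,
        save_path='results/transfer_run/model',    # hypothetical
        model_load_path='results/base_run/model'   # hypothetical pretrained
    )
    return model, train_stats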
default=False, help="enables debugging mode", ) parser.add_argument( "-l", "--logging_level", default="info", help="the level of logging to use", choices=["critical", "error", "warning", "info", "debug", "notset"], ) args = parser.parse_args(sys_argv) logging.getLogger('ludwig').setLevel( logging_level_registry[args.logging_level]) global logger logger = logging.getLogger('ludwig.hyperopt') set_on_master(args.use_horovod) if is_on_master(): print_ludwig("Hyperopt", LUDWIG_VERSION) hyperopt(**vars(args)) if __name__ == "__main__": contrib_import() contrib_command("hyperopt", *sys.argv) cli(sys.argv[1:])
        default=False,
        help='enables debugging mode')
    parser.add_argument(
        '-l',
        '--logging_level',
        default='info',
        help='the level of logging to use',
        choices=['critical', 'error', 'warning', 'info', 'debug', 'notset'])

    args = parser.parse_args(sys_argv)

    args.logging_level = logging_level_registry[args.logging_level]
    logging.getLogger('ludwig').setLevel(args.logging_level)
    global logger
    logger = logging.getLogger('ludwig.experiment')

    args.backend = initialize_backend(args.backend)
    if args.backend.is_coordinator():
        print_ludwig('Experiment', LUDWIG_VERSION)

    if args.k_fold is None:
        experiment_cli(**vars(args))
    else:
        kfold_cross_validate_cli(**vars(args))


if __name__ == '__main__':
    contrib_import()
    contrib_command("experiment", *sys.argv)
    cli(sys.argv[1:])
def full_experiment(
        model_definition,
        model_definition_file=None,
        data_df=None,
        data_train_df=None,
        data_validation_df=None,
        data_test_df=None,
        data_csv=None,
        data_train_csv=None,
        data_validation_csv=None,
        data_test_csv=None,
        data_hdf5=None,
        data_train_hdf5=None,
        data_validation_hdf5=None,
        data_test_hdf5=None,
        train_set_metadata_json=None,
        experiment_name='experiment',
        model_name='run',
        model_load_path=None,
        model_resume_path=None,
        skip_save_training_description=False,
        skip_save_training_statistics=False,
        skip_save_model=False,
        skip_save_progress=False,
        skip_save_log=False,
        skip_save_processed_input=False,
        skip_save_unprocessed_output=False,
        skip_save_test_predictions=False,
        skip_save_test_statistics=False,
        output_directory='results',
        gpus=None,
        gpu_memory_limit=None,
        allow_parallel_threads=True,
        use_horovod=None,
        random_seed=default_random_seed,
        debug=False,
        **kwargs
):
    """Trains a model on a dataset's training and validation splits and uses
    it to predict on the test split. It saves the trained model and the
    statistics of training and testing.

    :param model_definition: Model definition which defines the different
           parameters of the model, features, preprocessing and training.
    :type model_definition: Dictionary
    :param model_definition_file: The file that specifies the model
           definition. It is a yaml file.
    :type model_definition_file: filepath (str)
    :param data_csv: A CSV file containing the input data used to train,
           validate and test a model. The CSV either contains a split column
           or will be split.
    :type data_csv: filepath (str)
    :param data_train_csv: A CSV file containing the input data used to
           train a model.
    :type data_train_csv: filepath (str)
    :param data_validation_csv: A CSV file containing the input data used to
           validate a model.
    :type data_validation_csv: filepath (str)
    :param data_test_csv: A CSV file containing the input data used to test
           a model.
    :type data_test_csv: filepath (str)
    :param data_hdf5: If the dataset is in the hdf5 format, this is used
           instead of the csv file.
    :type data_hdf5: filepath (str)
    :param data_train_hdf5: If the training set is in the hdf5 format, this
           is used instead of the csv file.
    :type data_train_hdf5: filepath (str)
    :param data_validation_hdf5: If the validation set is in the hdf5
           format, this is used instead of the csv file.
    :type data_validation_hdf5: filepath (str)
    :param data_test_hdf5: If the test set is in the hdf5 format, this is
           used instead of the csv file.
    :type data_test_hdf5: filepath (str)
    :param train_set_metadata_json: If the dataset is in hdf5 format, this
           is the associated json file containing metadata.
    :type train_set_metadata_json: filepath (str)
    :param experiment_name: The name for the experiment.
    :type experiment_name: Str
    :param model_name: Name of the model that is being used.
    :type model_name: Str
    :param model_load_path: If this is specified the loaded model will be
           used as initialization (useful for transfer learning).
    :type model_load_path: filepath (str)
    :param model_resume_path: Resumes training of the model from the path
           specified. The difference with model_load_path is that training
           statistics like the current epoch and the loss and performance so
           far are also resumed, effectively continuing a previously
           interrupted training process.
    :type model_resume_path: filepath (str)
    :param skip_save_training_description: Disables saving the description
           JSON file.
    :type skip_save_training_description: Boolean
    :param skip_save_training_statistics: Disables saving the training
           statistics JSON file.
    :type skip_save_training_statistics: Boolean
    :param skip_save_model: Disables saving model weights and hyperparameters
           each time the model improves. By default Ludwig saves model
           weights after each epoch the validation metric improves, but for
           a really big model that can be time consuming. If you do not want
           to keep the weights and just want to find out what performance a
           model can get with a set of hyperparameters, use this parameter
           to skip saving, but the model will not be loadable later on.
    :type skip_save_model: Boolean
    :param skip_save_progress: Disables saving progress each epoch. By
           default Ludwig saves weights and stats after each epoch to enable
           resuming of training, but for a really big model that can be time
           consuming and will use twice as much storage space. Use this
           parameter to skip saving, but training cannot be resumed later on.
    :type skip_save_progress: Boolean
    :param skip_save_log: Disables saving TensorBoard logs. By default
           Ludwig saves logs for TensorBoard, but if they are not needed
           turning them off can slightly increase the overall speed.
    :type skip_save_log: Boolean
    :param skip_save_processed_input: If a CSV dataset is provided it is
           preprocessed and then saved as an hdf5 and json to avoid running
           the preprocessing again. If this parameter is True, the hdf5 and
           json files are not saved.
    :type skip_save_processed_input: Boolean
    :param skip_save_unprocessed_output: By default predictions and their
           probabilities are saved both in raw unprocessed numpy files
           containing tensors and as postprocessed CSV files (one for each
           output feature). If this parameter is True, only the CSV files
           are saved and the numpy files are skipped.
    :type skip_save_unprocessed_output: Boolean
    :param skip_save_test_predictions: Skips saving the test predictions CSV
           files.
    :type skip_save_test_predictions: Boolean
    :param skip_save_test_statistics: Skips saving the test statistics JSON
           file.
    :type skip_save_test_statistics: Boolean
    :param output_directory: The directory that will contain the training
           statistics, the saved model and the training progress files.
    :type output_directory: filepath (str)
    :param gpus: List of GPUs that are available for training.
    :type gpus: List
    :param gpu_memory_limit: Maximum memory in MB to allocate per GPU device.
    :type gpu_memory_limit: Integer
    :param allow_parallel_threads: Allow TensorFlow to use multithreading
           parallelism to improve performance at the cost of determinism.
    :type allow_parallel_threads: Boolean
    :param use_horovod: Flag for using horovod for distributed training.
    :type use_horovod: Boolean
    :param random_seed: Random seed used for weights initialization, splits
           and any other random function.
    :type random_seed: Integer
    :param debug: If true turns on tfdbg with inf_or_nan checks.
    :type debug: Boolean
    """
    set_on_master(use_horovod)

    (
        model,
        preprocessed_data,
        experiment_dir_name,
        _,  # train_stats
        model_definition,
        test_results
    ) = experiment(
        model_definition,
        model_definition_file=model_definition_file,
        data_df=data_df,
        data_train_df=data_train_df,
        data_validation_df=data_validation_df,
        data_test_df=data_test_df,
        data_csv=data_csv,
        data_train_csv=data_train_csv,
        data_validation_csv=data_validation_csv,
        data_test_csv=data_test_csv,
        data_hdf5=data_hdf5,
        data_train_hdf5=data_train_hdf5,
        data_validation_hdf5=data_validation_hdf5,
        data_test_hdf5=data_test_hdf5,
        train_set_metadata_json=train_set_metadata_json,
        experiment_name=experiment_name,
        model_name=model_name,
        model_load_path=model_load_path,
        model_resume_path=model_resume_path,
        skip_save_training_description=skip_save_training_description,
        skip_save_training_statistics=skip_save_training_statistics,
        skip_save_model=skip_save_model,
        skip_save_progress=skip_save_progress,
        skip_save_log=skip_save_log,
        skip_save_processed_input=skip_save_processed_input,
        output_directory=output_directory,
        gpus=gpus,
        gpu_memory_limit=gpu_memory_limit,
        allow_parallel_threads=allow_parallel_threads,
        use_horovod=use_horovod,
        random_seed=random_seed,
        debug=debug,
        **kwargs
    )

    (training_set,
     validation_set,
     test_set,
     train_set_metadata) = preprocessed_data

    if test_set is not None:
        # check if we need to create the output dir
        if is_on_master():
            if not (
                    skip_save_unprocessed_output and
                    skip_save_test_predictions and
                    skip_save_test_statistics
            ):
                if not os.path.exists(experiment_dir_name):
                    os.makedirs(experiment_dir_name)

        # postprocess
        postprocessed_output = postprocess(
            test_results,
            model_definition['output_features'],
            train_set_metadata,
            experiment_dir_name,
            skip_save_unprocessed_output or not is_on_master()
        )

        if is_on_master():
            print_test_results(test_results)
            if not skip_save_test_predictions:
                save_prediction_outputs(
                    postprocessed_output,
                    experiment_dir_name
                )
            if not skip_save_test_statistics:
                save_test_statistics(test_results, experiment_dir_name)

    if is_on_master():
        logger.info('\nFinished: {0}_{1}'.format(experiment_name, model_name))
        logger.info('Saved to: {}'.format(experiment_dir_name))

    contrib_command("experiment_save", experiment_dir_name)
    return experiment_dir_name
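# --- Illustrative usage sketch (not part of the original module). ---
# A minimal, hedged example of running a full experiment: the model is
# trained on the training/validation splits and evaluated on the test
# split, with predictions and statistics saved to the returned directory.
# The helper name, feature names and CSV path are hypothetical.
def _example_full_experiment():
    model_definition = {
        'input_features': [{'name': 'image_path', 'type': 'image'}],
        'output_features': [{'name': 'label', 'type': 'category'}]
    }
    experiment_dir_name = full_experiment(
        model_definition,
        data_csv='my_dataset.csv',  # hypothetical path
        experiment_name='image_experiment',
        model_name='baseline'
    )
    return experiment_dir_name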
        action='store_true',
        default=False,
        help='enables debugging mode')
    parser.add_argument(
        '-l',
        '--logging_level',
        default='info',
        help='the level of logging to use',
        choices=['critical', 'error', 'warning', 'info', 'debug', 'notset'])

    args = parser.parse_args(sys_argv)

    logging.getLogger('ludwig').setLevel(
        logging_level_registry[args.logging_level])

    print_ludwig('Collect Weights', LUDWIG_VERSION)

    collect_weights(**vars(args))


if __name__ == '__main__':
    if len(sys.argv) > 1:
        if sys.argv[1] == 'activations':
            contrib_command("collect_activations", *sys.argv)
            cli_collect_activations(sys.argv[2:])
        elif sys.argv[1] == 'weights':
            contrib_command("collect_weights", *sys.argv)
            cli_collect_weights(sys.argv[2:])
        else:
            print('Unrecognized command')
    else:
        print('Unrecognized command')
                 {name: h3_1, type: h3}, \
                 {name: h3_2, type: h3}, \
                 {name: vector_1, type: vector}, \
                 {name: vector_2, type: vector}, \
             ]',
        type=yaml.safe_load,
        help='list of features to generate in YAML format. '
             'Provide a list containing one dictionary for each feature, '
             'each dictionary must include a name, a type '
             'and can include some generation parameters depending on the type')

    args = parser.parse_args(sys_argv)

    # No log level parameter yet; the following is a placeholder in case we
    # add one at a later date.
    # args.logging_level = logging_level_registry[args.logging_level]
    # logging.getLogger('ludwig').setLevel(
    #     args.logging_level
    # )
    # global logger
    # logger = logging.getLogger('ludwig.data.dataset_synthesizer')

    if is_on_master():
        print_ludwig('Synthesize Dataset', LUDWIG_VERSION)

    cli_synthesize_dataset(**vars(args))


if __name__ == '__main__':
    contrib_import()
    contrib_command("synthesize_dataset", *sys.argv)
    cli(sys.argv[1:])
        default=False,
        help='uses horovod for distributed training')
    parser.add_argument(
        '-dbg',
        '--debug',
        action='store_true',
        default=False,
        help='enables debugging mode')
    parser.add_argument(
        '-l',
        '--logging_level',
        default='info',
        help='the level of logging to use',
        choices=['critical', 'error', 'warning', 'info', 'debug', 'notset'])

    args = parser.parse_args(sys_argv)
    args.evaluate_performance = True

    logging.getLogger('ludwig').setLevel(
        logging_level_registry[args.logging_level])

    set_on_master(args.use_horovod)

    if is_on_master():
        print_ludwig('Test', LUDWIG_VERSION)

    full_predict(**vars(args))


if __name__ == '__main__':
    contrib_command("test", *sys.argv)
    cli(sys.argv[1:])
    parser.add_argument(
        '-l',
        '--logging_level',
        default='info',
        help='the level of logging to use',
        choices=['critical', 'error', 'warning', 'info', 'debug', 'notset'])

    args = parser.parse_args(sys_argv)
    args.evaluate_performance = True

    args.logging_level = logging_level_registry[args.logging_level]
    logging.getLogger('ludwig').setLevel(args.logging_level)
    global logger
    logger = logging.getLogger('ludwig.test_performance')

    set_on_master(args.use_horovod)

    if is_on_master():
        print_ludwig('Test', LUDWIG_VERSION)
        logger.info('Dataset path: {}'.format(args.dataset))
        logger.info('Model path: {}'.format(args.model_path))
        logger.info('')

    evaluate_cli(**vars(args))


if __name__ == '__main__':
    contrib_import()
    contrib_command("evaluate", *sys.argv)
    cli(sys.argv[1:])
        help='enables debugging mode')
    parser.add_argument(
        '-l',
        '--logging_level',
        default='info',
        help='the level of logging to use',
        choices=['critical', 'error', 'warning', 'info', 'debug', 'notset'])

    args = parser.parse_args(sys_argv)

    args.logging_level = logging_level_registry[args.logging_level]
    logging.getLogger('ludwig').setLevel(args.logging_level)
    global logger
    logger = logging.getLogger('ludwig.predict')

    set_on_master(args.use_horovod)

    if is_on_master():
        print_ludwig('Predict', LUDWIG_VERSION)
        logger.info('Dataset path: {}'.format(args.dataset))
        logger.info('Model path: {}'.format(args.model_path))
        logger.info('')

    predict_cli(**vars(args))


if __name__ == '__main__':
    contrib_import()
    contrib_command("predict", *sys.argv)
    cli(sys.argv[1:])
        help='the level of logging to use',
        choices=['critical', 'error', 'warning', 'info', 'debug', 'notset']
    )

    args = parser.parse_args(sys_argv)

    args.logging_level = logging_level_registry[args.logging_level]
    logging.getLogger('ludwig').setLevel(
        args.logging_level
    )
    global logger
    logger = logging.getLogger('ludwig.export')

    print_ludwig('Export Neuropod', LUDWIG_VERSION)

    export_neuropod(**vars(args))


if __name__ == '__main__':
    if len(sys.argv) > 1:
        if sys.argv[1] == 'savedmodel':
            contrib_command("export_savedmodel", *sys.argv)
            cli_export_savedmodel(sys.argv[2:])
        elif sys.argv[1] == 'neuropod':
            contrib_command("export_neuropod", *sys.argv)
            cli_export_neuropod(sys.argv[2:])
        else:
            print('Unrecognized command')
    else:
        print('Unrecognized command')
             'initialization and training set shuffling')
    parser.add_argument(
        '-dbg',
        '--debug',
        action='store_true',
        default=False,
        help='enables debugging mode')
    parser.add_argument(
        '-l',
        '--logging_level',
        default='info',
        help='the level of logging to use',
        choices=['critical', 'error', 'warning', 'info', 'debug', 'notset'])

    args = parser.parse_args(sys_argv)

    args.logging_level = logging_level_registry[args.logging_level]
    logging.getLogger('ludwig').setLevel(args.logging_level)
    global logger
    logger = logging.getLogger('ludwig.preprocess')

    if is_on_master():
        print_ludwig('Preprocess', LUDWIG_VERSION)

    preprocess_cli(**vars(args))


if __name__ == '__main__':
    contrib_import()
    contrib_command("preprocess", *sys.argv)
    cli(sys.argv[1:])
    def train(
            self,
            dataset=None,
            training_set=None,
            validation_set=None,
            test_set=None,
            training_set_metadata=None,
            data_format=None,
            experiment_name='api_experiment',
            model_name='run',
            model_resume_path=None,
            skip_save_training_description=False,
            skip_save_training_statistics=False,
            skip_save_model=False,
            skip_save_progress=False,
            skip_save_log=False,
            skip_save_processed_input=False,
            output_directory='results',
            random_seed=default_random_seed,
            debug=False,
            **kwargs
    ):
        """This function is used to perform a full training of the model on
        the specified dataset.

        # Inputs

        :param dataset: (string, dict, DataFrame) source containing the
            entire dataset. If it has a split column, it will be used for
            splitting (0: train, 1: validation, 2: test), otherwise the
            dataset will be randomly split.
        :param training_set: (string, dict, DataFrame) source containing
            training data.
        :param validation_set: (string, dict, DataFrame) source containing
            validation data.
        :param test_set: (string, dict, DataFrame) source containing test
            data.
        :param training_set_metadata: (string, dict) metadata JSON file or
            loaded metadata. Intermediate preprocessed structure containing
            the mappings of the input CSV, created the first time a CSV file
            is used in the same directory with the same name and a '.json'
            extension.
        :param data_format: (string) format to interpret data sources. Will
            be inferred automatically if not specified.
        :param experiment_name: (string) a name for the experiment, used for
            the save directory.
        :param model_name: (string) a name for the model, used for the save
            directory.
        :param model_resume_path: (string) path of the model directory from
            which to resume training.
        :param skip_save_training_description: (bool, default: `False`)
            disables saving the description JSON file.
        :param skip_save_training_statistics: (bool, default: `False`)
            disables saving the training statistics JSON file.
        :param skip_save_model: (bool, default: `False`) disables saving
            model weights and hyperparameters each time the model improves.
            By default Ludwig saves model weights after each epoch the
            validation metric improves, but for a really big model that can
            be time consuming. If you do not want to keep the weights and
            just want to find out what performance a model can get with a
            set of hyperparameters, use this parameter to skip saving, but
            the model will not be loadable later on.
        :param skip_save_progress: (bool, default: `False`) disables saving
            progress each epoch. By default Ludwig saves weights and stats
            after each epoch to enable resuming of training, but for a
            really big model that can be time consuming and will use twice
            as much storage space. Use this parameter to skip saving, but
            training cannot be resumed later on.
        :param skip_save_log: (bool, default: `False`) disables saving
            TensorBoard logs. By default Ludwig saves logs for TensorBoard,
            but if they are not needed turning them off can slightly
            increase the overall speed.
        :param skip_save_processed_input: (bool, default: `False`) skips
            saving intermediate HDF5 and JSON files.
        :param output_directory: (string, default: `'results'`) directory
            that contains the results.
        :param random_seed: (int, default: `42`) a random seed that is going
            to be used anywhere there is a call to a random number
            generator: data splitting, parameter initialization and training
            set shuffling.
        :param debug: (bool, default: `False`) enables debugging mode.

        There are three ways to provide data: by dataframes using the `_df`
        parameters, by CSV using the `_csv` parameters and by HDF5 and JSON,
        using the `_hdf5` and `_json` parameters. The DataFrame approach
        uses data previously obtained and put in a dataframe, the CSV
        approach loads data from a CSV file, while HDF5 and JSON load
        previously preprocessed HDF5 and JSON files (they are saved in the
        same directory of the CSV they are obtained from). For all three
        approaches either a full dataset can be provided (which will be
        split randomly according to the split probabilities defined in the
        model definition, by default 70% training, 10% validation and 20%
        test) or, if it contains a split column, it will be split according
        to that column (interpreting 0 as training, 1 as validation and 2 as
        test). Alternatively, separate dataframes / CSV / HDF5 files can be
        provided for each split.

        During training the model and statistics will be saved in a
        directory `[output_dir]/[experiment_name]_[model_name]_n` where all
        variables are resolved to user specified ones and `n` is an
        increasing number starting from 0 used to differentiate different
        runs.

        # Return

        :return: (tuple) a tuple containing:
            - A dictionary of training statistics for each output feature,
              containing loss and metrics values for each epoch,
            - The preprocessed data,
            - The path of the output directory.
        """
        # setup directories and file names
        if model_resume_path is not None:
            if os.path.exists(model_resume_path):
                output_directory = model_resume_path
            else:
                if is_on_master():
                    logger.info(
                        'Model resume path does not exist, '
                        'starting training from scratch'
                    )
                model_resume_path = None

        if model_resume_path is None:
            if is_on_master():
                output_directory = get_output_directory(
                    output_directory,
                    experiment_name,
                    model_name
                )
            else:
                output_directory = None

        # if we are skipping all saving,
        # there is no need to create a directory that will remain empty
        should_create_output_directory = not (
                skip_save_training_description and
                skip_save_training_statistics and
                skip_save_model and
                skip_save_progress and
                skip_save_log and
                skip_save_processed_input
        )

        description_fn = training_stats_fn = model_dir = None
        if is_on_master():
            if should_create_output_directory:
                if not os.path.exists(output_directory):
                    os.makedirs(output_directory, exist_ok=True)
            description_fn, training_stats_fn, model_dir = get_file_names(
                output_directory)

        # save description
        if is_on_master():
            description = get_experiment_description(
                self.model_definition,
                dataset=dataset,
                training_set=training_set,
                validation_set=validation_set,
                test_set=test_set,
                training_set_metadata=training_set_metadata,
                data_format=data_format,
                random_seed=random_seed
            )
            if not skip_save_training_description:
                save_json(description_fn, description)
            # print description
            logger.info('Experiment name: {}'.format(experiment_name))
            logger.info('Model name: {}'.format(model_name))
            logger.info('Output directory: {}'.format(output_directory))
            logger.info('\n')
            for key, value in description.items():
                logger.info('{}: {}'.format(key, pformat(value, indent=4)))
            logger.info('\n')

        # preprocess
        preprocessed_data = preprocess_for_training(
            self.model_definition,
            dataset=dataset,
            training_set=training_set,
            validation_set=validation_set,
            test_set=test_set,
            training_set_metadata=training_set_metadata,
            data_format=data_format,
            skip_save_processed_input=skip_save_processed_input,
            preprocessing_params=self.model_definition[PREPROCESSING],
            random_seed=random_seed
        )

        (training_set,
         validation_set,
         test_set,
         training_set_metadata) = preprocessed_data
        self.training_set_metadata = training_set_metadata

        if is_on_master():
            logger.info('Training set: {0}'.format(training_set.size))
            if validation_set is not None:
                logger.info('Validation set: {0}'.format(validation_set.size))
            if test_set is not None:
                logger.info('Test set: {0}'.format(test_set.size))

        if is_on_master():
            if not skip_save_model:
                # save train set metadata
                os.makedirs(model_dir, exist_ok=True)
                save_json(
                    os.path.join(
                        model_dir,
                        TRAIN_SET_METADATA_FILE_NAME
                    ),
                    training_set_metadata
                )

        contrib_command("train_init", experiment_directory=output_directory,
                        experiment_name=experiment_name,
                        model_name=model_name,
                        output_directory=output_directory,
                        resume=model_resume_path is not None)

        # Build model if not provided
        # if it was provided it means it was already loaded
        if not self.model:
            if is_on_master():
                print_boxed('MODEL', print_fun=logger.debug)
            # update model definition with metadata properties
            update_model_definition_with_metadata(
                self.model_definition,
                training_set_metadata
            )
            self.model = LudwigModel.create_model(self.model_definition,
                                                  random_seed=random_seed)

        # init trainer
        trainer = Trainer(
            **self.model_definition[TRAINING],
            resume=model_resume_path is not None,
            skip_save_model=skip_save_model,
            skip_save_progress=skip_save_progress,
            skip_save_log=skip_save_log,
            random_seed=random_seed,
            horovod=self._horovod,
            debug=debug
        )

        contrib_command("train_model", self.model, self.model_definition,
                        self.model_definition_fp)

        # train model
        if is_on_master():
            print_boxed('TRAINING')
            if not skip_save_model:
                self.save_model_definition(model_dir)

        train_stats = trainer.train(
            self.model,
            training_set,
            validation_set=validation_set,
            test_set=test_set,
            save_path=model_dir,
        )

        train_trainset_stats, train_valiset_stats, train_testset_stats = \
            train_stats
        train_stats = {
            TRAINING: train_trainset_stats,
            VALIDATION: train_valiset_stats,
            TEST: train_testset_stats
        }

        # save training statistics
        if is_on_master():
            if not skip_save_training_statistics:
                save_json(training_stats_fn, train_stats)

        # grab the results of the model with highest validation test performance
        validation_field = trainer.validation_field
        validation_metric = trainer.validation_metric
        validation_field_result = train_valiset_stats[validation_field]
        best_function = get_best_function(validation_metric)

        # results of the model with highest validation test performance
        if is_on_master() and validation_set is not None:
            epoch_best_vali_metric, best_vali_metric = best_function(
                enumerate(validation_field_result[validation_metric]),
                key=lambda pair: pair[1]
            )
            logger.info(
                'Best validation model epoch: {0}'.format(
                    epoch_best_vali_metric + 1)
            )
            logger.info(
                'Best validation model {0} on validation set {1}: {2}'.format(
                    validation_metric, validation_field, best_vali_metric
                ))
            if test_set is not None:
                best_vali_metric_epoch_test_metric = train_testset_stats[
                    validation_field][validation_metric][
                    epoch_best_vali_metric]
                logger.info(
                    'Best validation model {0} on test set {1}: {2}'.format(
                        validation_metric,
                        validation_field,
                        best_vali_metric_epoch_test_metric
                    )
                )

        if is_on_master():
            logger.info(
                '\nFinished: {0}_{1}'.format(experiment_name, model_name))
            logger.info('Saved to: {0}'.format(output_directory))

        contrib_command("train_save", output_directory)

        self.training_set_metadata = training_set_metadata

        if not skip_save_model:
            # Load the best weights from saved checkpoint
            self.load_weights(model_dir)

        return train_stats, preprocessed_data, output_directory
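# --- Illustrative usage sketch (not part of the original module). ---
# A minimal, hedged example of the programmatic training API, assuming
# LudwigModel is constructed from a model definition dictionary as done
# elsewhere in this codebase. The helper name, feature names and dataset
# path are hypothetical placeholders.
def _example_api_train():
    model_definition = {
        'input_features': [{'name': 'doc_text', 'type': 'text'}],
        'output_features': [{'name': 'label', 'type': 'category'}]
    }
    ludwig_model = LudwigModel(model_definition)
    train_stats, preprocessed_data, output_directory = ludwig_model.train(
        dataset='my_dataset.csv',  # hypothetical path; a DataFrame also works
        experiment_name='api_experiment',
        model_name='run'
    )
    return train_stats, output_directory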