def test_getting_single_predictions_nlp_date_multilabel_classification():
    np.random.seed(0)

    df_twitter_train, df_twitter_test = utils.get_twitter_sentiment_multilabel_classification_dataset()

    column_descriptions = {
        'airline_sentiment': 'output',
        'airline': 'categorical',
        'text': 'nlp',
        'tweet_location': 'categorical',
        'user_timezone': 'categorical',
        'tweet_created': 'date'
    }

    ml_predictor = Predictor(type_of_estimator='classifier', column_descriptions=column_descriptions)
    ml_predictor.train(df_twitter_train)

    file_name = ml_predictor.save(str(random.random()))

    saved_ml_pipeline = load_ml_model(file_name)

    os.remove(file_name)
    try:
        keras_file_name = file_name[:-5] + '_keras_deep_learning_model.h5'
        os.remove(keras_file_name)
    except:
        pass

    df_twitter_test_dictionaries = df_twitter_test.to_dict('records')

    # 1. make sure the accuracy is the same

    predictions = []
    for row in df_twitter_test_dictionaries:
        predictions.append(saved_ml_pipeline.predict(row))

    print('predictions')
    print(predictions)

    first_score = accuracy_score(df_twitter_test.airline_sentiment, predictions)
    print('first_score')
    print(first_score)
    # Make sure our score is good, but not unreasonably good
    lower_bound = 0.73
    assert lower_bound < first_score < 0.79

    # 2. make sure the speed is reasonable (do it a few extra times)
    data_length = len(df_twitter_test_dictionaries)
    start_time = datetime.datetime.now()
    for idx in range(1000):
        row_num = idx % data_length
        saved_ml_pipeline.predict(df_twitter_test_dictionaries[row_num])
    end_time = datetime.datetime.now()
    duration = end_time - start_time

    print('duration.total_seconds()')
    print(duration.total_seconds())

    # It's very difficult to set a benchmark for speed that will work across all machines.
    # On my 2013 bottom of the line 15" MacBook Pro,
    # this runs in about 0.8 seconds for 1000 predictions
    # That's about 1 millisecond per prediction
    # Assuming we might be running on a test box that's pretty weak, multiply by 3
    # Also make sure we're not running unreasonably quickly
    assert 0.2 < duration.total_seconds() < 15

    # 3. make sure we're not modifying the dictionaries
    # (the score is the same after running a few experiments as it is the first time)

    predictions = []
    for row in df_twitter_test_dictionaries:
        predictions.append(saved_ml_pipeline.predict(row))

    print('predictions')
    print(predictions)
    print('df_twitter_test_dictionaries')
    print(df_twitter_test_dictionaries)
    second_score = accuracy_score(df_twitter_test.airline_sentiment, predictions)
    print('second_score')
    print(second_score)
    # Make sure our score is good, but not unreasonably good
    assert lower_bound < second_score < 0.79

df_train, df_test = get_boston_dataset()

# Tell auto_ml which column is 'output'
# Also note columns that aren't purely numerical
# Examples include ['nlp', 'date', 'categorical', 'ignore']
column_descriptions = {
    'MEDV': 'output',
    'CHAS': 'categorical'
}

ml_predictor = Predictor(type_of_estimator='regressor', column_descriptions=column_descriptions)

ml_predictor.train(df_train)

# Score the model on test data
test_score = ml_predictor.score(df_test, df_test.MEDV)

# auto_ml is specifically tuned for running in production
# It can get predictions on an individual row (passed in as a dictionary)
# A single prediction like this takes ~1 millisecond

# Here we will demonstrate saving the trained model, and loading it again
file_name = ml_predictor.save()

trained_model = load_ml_model(file_name)

# .predict and .predict_proba take in either:
# A pandas DataFrame
# A list of dictionaries
# A single dictionary (optimized for speed in production environments)

predictions = trained_model.predict(df_test)
print(predictions)

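# The comments above mention the single-dictionary fast path, but the snippet only
# demonstrates DataFrame prediction. A minimal sketch of single-row prediction,
# reusing trained_model and df_test from the example above (the single_row /
# single_prediction names are illustrative, not part of the original snippet):
single_row = df_test.to_dict('records')[0]
single_prediction = trained_model.predict(single_row)
print(single_prediction)
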
def test_user_input_func_classification():
    np.random.seed(0)

    df_titanic_train, df_titanic_test = utils.get_titanic_binary_classification_dataset()

    def age_bucketing(data):

        def define_buckets(age):
            if age <= 17:
                return 'youth'
            elif age <= 40:
                return 'adult'
            elif age <= 60:
                return 'adult2'
            else:
                return 'over_60'

        if isinstance(data, dict):
            data['age_bucket'] = define_buckets(data['age'])
        else:
            data['age_bucket'] = data.age.apply(define_buckets)

        return data

    column_descriptions = {
        'survived': 'output',
        'sex': 'categorical',
        'embarked': 'categorical',
        'pclass': 'categorical',
        'age_bucket': 'categorical'
    }

    ml_predictor = Predictor(type_of_estimator='classifier', column_descriptions=column_descriptions)

    ml_predictor.train(df_titanic_train, user_input_func=age_bucketing)

    file_name = ml_predictor.save(str(random.random()))

    saved_ml_pipeline = load_ml_model(file_name)

    os.remove(file_name)
    try:
        keras_file_name = file_name[:-5] + '_keras_deep_learning_model.h5'
        os.remove(keras_file_name)
    except:
        pass

    df_titanic_test_dictionaries = df_titanic_test.to_dict('records')

    # 1. make sure the accuracy is the same

    predictions = []
    for row in df_titanic_test_dictionaries:
        predictions.append(saved_ml_pipeline.predict_proba(row)[1])

    print('predictions')
    print(predictions)

    first_score = utils.calculate_brier_score_loss(df_titanic_test.survived, predictions)
    print('first_score')
    print(first_score)
    # Make sure our score is good, but not unreasonably good
    assert -0.16 < first_score < -0.135

    # 2. make sure the speed is reasonable (do it a few extra times)
    data_length = len(df_titanic_test_dictionaries)
    start_time = datetime.datetime.now()
    for idx in range(1000):
        row_num = idx % data_length
        saved_ml_pipeline.predict(df_titanic_test_dictionaries[row_num])
    end_time = datetime.datetime.now()
    duration = end_time - start_time

    print('duration.total_seconds()')
    print(duration.total_seconds())

    # It's very difficult to set a benchmark for speed that will work across all machines.
    # On my 2013 bottom of the line 15" MacBook Pro,
    # this runs in about 0.8 seconds for 1000 predictions
    # That's about 1 millisecond per prediction
    # Assuming we might be running on a test box that's pretty weak, multiply by 3
    # Also make sure we're not running unreasonably quickly
    assert 0.05 < duration.total_seconds() < 15

    # 3. make sure we're not modifying the dictionaries
    # (the score is the same after running a few experiments as it is the first time)

    predictions = []
    for row in df_titanic_test_dictionaries:
        predictions.append(saved_ml_pipeline.predict_proba(row)[1])

    print('predictions')
    print(predictions)
    print('df_titanic_test_dictionaries')
    print(df_titanic_test_dictionaries)
    second_score = utils.calculate_brier_score_loss(df_titanic_test.survived, predictions)
    print('second_score')
    print(second_score)
    # Make sure our score is good, but not unreasonably good
    assert -0.16 < second_score < -0.135

def test_feature_learning_categorical_ensembling_getting_single_predictions_regression(
        model_name=None):
    np.random.seed(0)

    df_boston_train, df_boston_test = utils.get_boston_regression_dataset()

    column_descriptions = {'MEDV': 'output', 'CHAS': 'categorical'}

    ml_predictor = Predictor(type_of_estimator='regressor', column_descriptions=column_descriptions)

    # NOTE: this is bad practice to pass in our same training set as our fl_data set,
    # but we don't have enough data to do it any other way
    df_boston_train, fl_data = train_test_split(df_boston_train, test_size=0.2)
    ml_predictor.train_categorical_ensemble(
        df_boston_train,
        model_names=model_name,
        feature_learning=True,
        fl_data=fl_data,
        categorical_column='CHAS')

    # print('Score on training data')
    # ml_predictor.score(df_boston_train, df_boston_train.MEDV)

    file_name = ml_predictor.save(str(random.random()))

    from cash_ml.utils_models import load_ml_model

    saved_ml_pipeline = load_ml_model(file_name)

    # with open(file_name, 'rb') as read_file:
    #     saved_ml_pipeline = dill.load(read_file)

    os.remove(file_name)
    try:
        keras_file_name = file_name[:-5] + '_keras_deep_learning_model.h5'
        os.remove(keras_file_name)
    except:
        pass

    df_boston_test_dictionaries = df_boston_test.to_dict('records')

    # 1. make sure the accuracy is the same

    predictions = []
    for row in df_boston_test_dictionaries:
        predictions.append(saved_ml_pipeline.predict(row))

    first_score = utils.calculate_rmse(df_boston_test.MEDV, predictions)
    print('first_score')
    print(first_score)
    # Make sure our score is good, but not unreasonably good
    lower_bound = -4.5

    assert lower_bound < first_score < -3.4

    # 2. make sure the speed is reasonable (do it a few extra times)
    data_length = len(df_boston_test_dictionaries)
    start_time = datetime.datetime.now()
    for idx in range(1000):
        row_num = idx % data_length
        saved_ml_pipeline.predict(df_boston_test_dictionaries[row_num])
    end_time = datetime.datetime.now()
    duration = end_time - start_time

    print('duration.total_seconds()')
    print(duration.total_seconds())

    # It's very difficult to set a benchmark for speed that will work across all machines.
    # On my 2013 bottom of the line 15" MacBook Pro,
    # this runs in about 0.8 seconds for 1000 predictions
    # That's about 1 millisecond per prediction
    # Assuming we might be running on a test box that's pretty weak, multiply by 3
    # Also make sure we're not running unreasonably quickly
    assert 0.2 < duration.total_seconds() < 15

    # 3. make sure we're not modifying the dictionaries
    # (the score is the same after running a few experiments as it is the first time)

    predictions = []
    for row in df_boston_test_dictionaries:
        predictions.append(saved_ml_pipeline.predict(row))

    second_score = utils.calculate_rmse(df_boston_test.MEDV, predictions)
    print('second_score')
    print(second_score)
    # Make sure our score is good, but not unreasonably good
    assert lower_bound < second_score < -3.4

def test_feature_learning_getting_single_predictions_classification(model_name=None):
    np.random.seed(0)

    df_titanic_train, df_titanic_test = utils.get_titanic_binary_classification_dataset()

    column_descriptions = {
        'survived': 'output',
        'sex': 'categorical',
        'embarked': 'categorical',
        'pclass': 'categorical'
    }

    ml_predictor = Predictor(
        type_of_estimator='classifier', column_descriptions=column_descriptions)

    # NOTE: this is bad practice to pass in our same training set as our fl_data set,
    # but we don't have enough data to do it any other way
    df_titanic_train, fl_data = train_test_split(df_titanic_train, test_size=0.2)
    ml_predictor.train(
        df_titanic_train, model_names=model_name, feature_learning=True, fl_data=fl_data)

    file_name = ml_predictor.save(str(random.random()))

    saved_ml_pipeline = load_ml_model(file_name)

    os.remove(file_name)
    try:
        keras_file_name = file_name[:-5] + '_keras_deep_learning_model.h5'
        os.remove(keras_file_name)
    except:
        pass

    df_titanic_test_dictionaries = df_titanic_test.to_dict('records')

    # 1. make sure the accuracy is the same

    predictions = []
    for row in df_titanic_test_dictionaries:
        predictions.append(saved_ml_pipeline.predict_proba(row)[1])

    print('predictions')
    print(predictions)

    first_score = utils.calculate_brier_score_loss(df_titanic_test.survived, predictions)
    print('first_score')
    print(first_score)
    # Make sure our score is good, but not unreasonably good
    lower_bound = -0.16
    if model_name == 'DeepLearningClassifier':
        lower_bound = -0.187

    assert lower_bound < first_score < -0.133

    # 2. make sure the speed is reasonable (do it a few extra times)
    data_length = len(df_titanic_test_dictionaries)
    start_time = datetime.datetime.now()
    for idx in range(1000):
        row_num = idx % data_length
        saved_ml_pipeline.predict(df_titanic_test_dictionaries[row_num])
    end_time = datetime.datetime.now()
    duration = end_time - start_time

    print('duration.total_seconds()')
    print(duration.total_seconds())

    # It's very difficult to set a benchmark for speed that will work across all machines.
    # On my 2013 bottom of the line 15" MacBook Pro,
    # this runs in about 0.8 seconds for 1000 predictions
    # That's about 1 millisecond per prediction
    # Assuming we might be running on a test box that's pretty weak, multiply by 3
    # Also make sure we're not running unreasonably quickly
    assert 0.2 < duration.total_seconds() < 15

    # 3. make sure we're not modifying the dictionaries
    # (the score is the same after running a few experiments as it is the first time)

    predictions = []
    for row in df_titanic_test_dictionaries:
        predictions.append(saved_ml_pipeline.predict_proba(row)[1])

    print('predictions')
    print(predictions)
    print('df_titanic_test_dictionaries')
    print(df_titanic_test_dictionaries)
    second_score = utils.calculate_brier_score_loss(df_titanic_test.survived, predictions)
    print('second_score')
    print(second_score)
    # Make sure our score is good, but not unreasonably good
    assert lower_bound < second_score < -0.133

def getting_single_predictions_classifier_test():
    np.random.seed(0)

    df_titanic_train, df_titanic_test = utils.get_titanic_binary_classification_dataset()

    column_descriptions = {
        'survived': 'output',
        'sex': 'categorical',
        'embarked': 'categorical',
        'pclass': 'categorical',
        'age_bucket': 'categorical'
    }

    ensemble_config = [{
        'model_name': 'LGBMClassifier'
    }, {
        'model_name': 'RandomForestClassifier'
    }]

    ml_predictor = Predictor(type_of_estimator='classifier', column_descriptions=column_descriptions)

    ml_predictor.train(df_titanic_train, ensemble_config=ensemble_config)

    file_name = ml_predictor.save(str(random.random()))

    saved_ml_pipeline = load_ml_model(file_name)

    os.remove(file_name)
    try:
        keras_file_name = file_name[:-5] + '_keras_deep_learning_model.h5'
        os.remove(keras_file_name)
    except:
        pass

    df_titanic_test_dictionaries = df_titanic_test.to_dict('records')

    # 1. make sure the accuracy is the same

    predictions = []
    for row in df_titanic_test_dictionaries:
        predictions.append(saved_ml_pipeline.predict_proba(row)[1])

    print('predictions')
    print(predictions)

    first_score = utils.calculate_brier_score_loss(df_titanic_test.survived, predictions)
    print('first_score')
    print(first_score)
    # Make sure our score is good, but not unreasonably good
    assert -0.15 < first_score < -0.135

    # 2. make sure the speed is reasonable (do it a few extra times)
    data_length = len(df_titanic_test_dictionaries)
    start_time = datetime.datetime.now()
    for idx in range(1000):
        row_num = idx % data_length
        saved_ml_pipeline.predict(df_titanic_test_dictionaries[row_num])
    end_time = datetime.datetime.now()
    duration = end_time - start_time

    print('duration.total_seconds()')
    print(duration.total_seconds())

    # It's very difficult to set a benchmark for speed that will work across all machines.
    # On my 2013 bottom of the line 15" MacBook Pro,
    # this runs in about 0.8 seconds for 1000 predictions
    # That's about 1 millisecond per prediction
    # Assuming we might be running on a test box that's pretty weak, multiply by 3
    # Also make sure we're not running unreasonably quickly
    assert 0.2 < duration.total_seconds() < 60

    # 3. make sure we're not modifying the dictionaries
    # (the score is the same after running a few experiments as it is the first time)

    predictions = []
    for row in df_titanic_test_dictionaries:
        predictions.append(saved_ml_pipeline.predict_proba(row)[1])

    print('predictions')
    print(predictions)
    print('df_titanic_test_dictionaries')
    print(df_titanic_test_dictionaries)
    second_score = utils.calculate_brier_score_loss(df_titanic_test.survived, predictions)
    print('second_score')
    print(second_score)
    # Make sure our score is good, but not unreasonably good
    assert -0.15 < second_score < -0.135

def getting_single_predictions_regressor_test():
    np.random.seed(0)

    df_boston_train, df_boston_test = utils.get_boston_regression_dataset()

    column_descriptions = {'MEDV': 'output', 'CHAS': 'categorical'}

    ensemble_config = [{
        'model_name': 'LGBMRegressor'
    }, {
        'model_name': 'RandomForestRegressor'
    }]

    ml_predictor = Predictor(type_of_estimator='regressor', column_descriptions=column_descriptions)

    ml_predictor.train(df_boston_train, ensemble_config=ensemble_config)

    test_score = ml_predictor.score(df_boston_test, df_boston_test.MEDV)

    print('test_score')
    print(test_score)

    assert -3.5 < test_score < -2.8

    file_name = ml_predictor.save(str(random.random()))

    saved_ml_pipeline = load_ml_model(file_name)

    os.remove(file_name)
    try:
        keras_file_name = file_name[:-5] + '_keras_deep_learning_model.h5'
        os.remove(keras_file_name)
    except:
        pass

    df_boston_test_dictionaries = df_boston_test.to_dict('records')

    # 1. make sure the accuracy is the same

    predictions = []
    for row in df_boston_test_dictionaries:
        predictions.append(saved_ml_pipeline.predict(row))

    first_score = utils.calculate_rmse(df_boston_test.MEDV, predictions)
    print('first_score')
    print(first_score)
    # Make sure our score is good, but not unreasonably good
    lower_bound = -3.5

    assert lower_bound < first_score < -2.8

    # 2. make sure the speed is reasonable (do it a few extra times)
    data_length = len(df_boston_test_dictionaries)
    start_time = datetime.datetime.now()
    for idx in range(1000):
        row_num = idx % data_length
        saved_ml_pipeline.predict(df_boston_test_dictionaries[row_num])
    end_time = datetime.datetime.now()
    duration = end_time - start_time

    print('duration.total_seconds()')
    print(duration.total_seconds())

    # It's very difficult to set a benchmark for speed that will work across all machines.
    # On my 2013 bottom of the line 15" MacBook Pro,
    # this runs in about 0.8 seconds for 1000 predictions
    # That's about 1 millisecond per prediction
    # Assuming we might be running on a test box that's pretty weak, multiply by 3
    # Also make sure we're not running unreasonably quickly
    assert 0.2 < duration.total_seconds() < 60

    # 3. make sure we're not modifying the dictionaries
    # (the score is the same after running a few experiments as it is the first time)

    predictions = []
    for row in df_boston_test_dictionaries:
        predictions.append(saved_ml_pipeline.predict(row))

    second_score = utils.calculate_rmse(df_boston_test.MEDV, predictions)
    print('second_score')
    print(second_score)
    # Make sure our score is good, but not unreasonably good
    assert lower_bound < second_score < -2.8

def test_ignores_new_invalid_features():
    # One of the great unintentional features of cash_ml is that you can pass in new features
    # at prediction time that weren't present at training time, and they're silently ignored!
    # One edge case here is new features that are strange objects (lists, datetimes, intervals,
    # or anything else that we can't process in our default data processing pipeline).
    # Initially, we just ignored them in dict_vectorizer, but we need to ignore them earlier.
    np.random.seed(0)

    df_boston_train, df_boston_test = utils.get_boston_regression_dataset()

    column_descriptions = {'MEDV': 'output', 'CHAS': 'categorical'}

    ml_predictor = Predictor(type_of_estimator='regressor', column_descriptions=column_descriptions)

    ml_predictor.train(df_boston_train)

    file_name = ml_predictor.save(str(random.random()))

    saved_ml_pipeline = load_ml_model(file_name)

    os.remove(file_name)
    try:
        keras_file_name = file_name[:-5] + '_keras_deep_learning_model.h5'
        os.remove(keras_file_name)
    except:
        pass

    df_boston_test_dictionaries = df_boston_test.to_dict('records')

    # 1. make sure the accuracy is the same

    predictions = []
    for row in df_boston_test_dictionaries:
        if random.random() > 0.9:
            row['totally_new_feature'] = datetime.datetime.now()
            row['really_strange_feature'] = random.random
            row['we_should_really_ignore_this'] = Predictor
            row['pretty_vanilla_ignored_field'] = 8
            row['potentially_confusing_things_here'] = float('nan')
            row['potentially_confusing_things_again'] = float('inf')
            row['this_is_a_list'] = [1, 2, 3, 4, 5]
        predictions.append(saved_ml_pipeline.predict(row))

    print('predictions')
    print(predictions)
    print('predictions[0]')
    print(predictions[0])
    print('type(predictions)')
    print(type(predictions))
    first_score = utils.calculate_rmse(df_boston_test.MEDV, predictions)
    print('first_score')
    print(first_score)
    # Make sure our score is good, but not unreasonably good
    lower_bound = -3.0

    assert lower_bound < first_score < -2.7

    # 2. make sure the speed is reasonable (do it a few extra times)
    data_length = len(df_boston_test_dictionaries)
    start_time = datetime.datetime.now()
    for idx in range(1000):
        row_num = idx % data_length
        saved_ml_pipeline.predict(df_boston_test_dictionaries[row_num])
    end_time = datetime.datetime.now()
    duration = end_time - start_time

    print('duration.total_seconds()')
    print(duration.total_seconds())

    # It's very difficult to set a benchmark for speed that will work across all machines.
    # On my 2013 bottom of the line 15" MacBook Pro,
    # this runs in about 0.8 seconds for 1000 predictions
    # That's about 1 millisecond per prediction
    # Assuming we might be running on a test box that's pretty weak, multiply by 3
    # Also make sure we're not running unreasonably quickly
    assert 0.1 < duration.total_seconds() < 15

    # 3. make sure we're not modifying the dictionaries
    # (the score is the same after running a few experiments as it is the first time)

    predictions = []
    for row in df_boston_test_dictionaries:
        predictions.append(saved_ml_pipeline.predict(row))

    second_score = utils.calculate_rmse(df_boston_test.MEDV, predictions)
    print('second_score')
    print(second_score)
    # Make sure our score is good, but not unreasonably good
    assert lower_bound < second_score < -2.7