from h2oai_client import Client

ip = '35.175.227.14'
address = 'http://' + ip + ':12345'
username = '******'
password = '******'
h2oai = Client(address=address, username=username, password=password)

### Amazon Reviews
dataPath = '/data/Training/AmazonFineFoodReviews.csv'
basename = 'Reviews'
target = 'PositiveReview'
ratio = 0.8

reviews_data = h2oai.create_dataset_sync(dataPath)

# Split the data
reviews_split_data = h2oai.make_dataset_split(dataset_key=reviews_data.key,
                                              output_name1=basename + "_train",
                                              output_name2=basename + "_test",
                                              target=target,
                                              fold_col="",
                                              time_col="",
                                              ratio=ratio,
                                              seed=1234)

train_key = h2oai.get_dataset_split_job(reviews_split_data).entity[0]
test_key = h2oai.get_dataset_split_job(reviews_split_data).entity[1]

# Reviews Default
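# A minimal sketch of a default-settings experiment on the Reviews split,
# assuming train_key and test_key hold the dataset keys returned by the split
# job above. The call mirrors the start_experiment_sync pattern used in the
# time-series snippet in this section; the accuracy/time/interpretability
# values are illustrative placeholders, not settings from the original material.
reviews_default = h2oai.start_experiment_sync(dataset_key=train_key,
                                              testset_key=test_key,
                                              accuracy=5,           # placeholder knob value
                                              time=5,               # placeholder knob value
                                              interpretability=5,   # placeholder knob value
                                              is_classification=True,
                                              target_col=target)
print("Reviews default model test score: " + str(round(reviews_default.test_score, 3)))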
import h2oai_client
from h2oai_client import Client

h2oai = Client(address='http://129.213.63.69:12345', username='******', password='******')

train = h2oai.create_dataset_sync('/train.csv')
test = h2oai.create_dataset_sync('/test.csv')

experiment = h2oai.start_experiment_sync(dataset_key=train.key,
                                         testset_key=test.key,
                                         accuracy=10,
                                         time=10,
                                         interpretability=1,
                                         is_classification=True,
                                         target_col='LABEL',
                                         is_timeseries=True,
                                         time_col='DATE',
                                         num_gap_periods=1,
                                         num_prediction_periods=1)

print("Final Model Score on Validation Data: " + str(round(experiment.valid_score, 3)))
print("Final Model Score on Test Data: " + str(round(experiment.test_score, 3)))
from h2oai_client import Client

ip = '35.175.227.14'
address = 'http://' + ip + ':12345'
username = '******'
password = '******'
h2oai = Client(address=address, username=username, password=password)

### Credit Card Models
dataPath = '/data/Training/CreditCard.csv'
basename = 'Card'
target = 'Default'
ratio = 0.8
dropped = []

card_data = h2oai.create_dataset_sync(dataPath)

# Split the data
card_split_data = h2oai.make_dataset_split(dataset_key=card_data.key,
                                           output_name1=basename + "_train",
                                           output_name2=basename + "_test",
                                           target=target,
                                           fold_col="",
                                           time_col="",
                                           ratio=ratio,
                                           seed=1234)

train_key = h2oai.get_dataset_split_job(card_split_data).entity[0]
test_key = h2oai.get_dataset_split_job(card_split_data).entity[1]
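# A minimal sketch of launching an experiment on the Card split, assuming
# train_key and test_key are the dataset keys produced by the split job above.
# The knob values are illustrative placeholders; the empty `dropped` list would
# be supplied as the columns-to-drop argument, whose exact parameter name
# depends on the client version and is not shown in the original snippet.
card_experiment = h2oai.start_experiment_sync(dataset_key=train_key,
                                              testset_key=test_key,
                                              accuracy=5,           # placeholder knob value
                                              time=5,               # placeholder knob value
                                              interpretability=5,   # placeholder knob value
                                              is_classification=True,
                                              target_col=target)
print("Card model test score: " + str(round(card_experiment.test_score, 3)))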
import math
from h2oai_client import Client, ModelParameters, InterpretParameters

ip = '35.175.227.14'
address = 'http://' + ip + ':12345'
username = '******'
password = '******'
h2oai = Client(address=address, username=username, password=password)

### Boston Housing Models
dataPath = '/data/Training/BostonHousing.csv'
basename = 'Housing'
target = 'VALUE'
ratio = 0.8

boston_data = h2oai.create_dataset_sync(dataPath)

# Split the data
boston_split_data = h2oai.make_dataset_split(dataset_key=boston_data.key,
                                             output_name1=basename + "_train",
                                             output_name2=basename + "_test",
                                             target=target,
                                             fold_col="",
                                             time_col="",
                                             ratio=ratio,
                                             seed=1234)

train_key = h2oai.get_dataset_split_job(boston_split_data).entity[0]
test_key = h2oai.get_dataset_split_job(boston_split_data).entity[1]

dropped = []
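# A minimal sketch of a regression experiment on the Housing split (VALUE is a
# continuous target, so is_classification=False), assuming train_key and
# test_key are the dataset keys from the split job. Knob values are
# illustrative placeholders, not settings from the original material.
boston_experiment = h2oai.start_experiment_sync(dataset_key=train_key,
                                                testset_key=test_key,
                                                accuracy=5,          # placeholder knob value
                                                time=5,              # placeholder knob value
                                                interpretability=5,  # placeholder knob value
                                                is_classification=False,
                                                target_col=target)
print("Housing model test score: " + str(round(boston_experiment.test_score, 3)))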
from h2oai_client import Client, ModelParameters, InterpretParameters

ip = '35.175.227.14'
address = 'http://' + ip + ':12345'
username = '******'
password = '******'
h2oai = Client(address=address, username=username, password=password)

### Diabetes Models
dataPath = '/data/Training/PimaDiabetes.csv'
basename = 'Diabetes'
target = 'Outcome'
ratio = 0.8

diabetes_data = h2oai.create_dataset_sync(dataPath)

# Split the data
diabetes_split_data = h2oai.make_dataset_split(dataset_key=diabetes_data.key,
                                               output_name1=basename + "_train",
                                               output_name2=basename + "_test",
                                               target=target,
                                               fold_col="",
                                               time_col="",
                                               ratio=ratio,
                                               seed=1234)

train_key = h2oai.get_dataset_split_job(diabetes_split_data).entity[0]
test_key = h2oai.get_dataset_split_job(diabetes_split_data).entity[1]

dropped = []
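# A minimal sketch of a classification experiment on the Diabetes split,
# assuming train_key and test_key are the dataset keys from the split job.
# Knob values are illustrative placeholders; the ModelParameters and
# InterpretParameters imports above point to later tuning/interpretation steps
# that are not shown in this snippet.
diabetes_experiment = h2oai.start_experiment_sync(dataset_key=train_key,
                                                  testset_key=test_key,
                                                  accuracy=5,          # placeholder knob value
                                                  time=5,              # placeholder knob value
                                                  interpretability=5,  # placeholder knob value
                                                  is_classification=True,
                                                  target_col=target)
print("Diabetes model test score: " + str(round(diabetes_experiment.test_score, 3)))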
from h2oai_client import Client, ModelParameters, InterpretParameters

ip = '35.175.227.14'
address = 'http://' + ip + ':12345'
username = '******'
password = '******'
h2oai = Client(address=address, username=username, password=password)

### Titanic Models
dataPath = '/data/Training/Titanic.csv'
basename = 'Titanic'
target = 'survived'
ratio = 0.8

titanic_data = h2oai.create_dataset_sync(dataPath)

# Split the data
titanic_split_data = h2oai.make_dataset_split(dataset_key=titanic_data.key,
                                              output_name1=basename + "_train",
                                              output_name2=basename + "_test",
                                              target=target,
                                              fold_col="",
                                              time_col="",
                                              ratio=ratio,
                                              seed=1234)

train_key = h2oai.get_dataset_split_job(titanic_split_data).entity[0]
test_key = h2oai.get_dataset_split_job(titanic_split_data).entity[1]

knobs = [8, 2, 8]
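# A minimal sketch of how the knobs list above would typically be used:
# accuracy, time, and interpretability dials fed into start_experiment_sync,
# mirroring the pattern in the time-series snippet. Only the knob values and
# the target come from the original material; the rest is an assumed wiring of
# the split keys into the experiment call.
titanic_experiment = h2oai.start_experiment_sync(dataset_key=train_key,
                                                 testset_key=test_key,
                                                 accuracy=knobs[0],          # 8
                                                 time=knobs[1],              # 2
                                                 interpretability=knobs[2],  # 8
                                                 is_classification=True,
                                                 target_col=target)
print("Titanic model test score: " + str(round(titanic_experiment.test_score, 3)))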