import os

from h2oai_client import Client


def upload_dataset_to_project(con: Client, project_key: str, dataset_file: str, dataset_type: str):
    """
    Uploads the data at the dataset_file path to Driverless AI and links it to the project.
    If the project already has a dataset of the specified type and filename linked, it is not
    re-uploaded. For a newly uploaded dataset, the key of the new dataset is returned;
    otherwise, the key of the existing dataset matching the file name is returned.

    :param con: Connection to H2O Driverless AI
    :param project_key: Key of the project to link the dataset to
    :param dataset_file: File path of the dataset to upload and link to the project
    :param dataset_type: Either 'Training' or 'Testing'
    :return: dataset_key
    """
    file_name = os.path.basename(dataset_file)

    # Re-use an already-linked dataset with the same file name, if one exists
    datasets = con.get_datasets_for_project(project_key, dataset_type)
    dataset = next((x for x in datasets if x.name == file_name), None)

    if dataset is None:
        # Not linked yet: upload the file and link it to the project
        dataset = con.upload_dataset_sync(file_path=dataset_file)
        con.link_dataset_to_project(project_key=project_key, dataset_key=dataset.key,
                                    dataset_type=dataset_type)

    return dataset.key
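# Usage sketch for upload_dataset_to_project. The URL, credentials, project key,
# and file paths below are hypothetical placeholders, not values from a real
# deployment:
#
#   con = Client("http://localhost:12345", "user", "password")
#   train_key = upload_dataset_to_project(con, "my_project_key", "data/train.csv", "Training")
#   test_key = upload_dataset_to_project(con, "my_project_key", "data/test.csv", "Testing")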
def test_debug_pyclient():
    import os
    import shutil
    import sys
    import zipfile

    import pandas as pd
    from h2oai_client import Client

    pd.set_option('display.max_rows', 50)
    pd.set_option('display.max_columns', 500)
    pd.set_option('display.width', 1000)

    # Login info
    dai_url = "http://****:12345"
    dai_user = "******"
    dai_pwd = "****"

    # Data information
    data_file_name = "****.csv"
    y = "****"

    # Transformer information
    transformer_file_name = "****.py"

    # All official (non-custom) transformers
    transformers_noncustom = [
        'CVCatNumEncode', 'CVTargetEncode', 'CatOriginalTransformer', 'ClusterDistTransformer',
        'ClusterIdTransformer', 'ClusterTETransformer', 'DatesTransformer',
        'EwmaLagsTransformer', 'FrequentTransformer', 'InteractionsTransformer',
        'IsHolidayTransformer', 'LagsAggregatesTransformer', 'LagsInteractionTransformer',
        'LagsTransformer', 'LexiLabelEncoder', 'NumCatTETransformer', 'NumToCatTETransformer',
        'NumToCatWoEMonotonicTransformer', 'NumToCatWoETransformer', 'OneHotEncodingTransformer',
        'OriginalTransformer', 'SortedLETransformer', 'StrFeatureTransformer', 'TextClustDistTransformer',
        'TextClustTETransformer', 'TextLinModelTransformer', 'TextTransformer', 'TruncSVDNumTransformer',
        'WeightOfEvidenceTransformer']

    # Any installed custom transformers you don't want to test
    transformers_custom_nontesting = ['MyLogTransformer']

    all_nontest_transformers = transformers_noncustom + transformers_custom_nontesting

    # STEP ZERO: Connect to Driverless AI
    h2oai = Client(dai_url, dai_user, dai_pwd)

    # STEP ONE: Load data set (and related tasks)

    # View all data sets in DAI
    all_data_sets = h2oai.list_datasets(0, 100)
    all_data_sets = pd.DataFrame({
        'key': list(map(lambda x: x.key, all_data_sets)),
        'name': list(map(lambda x: x.name, all_data_sets))})

    print("PRE-LOADED DATASETS:")
    print(all_data_sets)

    # Check if the data was pre-loaded - if so, use that data set - if not, load it
    if data_file_name in all_data_sets['name'].values:
        print("\nData already loaded ", data_file_name)
        data_key = all_data_sets.loc[all_data_sets["name"] == data_file_name, "key"].iloc[0]
        data_load_job = h2oai.get_dataset_job(data_key).entity
    else:
        print("\nLoading file ", data_file_name)
        data_load_job = h2oai.upload_dataset_sync(data_file_name)
        data_key = data_load_job.key

    # STEP TWO: Load custom transformer (and related tasks)
    # Probably not good to just upload every time; there is no function to delete
    # from Python, only by ssh-ing in and running:
    #   rm tmp/contrib/transformers/[function]_randomletters_content.py
    print("\nUploading Transformer ", transformer_file_name)
    my_transformer = h2oai.upload_custom_recipe_sync(transformer_file_name)

    # Returns True or False - exit if the upload fails - check the DAI UI for the error message
    if my_transformer:
        print("\nTransformer uploaded successfully\n")
    else:
        print("\nTransformer upload failed, exiting program.\n")
        sys.exit()

    # STEP THREE: Run experiment (and related tasks)
    print("\nStarting Experiment\n")
    experiment = h2oai.start_experiment_sync(
        dataset_key=data_key,
        target_col=y,
        is_classification=True,
        accuracy=1,
        time=1,
        interpretability=10,
        scorer="F1",
        score_f_name=None,
        config_overrides="""
                         feature_brain_level=0
                         exclude_transformers={dont_use}
                         """.format(dont_use=all_nontest_transformers)
    )

    # To re-use an existing experiment instead, look it up by its key:
    # experiment = h2oai.get_model_job("lomotare").entity

    # STEP FOUR: Check the transformation was used

    # Download and extract the experiment summary
    summary_path = h2oai.download(src_path=experiment.summary_path, dest_dir=".")
    dir_path = "h2oai_experiment_summary_" + experiment.key
    with zipfile.ZipFile(summary_path, 'r') as z:
        z.extractall(dir_path)

    # View features
    features = pd.read_csv(os.path.join(dir_path, "features.txt"), skipinitialspace=True)
    print(features)

    # STEP FIVE: Transform data and ensure it looks as expected
    transform = h2oai.fit_transform_batch_sync(
        model_key=experiment.key,
        training_dataset_key=data_key,
        validation_dataset_key=None,
        test_dataset_key=None,
        validation_split_fraction=0.25,
        seed=1234,
        fold_column=None)

    # Download the transformed training and validation data
    transform_train_path = h2oai.download(src_path=transform.training_output_csv_path, dest_dir=".")
    transform_validate_path = h2oai.download(src_path=transform.validation_output_csv_path, dest_dir=".")

    transform_train = pd.read_csv(transform_train_path, skipinitialspace=True)
    transform_validate = pd.read_csv(transform_validate_path, skipinitialspace=True)

    print(transform_train.head())
    print(transform_validate.head())

    # STEP 1000: Clean up
    os.remove(summary_path)
    os.remove(transform_train_path)
    os.remove(transform_validate_path)
    shutil.rmtree(dir_path)
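# Entry point so the debug script can be run directly; this is an assumption
# about how the script is invoked - drop it if a test runner collects
# test_debug_pyclient() instead.
if __name__ == "__main__":
    test_debug_pyclient()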