def main(args):
    '''Main function for AutoML in time-series predictions.

    Args:
        - data loading parameters:
            - data_names: mimic, ward, cf
        - preprocess parameters:
            - normalization: minmax, standard, None
            - one_hot_encoding: input features that need to be one-hot encoded
            - problem: 'one-shot' or 'online'
                - 'one-shot': one time prediction at the end of the time-series
                - 'online': prediction at every time stamp of the time-series
            - max_seq_len: maximum sequence length after padding
            - label_name: the column name for the label(s)
            - treatment: the column name for treatments
        - imputation parameters:
            - static_imputation_model: mean, median, mice, missforest, knn, gain
            - temporal_imputation_model: mean, median, linear, quadratic, cubic, spline, mrnn, tgain
        - feature selection parameters:
            - feature_selection_model: greedy-addition, greedy-deletion, recursive-addition, recursive-deletion, None
            - feature_number: selected feature number
        - predictor_parameters:
            - epochs: number of epochs
            - bo_itr: bayesian optimization iterations
            - static_mode: how to utilize static features (concatenate or None)
            - time_mode: how to utilize time information (concatenate or None)
            - task: classification or regression
        - metric_name: auc, apr, mae, mse
    '''
    #%% Step 0: Set basic parameters
    metric_sets = [args.metric_name]
    metric_parameters = {'problem': args.problem, 'label_name': [args.label_name]}

    #%% Step 1: Upload Dataset
    # File names
    data_directory = '../datasets/data/' + args.data_name + '/' + args.data_name + '_'

    data_loader_training = CSVLoader(
        static_file=data_directory + 'static_train_data.csv.gz',
        temporal_file=data_directory + 'temporal_train_data_eav.csv.gz')

    data_loader_testing = CSVLoader(
        static_file=data_directory + 'static_test_data.csv.gz',
        temporal_file=data_directory + 'temporal_test_data_eav.csv.gz')

    dataset_training = data_loader_training.load()
    dataset_testing = data_loader_testing.load()

    print('Finish data loading.')

    #%% Step 2: Preprocess Dataset
    # (0) filter out negative values (Automatically)
    negative_filter = FilterNegative()
    # (1) one-hot encode categorical features
    onehot_encoder = OneHotEncoder(one_hot_encoding_features=[args.one_hot_encoding])
    # (2) Normalize features: 3 options (minmax, standard, none)
    normalizer = Normalizer(args.normalization)

    filter_pipeline = PipelineComposer(negative_filter, onehot_encoder, normalizer)

    dataset_training = filter_pipeline.fit_transform(dataset_training)
    dataset_testing = filter_pipeline.transform(dataset_testing)

    print('Finish preprocessing.')

    #%% Step 3: Define Problem
    problem_maker = ProblemMaker(problem=args.problem,
                                 label=[args.label_name],
                                 max_seq_len=args.max_seq_len,
                                 treatment=[args.treatment])

    dataset_training = problem_maker.fit_transform(dataset_training)
    dataset_testing = problem_maker.fit_transform(dataset_testing)

    print('Finish defining problem.')

    #%% Step 4: Impute Dataset
    static_imputation = Imputation(
        imputation_model_name=args.static_imputation_model, data_type='static')
    temporal_imputation = Imputation(
        imputation_model_name=args.temporal_imputation_model, data_type='temporal')

    imputation_pipeline = PipelineComposer(static_imputation, temporal_imputation)

    dataset_training = imputation_pipeline.fit_transform(dataset_training)
    dataset_testing = imputation_pipeline.transform(dataset_testing)

    print('Finish imputation.')

    #%% Step 5: Feature selection (4 options)
    static_feature_selection = FeatureSelection(
        feature_selection_model_name=args.static_feature_selection_model,
        feature_type='static',
        feature_number=args.static_feature_selection_number,
        task=args.task,
        metric_name=args.metric_name,
        metric_parameters=metric_parameters)

    temporal_feature_selection = FeatureSelection(
        feature_selection_model_name=args.temporal_feature_selection_model,
        feature_type='temporal',
        feature_number=args.temporal_feature_selection_number,
        task=args.task,
        metric_name=args.metric_name,
        metric_parameters=metric_parameters)

    feature_selection_pipeline = PipelineComposer(static_feature_selection,
                                                  temporal_feature_selection)

    dataset_training = feature_selection_pipeline.fit_transform(dataset_training)
    dataset_testing = feature_selection_pipeline.transform(dataset_testing)

    print('Finish feature selection.')

    #%% Step 6: Bayesian Optimization
    ## Model define
    model_parameters = {
        'projection_horizon': 5,
        'static_mode': 'concatenate',
        'time_mode': 'concatenate'
    }

    crn_model = CRN_Model(task=args.task)
    crn_model.set_params(**model_parameters)

    model_class = crn_model

    # train_validate split
    dataset_training.train_val_test_split(prob_val=0.2, prob_test=0.2)

    # Bayesian Optimization Start
    metric = BOMetric(metric='auc', fold=0, split='test')

    # Run BO for selected model class
    BO_model = AutoTS(dataset_training, model_class, metric)
    models, bo_score = BO_model.training_loop(num_iter=2)
    auto_ens_model = AutoEnsemble(models, bo_score)

    # Prediction
    assert not dataset_testing.is_validation_defined
    test_y_hat = auto_ens_model.predict(dataset_testing, test_split='test')
    test_y = dataset_testing.label

    print('Finish AutoML model training and testing.')

    #%% Step 7: Visualize Results
    idx = np.random.permutation(len(test_y_hat))[:2]

    # Evaluate predictor model
    result = Metrics(metric_sets, metric_parameters).evaluate(test_y, test_y_hat)
    print('Finish predictor model evaluation.')

    # Visualize the output
    # (1) Performance
    print('Overall performance')
    print_performance(result, metric_sets, metric_parameters)

    # (2) Predictions
    print('Each prediction')
    print_prediction(test_y_hat[idx], metric_parameters)

    return
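# The main() above only accesses attributes on `args` (args.data_name, args.metric_name, ...).
# A minimal sketch of how such an object could be assembled for a quick local run, assuming the
# attribute names used above. The concrete values (data_name='cf', label_name='death', ...) are
# illustrative assumptions, not defaults from the original script, which presumably builds `args`
# with argparse outside this excerpt.
from types import SimpleNamespace

example_args = SimpleNamespace(
    data_name='cf',                         # one of: mimic, ward, cf
    normalization='minmax',                 # minmax, standard, or None
    one_hot_encoding='admission_type',      # hypothetical categorical feature name
    problem='one-shot',                     # 'one-shot' or 'online'
    max_seq_len=24,
    label_name='death',                     # hypothetical label column
    treatment=None,
    static_imputation_model='median',
    temporal_imputation_model='median',
    static_feature_selection_model=None,
    static_feature_selection_number=10,
    temporal_feature_selection_model=None,
    temporal_feature_selection_number=10,
    task='classification',
    metric_name='auc',
)

# main(example_args)  # uncomment once the loaders, pipelines, and models used above are importable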
if select_pred_task == 'Regression':
    metric_sets = ['mse', 'mae']
    metric_parameters = {
        'problem': problem_type,
        'label_name': label_name,
    }

    metrics = Metrics(metric_sets, metric_parameters)
    result = metrics.evaluate(dataset_v_5.label, test_y_hat)

    if problem_type == 'one-shot':
        text = print_performance(
            result,
            metric_sets,
            metric_parameters,
        )
        st.text(text)

    if problem_type == 'online':
        figs = print_performance(
            result,
            metric_sets,
            metric_parameters,
        )
        for fig in figs:
            st.pyplot(fig, facecolor=fig.get_facecolor(), edgecolor='none')
def main(args):
    '''Main function for time-series prediction.

    Args:
        - data loading parameters:
            - data_names: mimic, ward, cf
        - preprocess parameters:
            - normalization: minmax, standard, None
            - one_hot_encoding: input features that need to be one-hot encoded
            - problem: 'one-shot' or 'online'
                - 'one-shot': one time prediction at the end of the time-series
                - 'online': prediction at every time stamp of the time-series
            - max_seq_len: maximum sequence length after padding
            - label_name: the column name for the label(s)
            - treatment: the column name for treatments
        - imputation parameters:
            - static_imputation_model: mean, median, mice, missforest, knn, gain
            - temporal_imputation_model: mean, median, linear, quadratic, cubic, spline, mrnn, tgain
        - feature selection parameters:
            - feature_selection_model: greedy-addition, greedy-deletion, recursive-addition, recursive-deletion, None
            - feature_number: selected feature number
        - predictor_parameters:
            - model_name: rnn, gru, lstm, attention, tcn, transformer
            - model_parameters: network parameters such as number of layers
                - h_dim: hidden dimensions
                - n_layer: layer number
                - n_head: head number (only for transformer model)
                - batch_size: number of samples in mini-batch
                - epochs: number of epochs
                - learning_rate: learning rate
            - static_mode: how to utilize static features (concatenate or None)
            - time_mode: how to utilize time information (concatenate or None)
            - task: classification or regression
        - uncertainty_model_name: uncertainty estimation model name (ensemble)
        - interpretation_model_name: interpretation model name (tinvase)
        - metric_name: auc, apr, mae, mse
    '''
    #%% Step 0: Set basic parameters
    metric_sets = [args.metric_name]
    metric_parameters = {'problem': args.problem, 'label_name': [args.label_name]}

    #%% Step 1: Upload Dataset
    # File names
    data_directory = '../datasets/data/' + args.data_name + '/' + args.data_name + '_'

    data_loader_training = CSVLoader(
        static_file=data_directory + 'static_train_data.csv.gz',
        temporal_file=data_directory + 'temporal_train_data_eav.csv.gz')

    data_loader_testing = CSVLoader(
        static_file=data_directory + 'static_test_data.csv.gz',
        temporal_file=data_directory + 'temporal_test_data_eav.csv.gz')

    dataset_training = data_loader_training.load()
    dataset_testing = data_loader_testing.load()

    print('Finish data loading.')

    #%% Step 2: Preprocess Dataset
    # (0) filter out negative values (Automatically)
    negative_filter = FilterNegative()
    # (1) one-hot encode categorical features
    onehot_encoder = OneHotEncoder(one_hot_encoding_features=[args.one_hot_encoding])
    # (2) Normalize features: 3 options (minmax, standard, none)
    normalizer = Normalizer(args.normalization)

    filter_pipeline = PipelineComposer(negative_filter, onehot_encoder, normalizer)

    dataset_training = filter_pipeline.fit_transform(dataset_training)
    dataset_testing = filter_pipeline.transform(dataset_testing)

    print('Finish preprocessing.')

    #%% Step 3: Define Problem
    problem_maker = ProblemMaker(problem=args.problem,
                                 label=[args.label_name],
                                 max_seq_len=args.max_seq_len,
                                 treatment=args.treatment)

    dataset_training = problem_maker.fit_transform(dataset_training)
    dataset_testing = problem_maker.fit_transform(dataset_testing)

    print('Finish defining problem.')

    #%% Step 4: Impute Dataset
    static_imputation = Imputation(
        imputation_model_name=args.static_imputation_model, data_type='static')
    temporal_imputation = Imputation(
        imputation_model_name=args.temporal_imputation_model, data_type='temporal')

    imputation_pipeline = PipelineComposer(static_imputation, temporal_imputation)

    dataset_training = imputation_pipeline.fit_transform(dataset_training)
    dataset_testing = imputation_pipeline.transform(dataset_testing)

    print('Finish imputation.')

    #%% Step 5: Feature selection (4 options)
    static_feature_selection = FeatureSelection(
        feature_selection_model_name=args.static_feature_selection_model,
        feature_type='static',
        feature_number=args.static_feature_selection_number,
        task=args.task,
        metric_name=args.metric_name,
        metric_parameters=metric_parameters)

    temporal_feature_selection = FeatureSelection(
        feature_selection_model_name=args.temporal_feature_selection_model,
        feature_type='temporal',
        feature_number=args.temporal_feature_selection_number,
        task=args.task,
        metric_name=args.metric_name,
        metric_parameters=metric_parameters)

    feature_selection_pipeline = PipelineComposer(static_feature_selection,
                                                  temporal_feature_selection)

    dataset_training = feature_selection_pipeline.fit_transform(dataset_training)
    dataset_testing = feature_selection_pipeline.transform(dataset_testing)

    print('Finish feature selection.')

    #%% Step 6: Fit and Predict (6 options)
    # Set predictor model parameters
    model_parameters = {
        'h_dim': args.h_dim,
        'n_layer': args.n_layer,
        'n_head': args.n_head,
        'batch_size': args.batch_size,
        'epoch': args.epochs,
        'model_type': args.model_name,
        'learning_rate': args.learning_rate,
        'static_mode': args.static_mode,
        'time_mode': args.time_mode,
        'verbose': True
    }

    # Set the validation data for best model saving
    dataset_training.train_val_test_split(prob_val=0.2, prob_test=0.0)

    pred_class = prediction(args.model_name, model_parameters, args.task)
    pred_class.fit(dataset_training)
    test_y_hat = pred_class.predict(dataset_testing)

    print('Finish predictor model training and testing.')

    #%% Step 7: Estimate Uncertainty (1 option)
    uncertainty_model = uncertainty(args.uncertainty_model_name,
                                    model_parameters, pred_class, args.task)
    uncertainty_model.fit(dataset_training)
    test_ci_hat = uncertainty_model.predict(dataset_testing)

    print('Finish uncertainty estimation')

    #%% Step 8: Interpret Predictions (1 option)
    interpretor = interpretation(args.interpretation_model_name,
                                 model_parameters, pred_class, args.task)
    interpretor.fit(dataset_training)
    test_s_hat = interpretor.predict(dataset_testing)

    print('Finish model interpretation')

    #%% Step 9: Visualize Results
    idx = np.random.permutation(len(test_y_hat))[:2]

    # Evaluate predictor model
    result = Metrics(metric_sets, metric_parameters).evaluate(
        dataset_testing.label, test_y_hat)
    print('Finish predictor model evaluation.')

    # Visualize the output
    # (1) Performance
    print('Overall performance')
    print_performance(result, metric_sets, metric_parameters)

    # (2) Predictions
    print('Each prediction')
    print_prediction(test_y_hat[idx], metric_parameters)

    # (3) Uncertainty
    print('Uncertainty estimations')
    print_uncertainty(test_y_hat[idx], test_ci_hat[idx], metric_parameters)

    # (4) Model interpretation
    print('Model interpretation')
    print_interpretation(test_s_hat[idx], dataset_training.feature_name,
                         metric_parameters, model_parameters)

    return
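# Sanity-check sketch for the uncertainty step above: empirical coverage of the predicted
# intervals against the test labels. It assumes test_ci_hat stacks (lower, upper) bounds along
# its last axis and that the label and prediction arrays are aligned -- assumptions about the
# array layout, not the documented output format of the uncertainty module.
import numpy as np

def interval_coverage(y_true, ci_hat):
    """Fraction of labels that fall inside their predicted [lower, upper] interval."""
    lower = ci_hat[..., 0]
    upper = ci_hat[..., 1]
    inside = (y_true >= lower) & (y_true <= upper)
    return float(np.mean(inside))

# Toy usage:
# y = np.array([0.2, 0.8, 0.5])
# ci = np.array([[0.1, 0.4], [0.6, 0.9], [0.7, 0.9]])
# interval_coverage(y, ci)  # -> 0.666...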
def main(args): """Main function for AutoML in time-series predictions. Args: - data loading parameters: - data_names: mimic, ward, cf - preprocess parameters: - normalization: minmax, standard, None - one_hot_encoding: input features that need to be one-hot encoded - problem: 'one-shot' or 'online' - 'one-shot': one time prediction at the end of the time-series - 'online': preditcion at every time stamps of the time-series - max_seq_len: maximum sequence length after padding - label_name: the column name for the label(s) - treatment: the column name for treatments - imputation parameters: - static_imputation_model: mean, median, mice, missforest, knn, gain - temporal_imputation_model: mean, median, linear, quadratic, cubic, spline, mrnn, tgain - feature selection parameters: - feature_selection_model: greedy-addition, greedy-deletion, recursive-addition, recursive-deletion, None - feature_number: selected featuer number - predictor_parameters: - epochs: number of epochs - bo_itr: bayesian optimization iterations - static_mode: how to utilize static features (concatenate or None) - time_mode: how to utilize time information (concatenate or None) - task: classification or regression - metric_name: auc, apr, mae, mse """ #%% Step 0: Set basic parameters metric_sets = [args.metric_name] metric_parameters = { "problem": args.problem, "label_name": [args.label_name] } #%% Step 1: Upload Dataset # File names data_directory = "../datasets/data/" + args.data_name + "/" + args.data_name + "_" data_loader_training = CSVLoader( static_file=data_directory + "static_train_data.csv.gz", temporal_file=data_directory + "temporal_train_data_eav.csv.gz", ) data_loader_testing = CSVLoader( static_file=data_directory + "static_test_data.csv.gz", temporal_file=data_directory + "temporal_test_data_eav.csv.gz", ) dataset_training = data_loader_training.load() dataset_testing = data_loader_testing.load() print("Finish data loading.") #%% Step 2: Preprocess Dataset # (0) filter out negative values (Automatically) negative_filter = FilterNegative() # (1) one-hot encode categorical features onehot_encoder = OneHotEncoder( one_hot_encoding_features=[args.one_hot_encoding]) # (2) Normalize features: 3 options (minmax, standard, none) normalizer = Normalizer(args.normalization) filter_pipeline = PipelineComposer(negative_filter, onehot_encoder, normalizer) dataset_training = filter_pipeline.fit_transform(dataset_training) dataset_testing = filter_pipeline.transform(dataset_testing) print("Finish preprocessing.") #%% Step 3: Define Problem problem_maker = ProblemMaker(problem=args.problem, label=[args.label_name], max_seq_len=args.max_seq_len, treatment=args.treatment) dataset_training = problem_maker.fit_transform(dataset_training) dataset_testing = problem_maker.fit_transform(dataset_testing) print("Finish defining problem.") #%% Step 4: Impute Dataset static_imputation = Imputation( imputation_model_name=args.static_imputation_model, data_type="static") temporal_imputation = Imputation( imputation_model_name=args.temporal_imputation_model, data_type="temporal") imputation_pipeline = PipelineComposer(static_imputation, temporal_imputation) dataset_training = imputation_pipeline.fit_transform(dataset_training) dataset_testing = imputation_pipeline.transform(dataset_testing) print("Finish imputation.") #%% Step 5: Feature selection (4 options) static_feature_selection = FeatureSelection( feature_selection_model_name=args.static_feature_selection_model, feature_type="static", 
feature_number=args.static_feature_selection_number, task=args.task, metric_name=args.metric_name, metric_parameters=metric_parameters, ) temporal_feature_selection = FeatureSelection( feature_selection_model_name=args.temporal_feature_selection_model, feature_type="temporal", feature_number=args.temporal_feature_selection_number, task=args.task, metric_name=args.metric_name, metric_parameters=metric_parameters, ) feature_selection_pipeline = PipelineComposer(static_feature_selection, temporal_feature_selection) dataset_training = feature_selection_pipeline.fit_transform( dataset_training) dataset_testing = feature_selection_pipeline.transform(dataset_testing) print("Finish feature selection.") #%% Step 6: Bayesian Optimization ## Model define # RNN model rnn_parameters = { "model_type": "lstm", "epoch": args.epochs, "static_mode": args.static_mode, "time_mode": args.time_mode, "verbose": False, } general_rnn = GeneralRNN(task=args.task) general_rnn.set_params(**rnn_parameters) # CNN model cnn_parameters = { "epoch": args.epochs, "static_mode": args.static_mode, "time_mode": args.time_mode, "verbose": False, } temp_cnn = TemporalCNN(task=args.task) temp_cnn.set_params(**cnn_parameters) # Transformer transformer = TransformerPredictor(task=args.task, epoch=args.epochs, static_mode=args.static_mode, time_mode=args.time_mode) # Attention model attn_parameters = { "model_type": "lstm", "epoch": args.epochs, "static_mode": args.static_mode, "time_mode": args.time_mode, "verbose": False, } attn = Attention(task=args.task) attn.set_params(**attn_parameters) # model_class_list = [general_rnn, attn, temp_cnn, transformer] model_class_list = [general_rnn, attn] # train_validate split dataset_training.train_val_test_split(prob_val=0.2, prob_test=0.1) # Bayesian Optimization Start metric = BOMetric(metric="auc", fold=0, split="test") ens_model_list = [] # Run BO for each model class for m in model_class_list: BO_model = automl.model.AutoTS(dataset_training, m, metric, model_path="tmp/") models, bo_score = BO_model.training_loop(num_iter=args.bo_itr) auto_ens_model = AutoEnsemble(models, bo_score) ens_model_list.append(auto_ens_model) # Load all ensemble models for ens in ens_model_list: for m in ens.models: m.load_model(BO_model.model_path + "/" + m.model_id + ".h5") # Stacking algorithm stacking_ens_model = StackingEnsemble(ens_model_list) stacking_ens_model.fit(dataset_training, fold=0, train_split="val") # Prediction assert not dataset_testing.is_validation_defined test_y_hat = stacking_ens_model.predict(dataset_testing, test_split="test") test_y = dataset_testing.label print("Finish AutoML model training and testing.") #%% Step 7: Visualize Results idx = np.random.permutation(len(test_y_hat))[:2] # Evaluate predictor model result = Metrics(metric_sets, metric_parameters).evaluate(test_y, test_y_hat) print("Finish predictor model evaluation.") # Visualize the output # (1) Performance print("Overall performance") print_performance(result, metric_sets, metric_parameters) # (2) Predictions print("Each prediction") print_prediction(test_y_hat[idx], metric_parameters) return
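# Illustration only: the idea behind the stacking step above, in plain numpy. A meta-learner is
# fit on a held-out split to combine the base ensembles' predictions; here the meta-learner is a
# least-squares weight vector over base-model outputs. This is a sketch of the concept, not the
# actual StackingEnsemble implementation used by the pipeline.
import numpy as np

def fit_stacking_weights(base_preds_val, y_val):
    """base_preds_val: (n_models, n_samples) validation predictions; y_val: (n_samples,) labels.
    Returns one combination weight per base model via least squares."""
    A = np.asarray(base_preds_val, dtype=float).T          # (n_samples, n_models)
    w, *_ = np.linalg.lstsq(A, np.asarray(y_val, dtype=float), rcond=None)
    return w

def stacked_predict(base_preds_test, weights):
    """Combine (n_models, n_samples) test predictions with the learned weights."""
    return np.asarray(base_preds_test, dtype=float).T @ weights

# Toy usage:
# val_preds = np.array([[0.2, 0.7, 0.9], [0.3, 0.6, 0.8]])   # two base models, three samples
# w = fit_stacking_weights(val_preds, np.array([0.0, 1.0, 1.0]))
# stacked_predict(np.array([[0.1, 0.8], [0.2, 0.9]]), w)     # combined test predictions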
def main(args):
    '''Main function for individual treatment effect estimation.

    Args:
        - data loading parameters:
            - data_names: mimic, ward, cf, mimic_antibiotics
        - preprocess parameters:
            - normalization: minmax, standard, None
            - one_hot_encoding: input features that need to be one-hot encoded
            - problem: 'online'
                - 'online': prediction at every time stamp of the time-series
            - max_seq_len: maximum sequence length after padding
            - label_name: the column name for the label(s)
            - treatment: the column name for treatments
        - imputation parameters:
            - static_imputation_model: mean, median, mice, missforest, knn, gain
            - temporal_imputation_model: mean, median, linear, quadratic, cubic, spline, mrnn, tgain
        - feature selection parameters:
            - feature_selection_model: greedy-addition, greedy-deletion, recursive-addition, recursive-deletion, None
            - feature_number: selected feature number
        - treatment effects model parameters:
            - model_name: CRN, RMSN, GANITE

            Each model has different types of hyperparameters that need to be set.

            - Parameters needed for the Counterfactual Recurrent Network (CRN):
                - hyperparameters for encoder:
                    - rnn_hidden_units: hidden dimensions in the LSTM unit
                    - rnn_keep_prob: keep probability used for variational dropout in the LSTM unit
                    - br_size: size of the balancing representation
                    - fc_hidden_units: hidden dimensions of the fully connected layers used for the treatment
                      classifier and predictor
                    - batch_size: number of samples in mini-batch
                    - num_epochs: number of epochs
                    - learning_rate: learning rate
                    - max_alpha: alpha controls the trade-off between building treatment invariant representations
                      (domain discrimination) and being able to predict outcomes (outcome prediction); during
                      training, CRN uses an exponentially increasing schedule for alpha from 0 to max_alpha.
                - hyperparameters for decoder:
                    - the decoder requires the same hyperparameters as the encoder, with the exception of
                      rnn_hidden_units, which is set equal to the br_size of the encoder

            - Parameters for Recurrent Marginal Structural Networks (RMSN):
                - hyperparameters for encoder:
                    - dropout_rate: dropout probability used for variational dropout in the LSTM unit
                    - rnn_hidden_units: hidden dimensions in the LSTM unit
                    - batch_size: number of samples in mini-batch
                    - num_epochs: number of epochs
                    - learning_rate: learning rate
                    - max_norm: max gradient norm used for gradient clipping during training
                - hyperparameters for decoder:
                    - the decoder requires the same hyperparameters as the encoder.
                - model_dir: directory where the model is saved
                - model_name: name of the saved model

            - Parameters for GANITE:
                - batch_size: number of samples in mini-batch
                - alpha: parameter trading off between the discriminator loss and the supervised loss for the
                  generator training
                - learning_rate: learning rate
                - hidden_units: hidden dimensions of the fully connected layers used in the networks
                - stack_dim: number of timesteps to stack

            All models have the following common parameters:
                - static_mode: how to utilize static features (concatenate or None)
                - time_mode: how to utilize time information (concatenate or None)
                - task: 'classification' or 'regression'

        - metric_name: auc, apr, mae, mse (used for factual prediction)
        - patient_id: patient for which counterfactual trajectories are computed
        - timestep: timestep in the patient trajectory for estimating counterfactuals
    '''
    # %% Step 0: Set basic parameters
    metric_sets = [args.metric_name]
    metric_parameters = {'problem': args.problem, 'label_name': [args.label_name]}

    # %% Step 1: Upload Dataset
    # File names
    data_directory = '../datasets/data/' + args.data_name + '/' + args.data_name + '_'

    data_loader_training = CSVLoader(
        static_file=data_directory + 'static_train_data.csv.gz',
        temporal_file=data_directory + 'temporal_train_data_eav.csv.gz')

    data_loader_testing = CSVLoader(
        static_file=data_directory + 'static_test_data.csv.gz',
        temporal_file=data_directory + 'temporal_test_data_eav.csv.gz')

    dataset_training = data_loader_training.load()
    dataset_testing = data_loader_testing.load()

    print('Finish data loading.')

    # %% Step 2: Preprocess Dataset
    # (0) filter out negative values (Automatically)
    negative_filter = FilterNegative()
    # (1) one-hot encode categorical features
    onehot_encoder = OneHotEncoder(one_hot_encoding_features=[args.one_hot_encoding])
    # (2) Normalize features: 3 options (minmax, standard, none)
    normalizer = Normalizer(args.normalization)

    filter_pipeline = PipelineComposer(negative_filter, onehot_encoder, normalizer)

    dataset_training = filter_pipeline.fit_transform(dataset_training)
    dataset_testing = filter_pipeline.transform(dataset_testing)

    print('Finish preprocessing.')

    # %% Step 3: Define Problem
    problem_maker = ProblemMaker(problem=args.problem,
                                 label=[args.label_name],
                                 max_seq_len=args.max_seq_len,
                                 treatment=[args.treatment])

    dataset_training = problem_maker.fit_transform(dataset_training)
    dataset_testing = problem_maker.fit_transform(dataset_testing)

    print('Finish defining problem.')

    # %% Step 4: Impute Dataset
    static_imputation = Imputation(
        imputation_model_name=args.static_imputation_model, data_type='static')
    temporal_imputation = Imputation(
        imputation_model_name=args.temporal_imputation_model, data_type='temporal')

    imputation_pipeline = PipelineComposer(static_imputation, temporal_imputation)

    dataset_training = imputation_pipeline.fit_transform(dataset_training)
    dataset_testing = imputation_pipeline.transform(dataset_testing)

    print('Finish imputation.')

    # %% Step 5: Feature selection (4 options)
    static_feature_selection = FeatureSelection(
        feature_selection_model_name=args.static_feature_selection_model,
        feature_type='static',
        feature_number=args.static_feature_selection_number,
        task=args.task,
        metric_name=args.metric_name,
        metric_parameters=metric_parameters)

    temporal_feature_selection = FeatureSelection(
        feature_selection_model_name=args.temporal_feature_selection_model,
        feature_type='temporal',
        feature_number=args.temporal_feature_selection_number,
        task=args.task,
        metric_name=args.metric_name,
        metric_parameters=metric_parameters)

    feature_selection_pipeline = PipelineComposer(static_feature_selection,
                                                  temporal_feature_selection)

    dataset_training = feature_selection_pipeline.fit_transform(dataset_training)
    dataset_testing = feature_selection_pipeline.transform(dataset_testing)

    print('Finish feature selection.')

    # %% Step 6: Fit treatment effects (3 options)
    # Set the validation data for best model saving
    dataset_training.train_val_test_split(prob_val=0.2, prob_test=0.0)

    # Set the treatment effects model
    model_name = args.model_name

    # Set treatment effects model parameters
    if model_name == 'CRN':
        model_parameters = {
            'encoder_rnn_hidden_units': args.crn_encoder_rnn_hidden_units,
            'encoder_br_size': args.crn_encoder_br_size,
            'encoder_fc_hidden_units': args.crn_encoder_fc_hidden_units,
            'encoder_learning_rate': args.crn_encoder_learning_rate,
            'encoder_batch_size': args.crn_encoder_batch_size,
            'encoder_keep_prob': args.crn_encoder_keep_prob,
            'encoder_num_epochs': args.crn_encoder_num_epochs,
            'encoder_max_alpha': args.crn_encoder_max_alpha,
            'decoder_br_size': args.crn_decoder_br_size,
            'decoder_fc_hidden_units': args.crn_decoder_fc_hidden_units,
            'decoder_learning_rate': args.crn_decoder_learning_rate,
            'decoder_batch_size': args.crn_decoder_batch_size,
            'decoder_keep_prob': args.crn_decoder_keep_prob,
            'decoder_num_epochs': args.crn_decoder_num_epochs,
            'decoder_max_alpha': args.crn_decoder_max_alpha,
            'projection_horizon': args.projection_horizon,
            'static_mode': args.static_mode,
            'time_mode': args.time_mode
        }

        treatment_model = treatment_effects_model(model_name, model_parameters,
                                                  task='classification')
        treatment_model.fit(dataset_training)

    elif model_name == 'RMSN':
        hyperparams_encoder_iptw = {
            'dropout_rate': args.rmsn_encoder_dropout_rate,
            'memory_multiplier': args.rmsn_encoder_memory_multiplier,
            'num_epochs': args.rmsn_encoder_num_epochs,
            'batch_size': args.rmsn_encoder_batch_size,
            'learning_rate': args.rmsn_encoder_learning_rate,
            'max_norm': args.rmsn_encoder_max_norm
        }

        hyperparams_decoder_iptw = {
            'dropout_rate': args.rmsn_decoder_dropout_rate,
            'memory_multiplier': args.rmsn_decoder_memory_multiplier,
            'num_epochs': args.rmsn_decoder_num_epochs,
            'batch_size': args.rmsn_decoder_batch_size,
            'learning_rate': args.rmsn_decoder_learning_rate,
            'max_norm': args.rmsn_decoder_max_norm
        }

        model_parameters = {
            'hyperparams_encoder_iptw': hyperparams_encoder_iptw,
            'hyperparams_decoder_iptw': hyperparams_decoder_iptw,
            'model_dir': args.rmsn_model_dir,
            'model_name': args.rmsn_model_name,
            'static_mode': args.static_mode,
            'time_mode': args.time_mode
        }

        treatment_model = treatment_effects_model(model_name, model_parameters,
                                                  task='classification')
        treatment_model.fit(dataset_training, projection_horizon=args.projection_horizon)

    elif model_name == 'GANITE':
        hyperparams = {
            'batch_size': args.ganite_batch_size,
            'alpha': args.ganite_alpha,
            'hidden_dims': args.ganite_hidden_dims,
            'learning_rate': args.ganite_learning_rate
        }

        model_parameters = {
            'hyperparams': hyperparams,
            'stack_dim': args.ganite_stack_dim,
            'static_mode': args.static_mode,
            'time_mode': args.time_mode
        }

        treatment_model = treatment_effects_model(model_name, model_parameters,
                                                  task='classification')
        treatment_model.fit(dataset_training)

    test_y_hat = treatment_model.predict(dataset_testing)

    print('Finish treatment effects model training and testing.')

    # %% Step 7: Visualize Results
    # Evaluate predictor model
    result = Metrics(metric_sets, metric_parameters).evaluate(
        dataset_testing.label, test_y_hat)
    print('Finish predictor model evaluation.')

    # Visualize the output
    # (1) Performance on estimating factual outcomes
    print('Overall performance on estimating factual outcomes')
    print_performance(result, metric_sets, metric_parameters)

    # (2) Counterfactual trajectories
    print('Counterfactual trajectories')
    if model_name in ['CRN', 'RMSN']:
        # Predict and visualize counterfactuals for the sequence of treatments indicated by the user
        # through treatment_options. The length of each sequence of treatments needs to be
        # projection_horizon + 1.
        treatment_options = np.array([[[1], [1], [1], [1], [1], [0]],
                                      [[0], [0], [0], [0], [1], [1]]])
        history, counterfactual_traj = treatment_model.predict_counterfactual_trajectories(
            dataset=dataset_testing,
            patient_id=args.patient_id,
            timestep=args.timestep,
            treatment_options=treatment_options)

        print_counterfactual_predictions(
            patient_history=history,
            treatment_options=treatment_options,
            counterfactual_predictions=counterfactual_traj)

    return
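# The hard-coded treatment_options above contains two candidate treatment sequences of length
# projection_horizon + 1 (= 6 for projection_horizon 5). A small helper sketch for building such
# arrays programmatically: the 'always treat' / 'never treat' naming is illustrative, and the
# (n_options, projection_horizon + 1, n_treatments) shape is inferred from the hard-coded example
# above rather than stated explicitly in the source.
import numpy as np

def make_treatment_options(projection_horizon, n_treatments=1):
    """Return a (2, projection_horizon + 1, n_treatments) array with an
    'always treat' option and a 'never treat' option."""
    seq_len = projection_horizon + 1
    always_treat = np.ones((seq_len, n_treatments))
    never_treat = np.zeros((seq_len, n_treatments))
    return np.stack([always_treat, never_treat], axis=0)

# make_treatment_options(5).shape  # -> (2, 6, 1), matching the hard-coded example above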