def step(self, mode):
    if mode == "train" and self.mode == "test":
        raise Exception("Cannot train during test mode")

    if mode == "train":
        theano_fn = self.train_fn
        batch_gen = self.train_batch_gen
    elif mode == "test":
        theano_fn = self.test_fn
        batch_gen = self.test_batch_gen
    else:
        raise Exception("Invalid mode")

    data = next(batch_gen)
    ys = data[-1]
    data = data[:-1]
    ret = theano_fn(*data)
    return {"prediction": [metrics.get_estimate_custom(x, self.nbins)
                           for x in ret[0]],
            "answers": ys,
            "current_loss": ret[1],
            "loss_reg": ret[2],
            "loss_mse": ret[1] - ret[2],
            "log": ""}
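# --- Illustrative only: a hedged sketch of how step() might be driven from a
# --- training loop. `network` and `n_batches` are hypothetical names, not
# --- part of the original code.
import numpy as np

def run_epoch(network, n_batches, mode="train"):
    """Run `n_batches` steps and return the mean loss (sketch, not original code)."""
    losses = []
    for _ in range(n_batches):
        ret = network.step(mode)
        losses.append(ret["current_loss"])
    return float(np.mean(losses))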
def predict(self, data):
    """ data is a pair (X, y) """
    processed = self.process_input(data)[:-1]
    ret = self.test_fn(*processed)
    predictions = [metrics.get_estimate_custom(x, self.nbins)
                   for x in ret[0]]
    return [predictions] + list(ret[1:])
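# --- Illustrative only: get_estimate_custom's internals live in metrics.py.
# --- A common way to collapse a per-bin probability vector into a scalar LOS
# --- estimate is an expectation over representative bin values; this helper
# --- is a hedged sketch of that idea, not the actual implementation.
import numpy as np

def estimate_from_bins(probs, bin_means):
    """Expected LOS given bin probabilities and per-bin mean LOS values."""
    probs = np.asarray(probs, dtype=float)
    return float(np.dot(probs / probs.sum(), np.asarray(bin_means, dtype=float)))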
def calc_metrics(self, data_gen, history, dataset, logs):
    y_true = []
    predictions = []

    # Predict over the whole generator at once instead of batch-by-batch.
    pred = self.model.predict(data_gen,
                              batch_size=self.batch_size,
                              verbose=self.verbose,
                              steps=data_gen.steps,
                              workers=self.workers,
                              use_multiprocessing=True)
    y = data_gen.get_y(len(pred))
    y_true += list(y)
    predictions += list(pred)
    print('\n')

    if self.partition == 'log':
        predictions = [metrics.get_estimate_log(x, 10) for x in predictions]
        ret = metrics.print_metrics_log_bins(y_true, predictions)
    if self.partition == 'custom':
        predictions = [metrics.get_estimate_custom(x, 10) for x in predictions]
        ret = metrics.print_metrics_custom_bins(y_true, predictions)
    if self.partition == 'none':
        ret = metrics.print_metrics_regression(y_true, predictions)

    for k, v in ret.items():
        logs[dataset + '_' + k] = v
    history.append(ret)
def calc_metrics(self, data_gen, history, dataset, logs):
    y_true = []
    predictions = []
    for i in range(data_gen.steps):
        if self.verbose == 1:
            print("\tdone {}/{}".format(i, data_gen.steps), end='\r')
        if self.use_time:
            ([x, t], y_processed, y) = data_gen.next(return_y_true=True)
            pred = self.model.predict([x, t], batch_size=self.batch_size)
        else:
            (x, y_processed, y) = data_gen.next(return_y_true=True)
            pred = self.model.predict(x, batch_size=self.batch_size)

        if isinstance(x, list) and len(x) == 2:  # deep supervision
            if pred.shape[-1] == 1:  # regression
                pred_flatten = pred.flatten()
            else:  # classification
                pred_flatten = pred.reshape((-1, 10))
            # NOTE: `t` here shadows the time input unpacked above
            for m, t, p in zip(x[1].flatten(), y.flatten(), pred_flatten):
                if np.equal(m, 1):
                    y_true.append(t)
                    predictions.append(p)
        else:
            if pred.shape[-1] == 1:
                y_true += list(y.flatten())
                predictions += list(pred.flatten())
            else:
                y_true += list(y)
                predictions += list(pred)
    print('\n')

    if self.partition == 'log':
        predictions = [metrics.get_estimate_log(x, 10) for x in predictions]
        ret = metrics.print_metrics_log_bins(y_true, predictions)
    if self.partition == 'custom':
        predictions = [metrics.get_estimate_custom(x, 10) for x in predictions]
        ret = metrics.print_metrics_custom_bins(y_true, predictions)
    if self.partition == 'none':
        ret = metrics.print_metrics_regression(y_true, predictions)

    for k, v in ret.items():
        logs[dataset + '_' + k] = v
    history.append(ret)
        # ... tail of the per-batch prediction loop (loop header above this excerpt)
        pred = model.predict_on_batch(x)
        predictions += list(pred)
        labels += list(y)
        names += list(cur_names)
        ts += list(cur_ts)

    if stochastic:
        aleatoric = [np.mean(x * (1. - x), axis=0) for x in predictions]
        epistemic = [np.var(x, axis=0) for x in predictions]
        predictions = [np.mean(x, axis=0) for x in predictions]

    if args.partition == 'log':
        predictions = [metrics.get_estimate_log(x, 10) for x in predictions]
        metrics.print_metrics_log_bins(labels, predictions)
    if args.partition == 'custom':
        predictions = [metrics.get_estimate_custom(x, 10) for x in predictions]
        metrics.print_metrics_custom_bins(labels, predictions)
    if args.partition == 'none':
        metrics.print_metrics_regression(labels, predictions)

    predictions = [x[0] for x in predictions]
    path = os.path.join(args.output_dir, "test_predictions",
                        os.path.basename(args.load_state)) + ".csv"

    if stochastic:
        ee = np.mean(np.array(epistemic))
        aa = np.mean(np.array(aleatoric))
        print("Epistemic uncertainty =", ee)
        print("Aleatoric uncertainty =", aa)
        print("Uncertainty =", ee + aa)
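# --- Illustrative only: toy demo of the aleatoric/epistemic decomposition
# --- used above, assuming each prediction holds K stochastic (e.g. MC-dropout)
# --- probability samples for one example. All numbers are synthetic.
import numpy as np

rng = np.random.RandomState(0)
mc_samples = rng.rand(30)                             # K = 30 sampled probabilities
aleatoric = np.mean(mc_samples * (1. - mc_samples))   # mean per-sample Bernoulli variance
epistemic = np.var(mc_samples)                        # spread of the sampled predictions
print("total uncertainty =", aleatoric + epistemic)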
    ## decomp
    if args.decomp_C > 0:
        print("\n ================ decompensation ================")
        decomp_pred = np.array(decomp_pred)
        decomp_pred = np.stack([1 - decomp_pred, decomp_pred], axis=1)
        decomp_ret = metrics.print_metrics_binary(decomp_y_true, decomp_pred)

    ## los
    if args.los_C > 0:
        print("\n ================ length of stay ================")
        if args.partition == 'log':
            los_pred = [metrics.get_estimate_log(x, 10) for x in los_pred]
            los_ret = metrics.print_metrics_log_bins(los_y_true, los_pred)
        if args.partition == 'custom':
            los_pred = [metrics.get_estimate_custom(x, 10) for x in los_pred]
            los_ret = metrics.print_metrics_custom_bins(los_y_true, los_pred)
        if args.partition == 'none':
            los_ret = metrics.print_metrics_regression(los_y_true, los_pred)

    ## pheno
    if args.pheno_C > 0:
        print("\n =================== phenotype ==================")
        pheno_pred = np.array(pheno_pred)
        pheno_ret = metrics.print_metrics_multilabel(pheno_y_true, pheno_pred)

    # TODO: save activations if needed

elif args.mode == 'test_single':
    # ensure that the code uses test_reader
    del train_reader
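# --- Illustrative only: print_metrics_binary consumes an (N, 2) array of class
# --- probabilities, so the 1-D vector of positive-class sigmoid outputs is
# --- expanded into [1 - p, p] columns. Toy values below are synthetic.
import numpy as np

p = np.array([0.1, 0.8, 0.4])            # positive-class probabilities
two_col = np.stack([1 - p, p], axis=1)   # shape (3, 2): [P(y=0), P(y=1)] per row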
resfile.write(",".join(["%.6f" % x for x in ret['auc_scores']]) + "\n") print "\nAverage results on train" ret = metrics.print_metrics_multilabel(train_y, train_activations) write_results(resfile, ret) print "\nAverage results on val" ret = metrics.print_metrics_multilabel(val_y, val_activations) write_results(resfile, ret) print "\nAverage results on test" ret = metrics.print_metrics_multilabel(test_y, test_activations) write_results(resfile, ret) train_predictions = np.array([metrics.get_estimate_custom(x, nbins) for x in train_activations]) val_predictions = np.array([metrics.get_estimate_custom(x, nbins) for x in val_activations]) test_predictions = np.array([metrics.get_estimate_custom(x, nbins) for x in test_activations]) with open(os.path.join("cf_activations", model_name + ".txt"), "w") as actfile: for (x, y) in zip(test_predictions, test_actual): actfile.write("%.6f %.6f\n" % (x, y)) with open(os.path.join("cf_results", model_name + ".txt"), "w") as resfile: resfile.write("mad, mse, mape, kappa\n") print "Scores on train set" ret = metrics.print_metrics_custom_bins(train_actual, train_predictions) resfile.write("%.6f,%.6f,%.6f,%.6f\n" % ( ret['mad'], ret['mse'],
def calc_metrics(self, data_gen, history, dataset, logs):
    ihm_y_true = []
    decomp_y_true = []
    los_y_true = []
    pheno_y_true = []

    ihm_pred = []
    decomp_pred = []
    los_pred = []
    pheno_pred = []

    for i in range(data_gen.steps):
        if self.verbose == 1:
            print("\tdone {}/{}".format(i, data_gen.steps), end='\r')
        (X, y, los_y_reg) = data_gen.next(return_y_true=True)
        outputs = self.model.predict(X, batch_size=self.batch_size)

        ihm_M = X[1]
        decomp_M = X[2]
        los_M = X[3]

        if not data_gen.target_repl:  # no target replication
            (ihm_p, decomp_p, los_p, pheno_p) = outputs
            (ihm_t, decomp_t, los_t, pheno_t) = y
        else:  # target replication
            (ihm_p, _, decomp_p, los_p, pheno_p, _) = outputs
            (ihm_t, _, decomp_t, los_t, pheno_t, _) = y

        los_t = los_y_reg  # real value, not the label

        # ihm
        for (m, t, p) in zip(ihm_M.flatten(), ihm_t.flatten(), ihm_p.flatten()):
            if np.equal(m, 1):
                ihm_y_true.append(t)
                ihm_pred.append(p)

        # decomp
        for (m, t, p) in zip(decomp_M.flatten(), decomp_t.flatten(),
                             decomp_p.flatten()):
            if np.equal(m, 1):
                decomp_y_true.append(t)
                decomp_pred.append(p)

        # los
        if los_p.shape[-1] == 1:  # regression
            for (m, t, p) in zip(los_M.flatten(), los_t.flatten(),
                                 los_p.flatten()):
                if np.equal(m, 1):
                    los_y_true.append(t)
                    los_pred.append(p)
        else:  # classification
            for (m, t, p) in zip(los_M.flatten(), los_t.flatten(),
                                 los_p.reshape((-1, 10))):
                if np.equal(m, 1):
                    los_y_true.append(t)
                    los_pred.append(p)

        # pheno
        for (t, p) in zip(pheno_t.reshape((-1, 25)), pheno_p.reshape((-1, 25))):
            pheno_y_true.append(t)
            pheno_pred.append(p)
    print('\n')

    # ihm
    print("\n ================= 48h mortality ================")
    ihm_pred = np.array(ihm_pred)
    ihm_pred = np.stack([1 - ihm_pred, ihm_pred], axis=1)
    ret = metrics.print_metrics_binary(ihm_y_true, ihm_pred)
    for k, v in ret.items():
        logs[dataset + '_ihm_' + k] = v

    # decomp
    print("\n ================ decompensation ================")
    decomp_pred = np.array(decomp_pred)
    decomp_pred = np.stack([1 - decomp_pred, decomp_pred], axis=1)
    ret = metrics.print_metrics_binary(decomp_y_true, decomp_pred)
    for k, v in ret.items():
        logs[dataset + '_decomp_' + k] = v

    # los
    print("\n ================ length of stay ================")
    if self.partition == 'log':
        los_pred = [metrics.get_estimate_log(x, 10) for x in los_pred]
        ret = metrics.print_metrics_log_bins(los_y_true, los_pred)
    if self.partition == 'custom':
        los_pred = [metrics.get_estimate_custom(x, 10) for x in los_pred]
        ret = metrics.print_metrics_custom_bins(los_y_true, los_pred)
    if self.partition == 'none':
        ret = metrics.print_metrics_regression(los_y_true, los_pred)
    for k, v in ret.items():
        logs[dataset + '_los_' + k] = v

    # pheno
    print("\n =================== phenotype ==================")
    pheno_pred = np.array(pheno_pred)
    ret = metrics.print_metrics_multilabel(pheno_y_true, pheno_pred)
    for k, v in ret.items():
        logs[dataset + '_pheno_' + k] = v
    history.append(logs)
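# --- Illustrative only: minimal demo of the masking pattern used throughout
# --- calc_metrics above, keeping just the (target, prediction) pairs whose
# --- mask bit is 1. Shapes and values are synthetic.
import numpy as np

M = np.array([[1, 1, 0], [1, 0, 0]])          # per-timestep validity mask
t = np.array([[0., 1., 9.], [1., 9., 9.]])    # targets (9. marks padding)
p = np.array([[.2, .7, .5], [.6, .5, .5]])    # predictions
kept = [(ti, pi) for m, ti, pi in zip(M.flatten(), t.flatten(), p.flatten())
        if np.equal(m, 1)]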
ihm_ret = metrics.print_metrics_binary(ihm_y_true, ihm_pred)

# decomp
if args.decomp_C > 0:
    print("\n ================ decompensation ================")
    decomp_pred = np.array(decomp_pred)
    decomp_ret = metrics.print_metrics_binary(decomp_y_true, decomp_pred)

# los
if args.los_C > 0:
    print("\n ================ length of stay ================")
    if args.partition == 'log':
        los_pred = [metrics.get_estimate_log(x, 10) for x in los_pred]
        los_ret = metrics.print_metrics_log_bins(los_y_true, los_pred)
    if args.partition == 'custom':
        los_pred = [metrics.get_estimate_custom(x, 10) for x in los_pred]
        los_ret = metrics.print_metrics_custom_bins(los_y_true, los_pred)
    if args.partition == 'none':
        los_ret = metrics.print_metrics_regression(los_y_true, los_pred)

# pheno
if args.pheno_C > 0:
    print("\n =================== phenotype ==================")
    pheno_pred = np.array(pheno_pred)
    pheno_ret = metrics.print_metrics_multilabel(pheno_y_true, pheno_pred)

print("Saving the predictions in test_predictions/task directories ...")

# ihm
ihm_path = os.path.join("test_predictions/ihm",
                        os.path.basename(args.load_state)) + ".csv"
ihm_utils.save_results(ihm_names, ihm_pred, ihm_y_true, ihm_path)
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--period', type=str, default='all',
                        help='specifies which period to extract features from',
                        choices=['first4days', 'first8days', 'last12hours',
                                 'first25percent', 'first50percent', 'all'])
    parser.add_argument('--features', type=str, default='all',
                        help='specifies what features to extract',
                        choices=['all', 'len', 'all_but_len'])
    parser.add_argument('--grid-search', dest='grid_search', action='store_true')
    parser.add_argument('--no-grid-search', dest='grid_search', action='store_false')
    parser.set_defaults(grid_search=False)
    parser.add_argument('--data', type=str,
                        help='Path to the data of length-of-stay task',
                        default=os.path.join(os.path.dirname(__file__),
                                             '../../../data/length-of-stay/'))
    parser.add_argument('--output_dir', type=str,
                        help='Directory relative to which all output files are stored',
                        default='.')
    args = parser.parse_args()
    print(args)

    if args.grid_search:
        penalties = ['l2', 'l2', 'l2', 'l2', 'l2', 'l2',
                     'l1', 'l1', 'l1', 'l1', 'l1']
        coefs = [1.0, 0.1, 0.01, 0.001, 0.0001, 0.00001,
                 1.0, 0.1, 0.01, 0.001, 0.0001]
    else:
        penalties = ['l2']
        coefs = [0.00001]

    train_reader = LengthOfStayReader(dataset_dir=os.path.join(args.data, 'train'),
                                      listfile=os.path.join(args.data, 'train_listfile.csv'))
    val_reader = LengthOfStayReader(dataset_dir=os.path.join(args.data, 'train'),
                                    listfile=os.path.join(args.data, 'val_listfile.csv'))
    test_reader = LengthOfStayReader(dataset_dir=os.path.join(args.data, 'test'),
                                     listfile=os.path.join(args.data, 'test_listfile.csv'))

    print('Reading data and extracting features ...')
    n_train = min(100000, train_reader.get_number_of_examples())
    n_val = min(100000, val_reader.get_number_of_examples())

    (train_X, train_y, train_actual, train_names, train_ts) = read_and_extract_features(
        train_reader, n_train, args.period, args.features)
    (val_X, val_y, val_actual, val_names, val_ts) = read_and_extract_features(
        val_reader, n_val, args.period, args.features)
    (test_X, test_y, test_actual, test_names, test_ts) = read_and_extract_features(
        test_reader, test_reader.get_number_of_examples(), args.period, args.features)

    print("train set shape: {}".format(train_X.shape))
    print("validation set shape: {}".format(val_X.shape))
    print("test set shape: {}".format(test_X.shape))

    print('Imputing missing values ...')
    imputer = Imputer(missing_values=np.nan, strategy='mean', axis=0,
                      verbose=0, copy=True)
    imputer.fit(train_X)
    train_X = np.array(imputer.transform(train_X), dtype=np.float32)
    val_X = np.array(imputer.transform(val_X), dtype=np.float32)
    test_X = np.array(imputer.transform(test_X), dtype=np.float32)

    print('Normalizing the data to have zero mean and unit variance ...')
    scaler = StandardScaler()
    scaler.fit(train_X)
    train_X = scaler.transform(train_X)
    val_X = scaler.transform(val_X)
    test_X = scaler.transform(test_X)

    result_dir = os.path.join(args.output_dir, 'cf_results')
    common_utils.create_directory(result_dir)

    for (penalty, C) in zip(penalties, coefs):
        model_name = '{}.{}.{}.C{}'.format(args.period, args.features, penalty, C)

        train_activations = np.zeros(shape=train_y.shape, dtype=float)
        val_activations = np.zeros(shape=val_y.shape, dtype=float)
        test_activations = np.zeros(shape=test_y.shape, dtype=float)

        # one binary classifier per LOS bin (one-vs-rest over the custom bins)
        for task_id in range(n_bins):
            logreg = LogisticRegression(penalty=penalty, C=C, random_state=42)
            logreg.fit(train_X, train_y[:, task_id])

            train_preds = logreg.predict_proba(train_X)
            train_activations[:, task_id] = train_preds[:, 1]

            val_preds = logreg.predict_proba(val_X)
            val_activations[:, task_id] = val_preds[:, 1]

            test_preds = logreg.predict_proba(test_X)
            test_activations[:, task_id] = test_preds[:, 1]

        train_predictions = np.array([metrics.get_estimate_custom(x, n_bins)
                                      for x in train_activations])
        val_predictions = np.array([metrics.get_estimate_custom(x, n_bins)
                                    for x in val_activations])
        test_predictions = np.array([metrics.get_estimate_custom(x, n_bins)
                                     for x in test_activations])

        with open(os.path.join(result_dir, 'train_{}.json'.format(model_name)), 'w') as f:
            ret = metrics.print_metrics_custom_bins(train_actual, train_predictions)
            ret = {k: float(v) for k, v in ret.items()}
            json.dump(ret, f)

        with open(os.path.join(result_dir, 'val_{}.json'.format(model_name)), 'w') as f:
            ret = metrics.print_metrics_custom_bins(val_actual, val_predictions)
            ret = {k: float(v) for k, v in ret.items()}
            json.dump(ret, f)

        with open(os.path.join(result_dir, 'test_{}.json'.format(model_name)), 'w') as f:
            ret = metrics.print_metrics_custom_bins(test_actual, test_predictions)
            ret = {k: float(v) for k, v in ret.items()}
            json.dump(ret, f)

        save_results(test_names, test_ts, test_predictions, test_actual,
                     os.path.join(args.output_dir, 'cf_predictions', model_name + '.csv'))
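# --- Illustrative only: self-contained sketch of the per-bin one-vs-rest
# --- scheme used above, on toy data; n_bins, X and Y are placeholders.
import numpy as np
from sklearn.linear_model import LogisticRegression

n_bins = 3
rng = np.random.RandomState(42)
X = rng.randn(20, 5)
Y = np.zeros((20, n_bins), dtype=int)
Y[::2] = 1                                   # ensure every column has both classes

activations = np.zeros(Y.shape, dtype=float)
for task_id in range(n_bins):
    clf = LogisticRegression().fit(X, Y[:, task_id])
    activations[:, task_id] = clf.predict_proba(X)[:, 1]  # P(label = 1) for this bin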
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--period', type=str, default='all',
                        help='specifies which period to extract features from',
                        choices=['first4days', 'first8days', 'last12hours',
                                 'first25percent', 'first50percent', 'all'])
    parser.add_argument('--features', type=str, default='all',
                        help='specifies what features to extract',
                        choices=['all', 'len', 'all_but_len'])
    args = parser.parse_args()
    print(args)

    # grid-search values:
    # penalties = ['l2', 'l2', 'l2', 'l2', 'l2', 'l2', 'l1', 'l1', 'l1', 'l1', 'l1']
    # Cs = [1.0, 0.1, 0.01, 0.001, 0.0001, 0.00001, 1.0, 0.1, 0.01, 0.001, 0.0001]
    penalties = ['l2']
    Cs = [0.00001]

    train_reader = LengthOfStayReader(dataset_dir='../../../data/length-of-stay/train/',
                                      listfile='../../../data/length-of-stay/train_listfile.csv')
    val_reader = LengthOfStayReader(dataset_dir='../../../data/length-of-stay/train/',
                                    listfile='../../../data/length-of-stay/val_listfile.csv')
    test_reader = LengthOfStayReader(dataset_dir='../../../data/length-of-stay/test/',
                                     listfile='../../../data/length-of-stay/test_listfile.csv')

    print('Reading data and extracting features ...')
    n_train = min(100000, train_reader.get_number_of_examples())
    n_val = min(100000, val_reader.get_number_of_examples())

    (train_X, train_y, train_actual, train_names, train_ts) = read_and_extract_features(
        train_reader, n_train, args.period, args.features)
    (val_X, val_y, val_actual, val_names, val_ts) = read_and_extract_features(
        val_reader, n_val, args.period, args.features)
    (test_X, test_y, test_actual, test_names, test_ts) = read_and_extract_features(
        test_reader, test_reader.get_number_of_examples(), args.period, args.features)

    print("train set shape: {}".format(train_X.shape))
    print("validation set shape: {}".format(val_X.shape))
    print("test set shape: {}".format(test_X.shape))

    print('Imputing missing values ...')
    imputer = Imputer(missing_values=np.nan, strategy='mean', axis=0,
                      verbose=0, copy=True)
    imputer.fit(train_X)
    train_X = np.array(imputer.transform(train_X), dtype=np.float32)
    val_X = np.array(imputer.transform(val_X), dtype=np.float32)
    test_X = np.array(imputer.transform(test_X), dtype=np.float32)

    print('Normalizing the data to have zero mean and unit variance ...')
    scaler = StandardScaler()
    scaler.fit(train_X)
    train_X = scaler.transform(train_X)
    val_X = scaler.transform(val_X)
    test_X = scaler.transform(test_X)

    common_utils.create_directory('cf_results')

    for (penalty, C) in zip(penalties, Cs):
        model_name = '{}.{}.{}.C{}'.format(args.period, args.features, penalty, C)

        train_activations = np.zeros(shape=train_y.shape, dtype=float)
        val_activations = np.zeros(shape=val_y.shape, dtype=float)
        test_activations = np.zeros(shape=test_y.shape, dtype=float)

        for task_id in range(n_bins):
            logreg = LogisticRegression(penalty=penalty, C=C, random_state=42)
            logreg.fit(train_X, train_y[:, task_id])

            train_preds = logreg.predict_proba(train_X)
            train_activations[:, task_id] = train_preds[:, 1]

            val_preds = logreg.predict_proba(val_X)
            val_activations[:, task_id] = val_preds[:, 1]

            test_preds = logreg.predict_proba(test_X)
            test_activations[:, task_id] = test_preds[:, 1]

        train_predictions = np.array([metrics.get_estimate_custom(x, n_bins)
                                      for x in train_activations])
        val_predictions = np.array([metrics.get_estimate_custom(x, n_bins)
                                    for x in val_activations])
        test_predictions = np.array([metrics.get_estimate_custom(x, n_bins)
                                     for x in test_activations])

        with open(os.path.join('cf_results', 'train_{}.json'.format(model_name)), 'w') as f:
            ret = metrics.print_metrics_custom_bins(train_actual, train_predictions)
            ret = {k: float(v) for k, v in ret.items()}
            json.dump(ret, f)

        with open(os.path.join('cf_results', 'val_{}.json'.format(model_name)), 'w') as f:
            ret = metrics.print_metrics_custom_bins(val_actual, val_predictions)
            ret = {k: float(v) for k, v in ret.items()}
            json.dump(ret, f)

        with open(os.path.join('cf_results', 'test_{}.json'.format(model_name)), 'w') as f:
            ret = metrics.print_metrics_custom_bins(test_actual, test_predictions)
            ret = {k: float(v) for k, v in ret.items()}
            json.dump(ret, f)

        save_results(test_names, test_ts, test_predictions, test_actual,
                     os.path.join('cf_predictions', model_name + '.csv'))
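# --- Illustrative only: json.dump cannot serialize numpy scalar types, which
# --- is why the metrics dict is cast with float(v) above. Toy demo with a
# --- hypothetical output path:
import json
import numpy as np

ret = {'mad': np.float32(1.5), 'mse': np.float64(2.25)}
with open('demo_metrics.json', 'w') as f:
    json.dump({k: float(v) for k, v in ret.items()}, f)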
    for i in range(test_data_gen.steps):
        print("\rpredicting {} / {}".format(i, test_data_gen.steps), end='')
        ret = test_data_gen.next(return_y_true=True)
        (x, y_processed, y) = ret["data"]
        cur_names = ret["names"]
        cur_ts = ret["ts"]

        x = np.array(x)
        pred = model.predict_on_batch(x)
        predictions += list(pred)
        labels += list(y)
        names += list(cur_names)
        ts += list(cur_ts)

    if args.partition == 'log':
        predictions = [metrics.get_estimate_log(x, 10) for x in predictions]
        metrics.print_metrics_log_bins(labels, predictions)
    if args.partition == 'custom':
        predictions = [metrics.get_estimate_custom(x, 10) for x in predictions]
        metrics.print_metrics_custom_bins(labels, predictions)
    if args.partition == 'none':
        metrics.print_metrics_regression(labels, predictions)

    predictions = [x[0] for x in predictions]
    path = os.path.join("test_predictions", os.path.basename(args.load_state)) + ".csv"
    utils.save_results(names, ts, predictions, labels, path)

else:
    raise ValueError("Wrong value for args.mode")