def read_and_extract_features(args, partition):
    data_folder = os.path.join(args.data, partition)
    reader = LengthOfStayReader(dataset_dir=data_folder,
                                listfile=os.path.join(data_folder, 'listfile.csv'),
                                fixed_time=args.period_length)

    ret = common_utils.read_chunk(reader, reader.get_number_of_examples())
    patients = np.array(ret["patient"], dtype=int)
    ret["meta"] = np.stack(ret["meta"])
    X = common_utils.extract_features_from_rawdata(ret['X'], ret['header'], period="all",
                                                   features=args.features)

    # Check that the period of observation time is the same for all observations
    period_of_obs = np.mean(ret["t"])
    print("Period of observation", period_of_obs, np.var(ret["t"]))
    assert np.var(ret["t"]) < 1e-3

    # Augment data with missing-value indicator columns
    missing_flags = np.isnan(X)
    # Also add in the metadata (age, ethnicity, gender)
    augmented_X = np.concatenate([ret["meta"], X, missing_flags], axis=1)
    y = np.array(ret['y']).reshape((-1, 1)) + period_of_obs
    log_y = np.log(y)

    return augmented_X, log_y, patients
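# Minimal usage sketch (not part of the original script). It assumes an `args`
# namespace carrying the fields used above (`data`, `period_length`, `features`);
# the concrete values below are hypothetical and only illustrate the shapes and
# the log-space target returned by read_and_extract_features.
#
# import argparse
# import numpy as np
#
# args = argparse.Namespace(data='../../../data/length-of-stay/',
#                           period_length=24.0,   # hypothetical fixed observation window
#                           features='all')
#
# train_X, train_log_y, train_patients = read_and_extract_features(args, 'train')
# test_X, test_log_y, test_patients = read_and_extract_features(args, 'test')
#
# print(train_X.shape, train_log_y.shape, train_patients.shape)
# # Targets are log-transformed, so model outputs map back to hours with np.exp
# print("example target in original units:", np.exp(train_log_y[0, 0]))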
    bins = np.array([one_hot(metrics.get_bin_custom(x, nbins)) for x in ys])
    return (Xs, bins, ys)


print "==> reading data and extracting features"
chunk_size = 100000  # TODO: bigger chunk_size
prev_time = time.time()

(train_X, train_y, train_actual) = read_and_extract_features(train_reader, chunk_size)
del train_reader

(val_X, val_y, val_actual) = read_and_extract_features(val_reader, chunk_size)
del val_reader

(test_X, test_y, test_actual) = read_and_extract_features(test_reader, test_reader.get_number_of_examples())
del test_reader

print "==> elapsed time = %.3f" % (time.time() - prev_time)

print "train.shape ", train_X.shape, train_y.shape
print "val.shape", val_X.shape, val_y.shape
print "test.shape", test_X.shape, test_y.shape

print "==> imputing missing values"
imputer = Imputer(missing_values=np.nan, strategy='mean', axis=0, verbose=0, copy=True)
imputer.fit(train_X)
train_X = np.array(imputer.transform(train_X), dtype=np.float32)
val_X = np.array(imputer.transform(val_X), dtype=np.float32)
test_X = np.array(imputer.transform(test_X), dtype=np.float32)
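# Note: sklearn.preprocessing.Imputer was removed in scikit-learn 0.22. A sketch of the
# equivalent imputation step with the current API is shown below, assuming
# scikit-learn >= 0.22; SimpleImputer always imputes column-wise, which matches the
# old axis=0 behaviour used throughout these scripts.
#
# import numpy as np
# from sklearn.impute import SimpleImputer
#
# imputer = SimpleImputer(missing_values=np.nan, strategy='mean', copy=True)
# imputer.fit(train_X)
# train_X = np.array(imputer.transform(train_X), dtype=np.float32)
# val_X = np.array(imputer.transform(val_X), dtype=np.float32)
# test_X = np.array(imputer.transform(test_X), dtype=np.float32)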
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--period', type=str, default='all',
                        help='specifies which period to extract features from',
                        choices=['first4days', 'first8days', 'last12hours',
                                 'first25percent', 'first50percent', 'all'])
    parser.add_argument('--features', type=str, default='all',
                        help='specifies what features to extract',
                        choices=['all', 'len', 'all_but_len'])
    parser.add_argument('--data', type=str, help='Path to the data of length-of-stay task',
                        default=os.path.join(os.path.dirname(__file__), '../../../data/length-of-stay/'))
    parser.add_argument('--output_dir', type=str,
                        help='Directory relative to which all output files are stored',
                        default='.')
    args = parser.parse_args()
    print(args)

    train_reader = LengthOfStayReader(dataset_dir=os.path.join(args.data, 'train'),
                                      listfile=os.path.join(args.data, 'train_listfile.csv'))
    val_reader = LengthOfStayReader(dataset_dir=os.path.join(args.data, 'train'),
                                    listfile=os.path.join(args.data, 'val_listfile.csv'))
    test_reader = LengthOfStayReader(dataset_dir=os.path.join(args.data, 'test'),
                                     listfile=os.path.join(args.data, 'test_listfile.csv'))

    print('Reading data and extracting features ...')
    n_train = min(100000, train_reader.get_number_of_examples())
    n_val = min(100000, val_reader.get_number_of_examples())

    (train_X, train_y, train_names, train_ts) = read_and_extract_features(
        train_reader, n_train, args.period, args.features)
    (val_X, val_y, val_names, val_ts) = read_and_extract_features(
        val_reader, n_val, args.period, args.features)
    (test_X, test_y, test_names, test_ts) = read_and_extract_features(
        test_reader, test_reader.get_number_of_examples(), args.period, args.features)

    print(train_X.shape)

    print('Imputing missing values ...')
    imputer = Imputer(missing_values=np.nan, strategy='mean', axis=0, verbose=0, copy=True)
    imputer.fit(train_X)
    train_X = np.array(imputer.transform(train_X), dtype=np.float32)
    val_X = np.array(imputer.transform(val_X), dtype=np.float32)
    test_X = np.array(imputer.transform(test_X), dtype=np.float32)

    print('Normalizing the data to have zero mean and unit variance ...')
    scaler = StandardScaler()
    scaler.fit(train_X)
    train_X = scaler.transform(train_X)
    val_X = scaler.transform(val_X)
    test_X = scaler.transform(test_X)

    file_name = "{}.{}".format(args.period, args.features)

    linreg = LinearRegression()
    linreg.fit(train_X, train_y)

    result_dir = os.path.join(args.output_dir, 'results')
    common_utils.create_directory(result_dir)

    with open(os.path.join(result_dir, 'train_{}.json'.format(file_name)), "w") as res_file:
        ret = print_metrics_regression(train_y, linreg.predict(train_X))
        ret = {k: float(v) for k, v in ret.items()}
        json.dump(ret, res_file)

    with open(os.path.join(result_dir, 'val_{}.json'.format(file_name)), 'w') as res_file:
        ret = print_metrics_regression(val_y, linreg.predict(val_X))
        ret = {k: float(v) for k, v in ret.items()}
        json.dump(ret, res_file)

    prediction = linreg.predict(test_X)

    with open(os.path.join(result_dir, 'test_{}.json'.format(file_name)), 'w') as res_file:
        ret = print_metrics_regression(test_y, prediction)
        ret = {k: float(v) for k, v in ret.items()}
        json.dump(ret, res_file)

    save_results(test_names, test_ts, prediction, test_y,
                 os.path.join(args.output_dir, 'predictions', file_name + '.csv'))
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--period', type=str, default='all',
                        help='specifies which period to extract features from',
                        choices=['first4days', 'first8days', 'last12hours',
                                 'first25percent', 'first50percent', 'all'])
    parser.add_argument('--features', type=str, default='all',
                        help='specifies what features to extract',
                        choices=['all', 'len', 'all_but_len'])
    parser.add_argument('--grid-search', dest='grid_search', action='store_true')
    parser.add_argument('--no-grid-search', dest='grid_search', action='store_false')
    parser.set_defaults(grid_search=False)
    parser.add_argument('--data', type=str, help='Path to the data of length-of-stay task',
                        default=os.path.join(os.path.dirname(__file__), '../../../data/length-of-stay/'))
    parser.add_argument('--output_dir', type=str,
                        help='Directory relative to which all output files are stored',
                        default='.')
    args = parser.parse_args()
    print(args)

    if args.grid_search:
        penalties = ['l2', 'l2', 'l2', 'l2', 'l2', 'l2', 'l1', 'l1', 'l1', 'l1', 'l1']
        coefs = [1.0, 0.1, 0.01, 0.001, 0.0001, 0.00001, 1.0, 0.1, 0.01, 0.001, 0.0001]
    else:
        penalties = ['l2']
        coefs = [0.00001]

    train_reader = LengthOfStayReader(dataset_dir=os.path.join(args.data, 'train'),
                                      listfile=os.path.join(args.data, 'train_listfile.csv'))
    val_reader = LengthOfStayReader(dataset_dir=os.path.join(args.data, 'train'),
                                    listfile=os.path.join(args.data, 'val_listfile.csv'))
    test_reader = LengthOfStayReader(dataset_dir=os.path.join(args.data, 'test'),
                                     listfile=os.path.join(args.data, 'test_listfile.csv'))

    print('Reading data and extracting features ...')
    n_train = min(100000, train_reader.get_number_of_examples())
    n_val = min(100000, val_reader.get_number_of_examples())

    (train_X, train_y, train_actual, train_names, train_ts) = read_and_extract_features(
        train_reader, n_train, args.period, args.features)
    (val_X, val_y, val_actual, val_names, val_ts) = read_and_extract_features(
        val_reader, n_val, args.period, args.features)
    (test_X, test_y, test_actual, test_names, test_ts) = read_and_extract_features(
        test_reader, test_reader.get_number_of_examples(), args.period, args.features)

    print("train set shape: {}".format(train_X.shape))
    print("validation set shape: {}".format(val_X.shape))
    print("test set shape: {}".format(test_X.shape))

    print('Imputing missing values ...')
    imputer = Imputer(missing_values=np.nan, strategy='mean', axis=0, verbose=0, copy=True)
    imputer.fit(train_X)
    train_X = np.array(imputer.transform(train_X), dtype=np.float32)
    val_X = np.array(imputer.transform(val_X), dtype=np.float32)
    test_X = np.array(imputer.transform(test_X), dtype=np.float32)

    print('Normalizing the data to have zero mean and unit variance ...')
    scaler = StandardScaler()
    scaler.fit(train_X)
    train_X = scaler.transform(train_X)
    val_X = scaler.transform(val_X)
    test_X = scaler.transform(test_X)

    result_dir = os.path.join(args.output_dir, 'cf_results')
    common_utils.create_directory(result_dir)

    for (penalty, C) in zip(penalties, coefs):
        model_name = '{}.{}.{}.C{}'.format(args.period, args.features, penalty, C)

        train_activations = np.zeros(shape=train_y.shape, dtype=float)
        val_activations = np.zeros(shape=val_y.shape, dtype=float)
        test_activations = np.zeros(shape=test_y.shape, dtype=float)

        for task_id in range(n_bins):
            logreg = LogisticRegression(penalty=penalty, C=C, random_state=42)
            logreg.fit(train_X, train_y[:, task_id])

            train_preds = logreg.predict_proba(train_X)
            train_activations[:, task_id] = train_preds[:, 1]

            val_preds = logreg.predict_proba(val_X)
            val_activations[:, task_id] = val_preds[:, 1]

            test_preds = logreg.predict_proba(test_X)
            test_activations[:, task_id] = test_preds[:, 1]

        train_predictions = np.array([metrics.get_estimate_custom(x, n_bins) for x in train_activations])
        val_predictions = np.array([metrics.get_estimate_custom(x, n_bins) for x in val_activations])
        test_predictions = np.array([metrics.get_estimate_custom(x, n_bins) for x in test_activations])

        with open(os.path.join(result_dir, 'train_{}.json'.format(model_name)), 'w') as f:
            ret = metrics.print_metrics_custom_bins(train_actual, train_predictions)
            ret = {k: float(v) for k, v in ret.items()}
            json.dump(ret, f)

        with open(os.path.join(result_dir, 'val_{}.json'.format(model_name)), 'w') as f:
            ret = metrics.print_metrics_custom_bins(val_actual, val_predictions)
            ret = {k: float(v) for k, v in ret.items()}
            json.dump(ret, f)

        with open(os.path.join(result_dir, 'test_{}.json'.format(model_name)), 'w') as f:
            ret = metrics.print_metrics_custom_bins(test_actual, test_predictions)
            ret = {k: float(v) for k, v in ret.items()}
            json.dump(ret, f)

        save_results(test_names, test_ts, test_predictions, test_actual,
                     os.path.join(args.output_dir, 'cf_predictions', model_name + '.csv'))
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--period', type=str, default='all',
                        help='specifies which period to extract features from',
                        choices=['first4days', 'first8days', 'last12hours',
                                 'first25percent', 'first50percent', 'all'])
    parser.add_argument('--features', type=str, default='all',
                        help='specifies what features to extract',
                        choices=['all', 'len', 'all_but_len'])
    args = parser.parse_args()
    print(args)

    train_reader = LengthOfStayReader(dataset_dir='../../../data/length-of-stay/train/',
                                      listfile='../../../data/length-of-stay/train_listfile.csv')
    val_reader = LengthOfStayReader(dataset_dir='../../../data/length-of-stay/train/',
                                    listfile='../../../data/length-of-stay/val_listfile.csv')
    test_reader = LengthOfStayReader(dataset_dir='../../../data/length-of-stay/test/',
                                     listfile='../../../data/length-of-stay/test_listfile.csv')

    print('Reading data and extracting features ...')
    n_train = min(100000, train_reader.get_number_of_examples())
    n_val = min(100000, val_reader.get_number_of_examples())

    (train_X, train_y, train_names, train_ts) = read_and_extract_features(
        train_reader, n_train, args.period, args.features)
    (val_X, val_y, val_names, val_ts) = read_and_extract_features(
        val_reader, n_val, args.period, args.features)
    (test_X, test_y, test_names, test_ts) = read_and_extract_features(
        test_reader, test_reader.get_number_of_examples(), args.period, args.features)

    print('Imputing missing values ...')
    imputer = Imputer(missing_values=np.nan, strategy='mean', axis=0, verbose=0, copy=True)
    imputer.fit(train_X)
    train_X = np.array(imputer.transform(train_X), dtype=np.float32)
    val_X = np.array(imputer.transform(val_X), dtype=np.float32)
    test_X = np.array(imputer.transform(test_X), dtype=np.float32)

    print('Normalizing the data to have zero mean and unit variance ...')
    scaler = StandardScaler()
    scaler.fit(train_X)
    train_X = scaler.transform(train_X)
    val_X = scaler.transform(val_X)
    test_X = scaler.transform(test_X)

    file_name = "{}.{}".format(args.period, args.features)

    linreg = LinearRegression()
    linreg.fit(train_X, train_y)

    common_utils.create_directory('results')

    with open(os.path.join("results", 'train_{}.json'.format(file_name)), "w") as res_file:
        ret = print_metrics_regression(train_y, linreg.predict(train_X))
        ret = {k: float(v) for k, v in ret.items()}
        json.dump(ret, res_file)

    with open(os.path.join('results', 'val_{}.json'.format(file_name)), 'w') as res_file:
        ret = print_metrics_regression(val_y, linreg.predict(val_X))
        ret = {k: float(v) for k, v in ret.items()}
        json.dump(ret, res_file)

    prediction = linreg.predict(test_X)

    with open(os.path.join('results', 'test_{}.json'.format(file_name)), 'w') as res_file:
        ret = print_metrics_regression(test_y, prediction)
        ret = {k: float(v) for k, v in ret.items()}
        json.dump(ret, res_file)

    save_results(test_names, test_ts, prediction, test_y,
                 os.path.join('predictions', file_name + '.csv'))
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--period', type=str, default='all',
                        help='specifies which period to extract features from',
                        choices=['first4days', 'first8days', 'last12hours',
                                 'first25percent', 'first50percent', 'all'])
    parser.add_argument('--features', type=str, default='all',
                        help='specifies what features to extract',
                        choices=['all', 'len', 'all_but_len'])
    args = parser.parse_args()
    print(args)

    # penalties = ['l2', 'l2', 'l2', 'l2', 'l2', 'l2', 'l1', 'l1', 'l1', 'l1', 'l1']
    # Cs = [1.0, 0.1, 0.01, 0.001, 0.0001, 0.00001, 1.0, 0.1, 0.01, 0.001, 0.0001]
    penalties = ['l2']
    Cs = [0.00001]

    train_reader = LengthOfStayReader(dataset_dir='../../../data/length-of-stay/train/',
                                      listfile='../../../data/length-of-stay/train_listfile.csv')
    val_reader = LengthOfStayReader(dataset_dir='../../../data/length-of-stay/train/',
                                    listfile='../../../data/length-of-stay/val_listfile.csv')
    test_reader = LengthOfStayReader(dataset_dir='../../../data/length-of-stay/test/',
                                     listfile='../../../data/length-of-stay/test_listfile.csv')

    print('Reading data and extracting features ...')
    n_train = min(100000, train_reader.get_number_of_examples())
    n_val = min(100000, val_reader.get_number_of_examples())

    (train_X, train_y, train_actual, train_names, train_ts) = read_and_extract_features(
        train_reader, n_train, args.period, args.features)
    (val_X, val_y, val_actual, val_names, val_ts) = read_and_extract_features(
        val_reader, n_val, args.period, args.features)
    (test_X, test_y, test_actual, test_names, test_ts) = read_and_extract_features(
        test_reader, test_reader.get_number_of_examples(), args.period, args.features)

    print("train set shape: {}".format(train_X.shape))
    print("validation set shape: {}".format(val_X.shape))
    print("test set shape: {}".format(test_X.shape))

    print('Imputing missing values ...')
    imputer = Imputer(missing_values=np.nan, strategy='mean', axis=0, verbose=0, copy=True)
    imputer.fit(train_X)
    train_X = np.array(imputer.transform(train_X), dtype=np.float32)
    val_X = np.array(imputer.transform(val_X), dtype=np.float32)
    test_X = np.array(imputer.transform(test_X), dtype=np.float32)

    print('Normalizing the data to have zero mean and unit variance ...')
    scaler = StandardScaler()
    scaler.fit(train_X)
    train_X = scaler.transform(train_X)
    val_X = scaler.transform(val_X)
    test_X = scaler.transform(test_X)

    common_utils.create_directory('cf_results')

    for (penalty, C) in zip(penalties, Cs):
        model_name = '{}.{}.{}.C{}'.format(args.period, args.features, penalty, C)

        train_activations = np.zeros(shape=train_y.shape, dtype=float)
        val_activations = np.zeros(shape=val_y.shape, dtype=float)
        test_activations = np.zeros(shape=test_y.shape, dtype=float)

        for task_id in range(n_bins):
            logreg = LogisticRegression(penalty=penalty, C=C, random_state=42)
            logreg.fit(train_X, train_y[:, task_id])

            train_preds = logreg.predict_proba(train_X)
            train_activations[:, task_id] = train_preds[:, 1]

            val_preds = logreg.predict_proba(val_X)
            val_activations[:, task_id] = val_preds[:, 1]

            test_preds = logreg.predict_proba(test_X)
            test_activations[:, task_id] = test_preds[:, 1]

        train_predictions = np.array([metrics.get_estimate_custom(x, n_bins) for x in train_activations])
        val_predictions = np.array([metrics.get_estimate_custom(x, n_bins) for x in val_activations])
        test_predictions = np.array([metrics.get_estimate_custom(x, n_bins) for x in test_activations])

        with open(os.path.join('cf_results', 'train_{}.json'.format(model_name)), 'w') as f:
            ret = metrics.print_metrics_custom_bins(train_actual, train_predictions)
            ret = {k: float(v) for k, v in ret.items()}
            json.dump(ret, f)

        with open(os.path.join('cf_results', 'val_{}.json'.format(model_name)), 'w') as f:
            ret = metrics.print_metrics_custom_bins(val_actual, val_predictions)
            ret = {k: float(v) for k, v in ret.items()}
            json.dump(ret, f)

        with open(os.path.join('cf_results', 'test_{}.json'.format(model_name)), 'w') as f:
            ret = metrics.print_metrics_custom_bins(test_actual, test_predictions)
            ret = {k: float(v) for k, v in ret.items()}
            json.dump(ret, f)

        save_results(test_names, test_ts, test_predictions, test_actual,
                     os.path.join('cf_predictions', model_name + '.csv'))
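# Sketch of how per-bin activations can be collapsed into a single length-of-stay
# point estimate. This is an illustration only: the bin values below are hypothetical
# placeholders, and the actual conversion used by these scripts lives in
# metrics.get_estimate_custom / metrics.print_metrics_custom_bins, whose definitions
# are not shown here.
#
# import numpy as np
#
# bin_values = np.array([0.5, 1.5, 2.5, 3.5, 4.5, 5.5, 6.5, 10.0, 12.0, 20.0])  # hypothetical, in days
#
# def estimate_from_activations(activations, bin_values):
#     """Expected LOS under the renormalized per-bin probabilities."""
#     probs = activations / activations.sum()
#     return float(np.dot(probs, bin_values))
#
# example_activations = np.array([0.05, 0.1, 0.3, 0.25, 0.1, 0.08, 0.05, 0.04, 0.02, 0.01])
# print(estimate_from_activations(example_activations, bin_values))  # a point estimate in days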
        process_one_chunk("train", chunk_index)
        cnt_trained = chunk_index - n_trained_chunks + 1

        if (cnt_trained % 5 == 0):
            val_loss = process_one_chunk("test", chunk_index)
            if ((cnt_trained / 5) % args.save_every == 0):
                state_name = 'states/%s.chunk%d.test%.8f.state' % (network_name, chunk_index, val_loss)
                print "==> saving ... %s" % state_name
                network.save_params(state_name, chunk_index)

        print "chunk %d took %.3fs" % (chunk_index, float(time.time()) - start_time)

        chunks_per_epoch = train_reader.get_number_of_examples() // chunk_size
        if (cnt_trained % chunks_per_epoch == 0):
            train_reader.random_shuffle()
            val_reader.random_shuffle()

elif args.mode == 'test':
    # ensure that the code uses test_reader
    del train_reader
    del val_reader

    test_reader = LengthOfStayReader(dataset_dir='../../data/length-of-stay/test/',
                                     listfile='../../data/length-of-stay/test_listfile.csv')

    n_batches = test_reader.get_number_of_examples() // args.batch_size
    y_true = []
# ensure that the code uses test_reader
del train_data_gen
del val_data_gen
if args.deep_supervision:
    del train_data_loader
    del val_data_loader
else:
    del train_reader
    del val_reader

test_reader = LengthOfStayReader(dataset_dir='../../data/length-of-stay/test/',
                                 listfile='../../data/length-of-stay/test_listfile.csv')

test_nbatches = test_reader.get_number_of_examples() // args.batch_size
test_nbatches = 10000
test_data_gen = utils.BatchGen(reader=test_reader,
                               discretizer=discretizer,
                               normalizer=normalizer,
                               partition=args.partition,
                               batch_size=args.batch_size,
                               steps=test_nbatches)

labels = []
predictions = []

for i in range(test_nbatches):
    print "\rpredicting {} / {}".format(i, test_nbatches),
    x, y_processed, y = test_data_gen.next(return_y_true=True)
    x = np.array(x)
    pred = model.predict_on_batch(x)
    predictions += list(pred)