"first25percent, first50percent, all") parser.add_argument('--features', type=str, default="all", help="all, len, all_but_len") # penalties = ['l2', 'l2', 'l2', 'l2', 'l2', 'l2', 'l1', 'l1', 'l1', 'l1', 'l1'] # Cs = [1.0, 0.1, 0.01, 0.001, 0.0001, 0.00001, 1.0, 0.1, 0.01, 0.001, 0.0001] penalties = ['l2'] Cs = [0.001] args = parser.parse_args() print args train_reader = DecompensationReader( dataset_dir='../../../data/decompensation/train/', listfile='../../../data/decompensation/train_listfile.csv') val_reader = DecompensationReader( dataset_dir='../../../data/decompensation/train/', listfile='../../../data/decompensation/val_listfile.csv') test_reader = DecompensationReader( dataset_dir='../../../data/decompensation/test/', listfile='../../../data/decompensation/test_listfile.csv') def read_and_extract_features(reader, count): read_chunk_size = 1000 # assert (count % read_chunk_size == 0) Xs = []
# 4. Set the `tensorflow` pseudo-random generator at a fixed value
tf.set_random_seed(seed_value)

# 5. Configure a new global `tensorflow` session
session_conf = tf.ConfigProto(intra_op_parallelism_threads=1,
                              inter_op_parallelism_threads=1)
sess = tf.Session(graph=tf.get_default_graph(), config=session_conf)
K.set_session(sess)

if args.small_part:
    args.save_every = 2**30

# Build readers, discretizers, normalizers
train_reader = DecompensationReader(
    dataset_dir=os.path.join(args.data, 'train'),
    listfile=os.path.join(args.data, 'train_listfile.csv'))
val_reader = DecompensationReader(
    dataset_dir=os.path.join(args.data, 'train'),
    listfile=os.path.join(args.data, 'val_listfile.csv'))

discretizer = OneHotEncoder(impute_strategy=args.imputation)
discretizer_header = discretizer.transform(
    train_reader.read_example(0)["X"])[2].split(',')
cont_channels = [
    i for (i, x) in enumerate(discretizer_header) if x.find("->") == -1
]

normalizer = Normalizer(
    fields=cont_channels)  # choose here which columns to standardize
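# Steps 1-3 of the seeding recipe are not shown in this fragment. A sketch
# of what they presumably look like, following the standard reproducibility
# setup implied by the "# 4." and "# 5." comments above (the concrete
# seed_value is an assumption; the original script defines it elsewhere):
import os
import random
import numpy as np

seed_value = 0  # assumed; set once near the top of the script

# 1. Set the `PYTHONHASHSEED` environment variable at a fixed value
os.environ['PYTHONHASHSEED'] = str(seed_value)

# 2. Set the `python` built-in pseudo-random generator at a fixed value
random.seed(seed_value)

# 3. Set the `numpy` pseudo-random generator at a fixed value
np.random.seed(seed_value)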
def main():
    parser = argparse.ArgumentParser(
        description='Script for creating a normalizer state - a file which '
                    'stores the means and standard deviations of columns of '
                    'the output of a discretizer, which are later used to '
                    'standardize the input of neural models.')
    parser.add_argument('--task', type=str, required=True,
                        choices=['ihm', 'decomp', 'los', 'pheno', 'multi'])
    parser.add_argument('--timestep', type=float, default=1.0,
                        help="Rate of the re-sampling to discretize time-series.")
    parser.add_argument('--impute_strategy', type=str, default='previous',
                        choices=['zero', 'next', 'previous', 'normal_value'],
                        help='Strategy for imputing missing values.')
    parser.add_argument('--start_time', type=str, choices=['zero', 'relative'],
                        help='Specifies the start time of discretization. '
                             'Zero means to use the beginning of the ICU stay. '
                             'Relative means to use the time of the first ICU event.')
    parser.add_argument('--store_masks', dest='store_masks', action='store_true',
                        help='Store masks that specify observed/imputed values.')
    parser.add_argument('--no-masks', dest='store_masks', action='store_false',
                        help='Do not store masks specifying observed/imputed values.')
    parser.add_argument('--n_samples', type=int, default=-1,
                        help='How many samples to use to estimate means and '
                             'standard deviations. Set -1 to use all training samples.')
    parser.add_argument('--output_dir', type=str, default='.',
                        help='Directory where the output file will be saved.')
    parser.add_argument('--data', type=str, required=True,
                        help='Path to the task data.')
    parser.set_defaults(store_masks=True)
    args = parser.parse_args()
    print(args)

    # create the reader
    reader = None
    dataset_dir = os.path.join(args.data, 'train')

    if args.task == 'ihm':
        reader = InHospitalMortalityReader(
            dataset_dir=dataset_dir,
            listfile=os.path.join(args.data, 'train_listfile.csv'),
            period_length=48.0)
    if args.task == 'decomp':
        reader = DecompensationReader(
            dataset_dir=dataset_dir,
            listfile=os.path.join(args.data, 'train_listfile.csv'))
    if args.task == 'los':
        reader = LengthOfStayReader(
            dataset_dir=dataset_dir,
            listfile=os.path.join(args.data, 'train_listfile.csv'))
    if args.task == 'pheno':
        reader = PhenotypingReader(
            dataset_dir=dataset_dir,
            listfile=os.path.join(args.data, 'train_listfile.csv'))
    if args.task == 'multi':
        reader = MultitaskReader(
            dataset_dir=dataset_dir,
            listfile=os.path.join(args.data, 'train_listfile.csv'))

    # create the discretizer
    discretizer = Discretizer(timestep=args.timestep,
                              store_masks=args.store_masks,
                              impute_strategy=args.impute_strategy,
                              start_time=args.start_time)
    discretizer_header = discretizer.transform(
        reader.read_example(0)['X'])[1].split(',')
    continuous_channels = [
        i for (i, x) in enumerate(discretizer_header) if x.find("->") == -1
    ]

    # create the normalizer
    normalizer = Normalizer(fields=continuous_channels)

    # read all examples and store the state of the normalizer
    n_samples = args.n_samples
    if n_samples == -1:
        n_samples = reader.get_number_of_examples()

    for i in range(n_samples):
        if i % 1000 == 0:
            print('Processed {} / {} samples'.format(i, n_samples), end='\r')
        ret = reader.read_example(i)
        data, new_header = discretizer.transform(ret['X'], end=ret['t'])
        normalizer._feed_data(data)
    print('\n')

    file_name = '{}_ts:{:.2f}_impute:{}_start:{}_masks:{}_n:{}.normalizer'.format(
        args.task, args.timestep, args.impute_strategy, args.start_time,
        args.store_masks, n_samples)
    file_name = os.path.join(args.output_dir, file_name)
    print('Saving the state in {} ...'.format(file_name))
    normalizer._save_params(file_name)
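# Example invocation of the script above; every flag comes straight from
# the argparse definitions, but the script filename and data path are
# assumptions for illustration:
#
#   python create_normalizer_state.py --task decomp --timestep 1.0 \
#       --impute_strategy previous --start_time zero \
#       --data data/decompensation --output_dir .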
parser.add_argument('--batch_norm', type=bool, default=False,
                    help='batch normalization')
parser.add_argument('--timestep', type=float, default=0.8,
                    help="fixed timestep used in the dataset")
parser.add_argument('--small_part', dest='small_part', action='store_true')
parser.add_argument('--whole_data', dest='small_part', action='store_false')
parser.set_defaults(small_part=False)
args = parser.parse_args()
print(args)

train_reader = DecompensationReader(
    dataset_dir='../../data/decompensation/train/',
    listfile='../../data/decompensation/train_listfile.csv')
val_reader = DecompensationReader(
    dataset_dir='../../data/decompensation/train/',
    listfile='../../data/decompensation/val_listfile.csv')

discretizer = Discretizer(timestep=args.timestep,
                          store_masks=True,
                          impute_strategy='previous',
                          start_time='zero')
discretizer_header = discretizer.transform(
    train_reader.read_example(0)[0])[1].split(',')
cont_channels = [
    i for (i, x) in enumerate(discretizer_header) if x.find("->") == -1
]
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--period', type=str, default='all',
                        help='specifies which period extract features from',
                        choices=['first4days', 'first8days', 'last12hours',
                                 'first25percent', 'first50percent', 'all'])
    parser.add_argument('--features', type=str, default='all',
                        help='specifies what features to extract',
                        choices=['all', 'len', 'all_but_len'])
    args = parser.parse_args()
    print(args)

    # penalties = ['l2', 'l2', 'l2', 'l2', 'l2', 'l2', 'l1', 'l1', 'l1', 'l1', 'l1']
    # Cs = [1.0, 0.1, 0.01, 0.001, 0.0001, 0.00001, 1.0, 0.1, 0.01, 0.001, 0.0001]
    penalties = ['l2']
    Cs = [0.001]

    train_reader = DecompensationReader(
        dataset_dir='../../../data/decompensation/train/',
        listfile='../../../data/decompensation/train_listfile.csv')
    val_reader = DecompensationReader(
        dataset_dir='../../../data/decompensation/train/',
        listfile='../../../data/decompensation/val_listfile.csv')
    test_reader = DecompensationReader(
        dataset_dir='../../../data/decompensation/test/',
        listfile='../../../data/decompensation/test_listfile.csv')

    print('Reading data and extracting features ...')
    n_train = min(100000, train_reader.get_number_of_examples())
    n_val = min(100000, val_reader.get_number_of_examples())

    (train_X, train_y, train_names, train_ts) = read_and_extract_features(
        train_reader, n_train, args.period, args.features)
    (val_X, val_y, val_names, val_ts) = read_and_extract_features(
        val_reader, n_val, args.period, args.features)
    (test_X, test_y, test_names, test_ts) = read_and_extract_features(
        test_reader, test_reader.get_number_of_examples(),
        args.period, args.features)

    print('Imputing missing values ...')
    imputer = Imputer(missing_values=np.nan, strategy='mean', axis=0,
                      verbose=0, copy=True)
    imputer.fit(train_X)
    train_X = np.array(imputer.transform(train_X), dtype=np.float32)
    val_X = np.array(imputer.transform(val_X), dtype=np.float32)
    test_X = np.array(imputer.transform(test_X), dtype=np.float32)

    print('Normalizing the data to have zero mean and unit variance ...')
    scaler = StandardScaler()
    scaler.fit(train_X)
    train_X = scaler.transform(train_X)
    val_X = scaler.transform(val_X)
    test_X = scaler.transform(test_X)

    common_utils.create_directory('results')

    for (penalty, C) in zip(penalties, Cs):
        file_name = '{}.{}.{}.C{}'.format(args.period, args.features, penalty, C)

        logreg = LogisticRegression(penalty=penalty, C=C, random_state=42)
        logreg.fit(train_X, train_y)

        with open(os.path.join('results', 'train_{}.json'.format(file_name)), 'w') as res_file:
            ret = print_metrics_binary(train_y, logreg.predict_proba(train_X))
            ret = {k: float(v) for k, v in ret.items()}
            json.dump(ret, res_file)

        with open(os.path.join('results', 'val_{}.json'.format(file_name)), 'w') as res_file:
            ret = print_metrics_binary(val_y, logreg.predict_proba(val_X))
            ret = {k: float(v) for k, v in ret.items()}
            json.dump(ret, res_file)

        prediction = logreg.predict_proba(test_X)[:, 1]

        with open(os.path.join('results', 'test_{}.json'.format(file_name)), 'w') as res_file:
            ret = print_metrics_binary(test_y, prediction)
            ret = {k: float(v) for k, v in ret.items()}
            json.dump(ret, res_file)

        save_results(test_names, test_ts, prediction, test_y,
                     os.path.join('predictions', file_name + '.csv'))
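# `save_results` is called above but not defined in this fragment. A minimal
# sketch of what it could look like; the exact CSV column layout is an
# assumption:
def save_results(names, ts, predictions, labels, path):
    # make sure the target directory exists before writing
    common_utils.create_directory(os.path.dirname(path))
    with open(path, 'w') as res_file:
        res_file.write("stay,period_length,prediction,y_true\n")
        for (name, t, x, y) in zip(names, ts, predictions, labels):
            res_file.write("{},{:.6f},{:.6f},{}\n".format(name, t, x, y))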
if args.small_part:
    args.save_every = 2**30

# Build readers, discretizers, normalizers
if args.deep_supervision:
    train_data_loader = common_utils.DeepSupervisionDataLoader(
        dataset_dir='../../data/decompensation/train/',
        listfile='../../data/decompensation/train_listfile.csv',
        small_part=args.small_part)
    val_data_loader = common_utils.DeepSupervisionDataLoader(
        dataset_dir='../../data/decompensation/train/',
        listfile='../../data/decompensation/val_listfile.csv',
        small_part=args.small_part)
else:
    train_reader = DecompensationReader(
        dataset_dir='../../data/decompensation/train/',
        listfile='../../data/decompensation/train_listfile.csv')
    val_reader = DecompensationReader(
        dataset_dir='../../data/decompensation/train/',
        listfile='../../data/decompensation/val_listfile.csv')

discretizer = Discretizer(timestep=args.timestep,
                          store_masks=True,
                          impute_strategy='previous',
                          start_time='zero')

if args.deep_supervision:
    discretizer_header = discretizer.transform(
        train_data_loader._data[0][0])[1].split(',')
else:
    discretizer_header = discretizer.transform(
        train_reader.read_example(0)[0])[1].split(',')
if args.small_part:
    args.save_every = 2**30

# Build readers, discretizers, normalizers
if args.deep_supervision:
    train_data_loader = common_utils.DeepSupervisionDataLoader(
        dataset_dir='../../data/decompensation/train/',
        listfile='../../data/decompensation/train_listfile.csv',
        small_part=args.small_part)
    val_data_loader = common_utils.DeepSupervisionDataLoader(
        dataset_dir='../../data/decompensation/train/',
        listfile='../../data/decompensation/val_listfile.csv',
        small_part=args.small_part)
else:
    train_reader = DecompensationReader(
        dataset_dir='../../data/decompensation/train/',
        listfile='../../data/decompensation/train_listfile.csv')
    val_reader = DecompensationReader(
        dataset_dir='../../data/decompensation/train/',
        listfile='../../data/decompensation/val_listfile.csv')

discretizer = Discretizer(timestep=args.timestep,
                          store_masks=True,
                          impute_strategy='previous',
                          start_time='zero')

if args.deep_supervision:
    discretizer_header = discretizer.transform(
        train_data_loader._data["X"][0])[1].split(',')
else:
    discretizer_header = discretizer.transform(
        train_reader.read_example(0)["X"])[1].split(',')
args = parser.parse_args()
print(args)

if args.small_part:
    args.save_every = 2**30

# Build readers, discretizers, normalizers
if args.deep_supervision:
    train_data_loader = common_utils.DeepSupervisionDataLoader(
        dataset_dir=os.path.join(args.data, 'train'),
        listfile=os.path.join(args.data, 'train_listfile.csv'),
        small_part=args.small_part)
    val_data_loader = common_utils.DeepSupervisionDataLoader(
        dataset_dir=os.path.join(args.data, 'train'),
        listfile=os.path.join(args.data, 'val_listfile.csv'),
        small_part=args.small_part)
else:
    train_reader = DecompensationReader(
        dataset_dir=os.path.join(args.data, 'train'),
        listfile=os.path.join(args.data, 'train_listfile.csv'))
    val_reader = DecompensationReader(
        dataset_dir=os.path.join(args.data, 'train'),
        listfile=os.path.join(args.data, 'val_listfile.csv'))

discretizer = Discretizer(timestep=args.timestep,
                          store_masks=True,
                          impute_strategy='previous',
                          start_time='zero')

if args.deep_supervision:
    discretizer_header = discretizer.transform(
        train_data_loader._data["X"][0])[1].split(',')
else:
    discretizer_header = discretizer.transform(
        train_reader.read_example(0)["X"])[1].split(',')

cont_channels = [
    i for (i, x) in enumerate(discretizer_header) if x.find("->") == -1
]

normalizer = Normalizer(
    fields=cont_channels)  # choose here which columns to standardize
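# In scripts of this kind, building the normalizer is typically followed by
# loading a precomputed normalizer state (the file produced by the
# state-creation script above). A sketch, assuming a `--normalizer_state`
# command-line flag and a `Normalizer.load_params` method:
normalizer_state = args.normalizer_state  # assumed flag
if normalizer_state is None:
    # default file name is an assumption, modeled on the
    # '{task}_ts:..._impute:..._start:...' naming pattern used earlier
    normalizer_state = 'decomp_ts:{:.2f}_impute:previous_start:zero.normalizer'.format(
        args.timestep)
normalizer.load_params(normalizer_state)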
args = parser.parse_args()
print(args)

if args.small_part:
    args.save_every = 2**30

# Build readers, discretizers, normalizers
if args.deep_supervision:
    train_data_loader = common_utils.DeepSupervisionDataLoader(
        dataset_dir='../../data/decompensation/train/',
        listfile='../../data/decompensation/train_listfile.csv',
        small_part=args.small_part)
    val_data_loader = common_utils.DeepSupervisionDataLoader(
        dataset_dir='../../data/decompensation/train/',
        listfile='../../data/decompensation/val_listfile.csv',
        small_part=args.small_part)
else:
    train_reader = DecompensationReader(
        dataset_dir='../../data/decompensation/train/',
        listfile='../../data/decompensation/train_listfile.csv')
    val_reader = DecompensationReader(
        dataset_dir='../../data/decompensation/train/',
        listfile='../../data/decompensation/val_listfile.csv')

discretizer = Discretizer(timestep=args.timestep,
                          store_masks=True,
                          impute_strategy='previous',
                          start_time='zero')

if args.deep_supervision:
    discretizer_header = discretizer.transform(
        train_data_loader._data["X"][0])[1].split(',')
else:
    discretizer_header = discretizer.transform(
        train_reader.read_example(0)["X"])[1].split(',')

cont_channels = [
    i for (i, x) in enumerate(discretizer_header) if x.find("->") == -1
]

normalizer = Normalizer(
    fields=cont_channels)  # choose here: only continuous channels vs. all
if args.deep_supervision:
    train_data_loader = common_utils.DeepSupervisionDataLoader(
        dataset_dir=os.path.join(args.data, 'train'),
        listfile=os.path.join(args.data, 'train_listfile.csv'),
        small_part=args.small_part,
        sources=sources,
        timesteps=args.timesteps,
        condensed=args.condensed)
    val_data_loader = common_utils.DeepSupervisionDataLoader(
        dataset_dir=os.path.join(args.data, 'train'),
        listfile=os.path.join(args.data, 'val_listfile.csv'),
        small_part=args.small_part,
        sources=sources,
        timesteps=args.timesteps,
        condensed=args.condensed)
else:
    train_reader = DecompensationReader(
        dataset_dir=os.path.join(args.data, 'train'),
        listfile=os.path.join(args.data, 'train_listfile.csv'),
        sources=sources,
        timesteps=args.timesteps,
        condensed=args.condensed)
    val_reader = DecompensationReader(
        dataset_dir=os.path.join(args.data, 'train'),
        listfile=os.path.join(args.data, 'val_listfile.csv'),
        sources=sources,
        timesteps=args.timesteps,
        condensed=args.condensed)