# Example #1 (score: 0)
# NOTE(review): fragment of the mimic3-benchmarks phenotyping training
# script; `args`, `utils`, `PhenotypingReader`, `Discretizer` and
# `Normalizer` are defined outside this excerpt.

# Both readers point at the train/ directory; only the listfile differs
# (train vs. validation split of the same episodes).
train_reader = PhenotypingReader(
    dataset_dir='../../data/phenotyping/train/',
    listfile='../../data/phenotyping/train_listfile.csv')

val_reader = PhenotypingReader(
    dataset_dir='../../data/phenotyping/train/',
    listfile='../../data/phenotyping/val_listfile.csv')

# Bin the irregular time series into fixed-width steps.
# NOTE(review): `imput_strategy` (sic) is the keyword this library version
# expects -- do not "correct" the spelling without checking Discretizer's
# signature (later versions renamed it to `impute_strategy`).
discretizer = Discretizer(timestep=float(args.timestep),
                          store_masks=True,
                          imput_strategy='previous',
                          start_time='zero')

# Discretize one example just to recover the output header; continuous
# channels are the columns whose name carries no categorical "->" marker.
discretizer_header = discretizer.transform(
    train_reader.read_example(0)[0])[1].split(',')
cont_channels = [
    i for (i, x) in enumerate(discretizer_header) if x.find("->") == -1
]

normalizer = Normalizer(fields=cont_channels)  # choose here: only continuous channels vs. all
normalizer.load_params(
    'ph_ts%s.input_str:previous.start_time:zero.normalizer' % args.timestep)

# NOTE(review): the validation split is bound to the name `test_raw` here;
# confirm downstream code expects that name before renaming it to val_raw.
train_raw = utils.load_phenotypes(train_reader, discretizer, normalizer,
                                  args.small_part)
test_raw = utils.load_phenotypes(val_reader, discretizer, normalizer,
                                 args.small_part)

# Expose the parsed CLI arguments as a plain dict (argparse internal API).
args_dict = dict(args._get_kwargs())
args_dict['train_raw'] = train_raw
# Example #2 (score: 0)
# NOTE(review): truncated fragment -- the `concepts` dict literal is opened
# before this excerpt; each entry maps an observation header to concept
# metadata (uuid, units, value type).
        'type': 'numeric'
    },
    'Weight': {
        'uuid': '5089AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA',
        'units': 'kg',
        'type': 'units'
    },
    'pH': {
        #		'uuid':'4ffa7181-71a4-412b-8812-b25bc2a6aa47',
        # uuid left False, so pH values are filtered out by the
        # `if (concept_uuid):` guard in the loop below.
        'uuid': False,
        'units': '',
        'type': 'numeric'
    }
}

# Read one benchmark record and convert each timestep row into a dated
# set of measurements keyed by the concept metadata above.
benchmark = phenotyping.read_example(record_num)
# Record names are split on '_'; the first token is taken as patient id.
patient_id = benchmark['name'].split('_')[0]
benchmark_observations = benchmark['X']
headers = benchmark['header']
observations = []
for step in benchmark_observations:
    measurements = {}
    i = 0
    # Column 0 holds hours since start; anchor at a synthetic epoch
    # (2000-01-01) to produce an ISO-8601 timestamp string.
    date = str((datetime(2000, 1, 1, 0, 0) +
                timedelta(hours=float(step[0]))).isoformat())
    for value in step:
        # Skip the time column (i == 0) and empty-string values.
        if (i > 0 and len(value) > 0):
            concept_uuid = concepts[headers[i]]['uuid']
            value_type = concepts[headers[i]]['type']
            units = concepts[headers[i]]['units']
            if (concept_uuid):
                # NOTE(review): the fragment is cut off here -- the loop
                # body continues past this excerpt. Also `i` is never
                # incremented in the visible lines; confirm the missing
                # tail performs `i += 1`, otherwise the header lookup is
                # stuck at index 0.
# Example #3 (score: 0)
# Replicate the target at every timestep only when training with a
# positive replication coefficient.
target_repl = (args.target_repl_coef > 0.0 and args.mode == 'train')

# Build readers, discretizers, normalizers
# Both readers point at the train/ directory; only the listfile differs
# (train vs. validation split).
train_reader = PhenotypingReader(dataset_dir='../../data/phenotyping/train/',
                                 listfile='../../data/phenotyping/train_listfile.csv')

val_reader = PhenotypingReader(dataset_dir='../../data/phenotyping/train/',
                               listfile='../../data/phenotyping/val_listfile.csv')

# NOTE(review): `imput_strategy` (sic) is the keyword name this library
# version uses; later versions spell it `impute_strategy` -- do not
# "correct" it without checking Discretizer's signature.
discretizer = Discretizer(timestep=float(args.timestep),
                          store_masks=True,
                          imput_strategy='previous',
                          start_time='zero')

# Discretize one example to recover the output header; columns whose name
# lacks the categorical "->" marker are the continuous channels.
discretizer_header = discretizer.transform(train_reader.read_example(0)["X"])[1].split(',')
cont_channels = [i for (i, x) in enumerate(discretizer_header) if x.find("->") == -1]

normalizer = Normalizer(fields=cont_channels)  # choose here: only continuous channels vs. all
normalizer.load_params('ph_ts{}.input_str:previous.start_time:zero.normalizer'.format(args.timestep))

# Expose the parsed CLI arguments (argparse internal API) and extend them
# with model parameters; the phenotyping benchmark has 25 label classes.
args_dict = dict(args._get_kwargs())
args_dict['header'] = discretizer_header
args_dict['task'] = 'ph'
args_dict['num_classes'] = 25
args_dict['target_repl'] = target_repl

# Build the model
# NOTE(review): Python 2 print statement and the long-deprecated `imp`
# module -- this snippet predates Python 3 and cannot run under it.
print "==> using model {}".format(args.network)
model_module = imp.load_source(os.path.basename(args.network), args.network)
model = model_module.Network(**args_dict)
# Example #4 (score: 0)
# Replicate the target at every timestep only when training with a
# positive replication coefficient.
target_repl = (args.target_repl_coef > 0.0 and args.mode == 'train')

# Build readers, discretizers, normalizers
# Both readers point at the train/ data directory; only the listfile
# differs (train vs. validation split).
train_reader = PhenotypingReader(dataset_dir=os.path.join(args.data, 'train'),
                                 listfile=os.path.join(args.data, 'train_listfile.csv'))

val_reader = PhenotypingReader(dataset_dir=os.path.join(args.data, 'train'),
                               listfile=os.path.join(args.data, 'val_listfile.csv'))

# Bin the irregular time series into fixed-width steps with missingness
# masks; 'previous' presumably forward-fills missing values -- confirm
# against Discretizer's documentation.
discretizer = Discretizer(timestep=float(args.timestep),
                          store_masks=True,
                          impute_strategy='previous',
                          start_time='zero')

# Discretize one example to recover the output header; columns whose name
# lacks the categorical "->" marker are the continuous channels.
discretizer_header = discretizer.transform(train_reader.read_example(0)["X"])[1].split(',')
cont_channels = [i for (i, x) in enumerate(discretizer_header) if x.find("->") == -1]

normalizer = Normalizer(fields=cont_channels)  # choose here which columns to standardize
# Fall back to a default normalizer-state file, resolved relative to this
# script's directory, when none was supplied on the command line.
normalizer_state = args.normalizer_state
if normalizer_state is None:
    normalizer_state = 'ph_ts{}.input_str:previous.start_time:zero.normalizer'.format(args.timestep)
    normalizer_state = os.path.join(os.path.dirname(__file__), normalizer_state)
normalizer.load_params(normalizer_state)

# Expose the parsed CLI arguments (argparse internal API) and extend them
# with model parameters; the phenotyping benchmark has 25 label classes.
args_dict = dict(args._get_kwargs())
args_dict['header'] = discretizer_header
args_dict['task'] = 'ph'
args_dict['num_classes'] = 25
args_dict['target_repl'] = target_repl