Example #1
K.set_session(sess)  # attach the TensorFlow session created earlier (not shown) to the Keras backend

if args.small_part:
    args.save_every = 2**30  # effectively disable periodic checkpointing on the small subset

# Build readers, discretizers, normalizers
train_reader = DecompensationReader(
    dataset_dir=os.path.join(args.data, 'train'),
    listfile=os.path.join(args.data, 'train_listfile.csv'))
val_reader = DecompensationReader(
    dataset_dir=os.path.join(args.data, 'train'),
    listfile=os.path.join(args.data, 'val_listfile.csv'))

discretizer = OneHotEncoder(impute_strategy=args.imputation)
discretizer_header = discretizer.transform(
    train_reader.read_example(0)["X"])[2].split(',')

# columns without "->" hold raw values; one-hot columns are named "<channel>-><value>"
cont_channels = [i for (i, x) in enumerate(discretizer_header) if "->" not in x]

normalizer = Normalizer(
    fields=cont_channels)  # choose here which columns to standardize
normalizer_state = args.normalizer_state
if normalizer_state is None:
    normalizer_state = 'decomp_onehotenc_n:{}.normalizer'.format(
        args.n_samples)
    normalizer_state = os.path.join(os.path.dirname(__file__),
                                    normalizer_state)
normalizer.load_params(normalizer_state)
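
If the state file is missing, it has to be produced once from the training data before this script can run. A minimal sketch of that one-off step, assuming the mimic3-benchmarks-style Normalizer API (`_feed_data` / `_save_params`) and that the transformed matrix is the first element returned by `transform` (both are assumptions, not shown in the snippet):

# One-off: accumulate per-channel statistics over the training set, then
# persist them to the state file that load_params() reads above.
n_samples = 1000  # hypothetical count; the real script derives this from args.n_samples
normalizer = Normalizer(fields=cont_channels)
for i in range(min(n_samples, train_reader.get_number_of_examples())):
    ex = train_reader.read_example(i)
    X = discretizer.transform(ex["X"])[0]  # discretized matrix only, header dropped
    normalizer._feed_data(X)
normalizer._save_params('decomp_onehotenc_n:{}.normalizer'.format(n_samples))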
Example #2
train_reader = DecompensationReader(
    dataset_dir='../../data/decompensation/train/',
    listfile='../../data/decompensation/train_listfile.csv')

val_reader = DecompensationReader(
    dataset_dir='../../data/decompensation/train/',
    listfile='../../data/decompensation/val_listfile.csv')

discretizer = Discretizer(timestep=args.timestep,
                          store_masks=True,
                          imput_strategy='previous',  # sic: this Discretizer version spells the kwarg without the 'e'
                          start_time='zero')

discretizer_header = discretizer.transform(
    train_reader.read_example(0)[0])[1].split(',')
cont_channels = [i for (i, x) in enumerate(discretizer_header) if "->" not in x]

normalizer = Normalizer(fields=cont_channels)  # standardize continuous channels only (vs. all)
normalizer.load_params(
    'decomp_ts{}.input_str:previous.n1e5.start_time:zero.normalizer'.format(
        args.timestep))

args_dict = dict(args._get_kwargs())
args_dict['header'] = discretizer_header

# dynamically import the requested network module
print("==> using network %s" % args.network)
network_module = importlib.import_module("networks." + args.network)
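
For context, `importlib.import_module("networks." + args.network)` expects a package layout like `networks/lstm.py`, with each module exposing a `Network` class that accepts the flattened argument dict. A sketch of such a module (names and parameters are illustrative, not from the source):

# networks/lstm.py -- hypothetical module resolved by the import above
class Network(object):
    def __init__(self, header=None, dim=256, **kwargs):
        # take what this network needs from args_dict; ignore the rest
        self.header = header
        self.dim = dim

    def say_name(self):
        # training scripts use this to build checkpoint/log file names
        return "lstm.dim{}".format(self.dim)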
Example #3
if args.deep_supervision:
    # reconstructed opening (the snippet begins mid-call): build deep-supervision data loaders
    train_data_loader = common_utils.DeepSupervisionDataLoader(dataset_dir='../../data/decompensation/train/',
                                                               listfile='../../data/decompensation/train_listfile.csv',
                                                               small_part=args.small_part)
    val_data_loader = common_utils.DeepSupervisionDataLoader(dataset_dir='../../data/decompensation/train/',
                                                             listfile='../../data/decompensation/val_listfile.csv',
                                                             small_part=args.small_part)
else:
    train_reader = DecompensationReader(dataset_dir='../../data/decompensation/train/',
                                        listfile='../../data/decompensation/train_listfile.csv')
    val_reader = DecompensationReader(dataset_dir='../../data/decompensation/train/',
                                      listfile='../../data/decompensation/val_listfile.csv')

discretizer = Discretizer(timestep=args.timestep,
                          store_masks=True,
                          imput_strategy='previous',
                          start_time='zero')

if args.deep_supervision:
    discretizer_header = discretizer.transform(train_data_loader._data["X"][0])[1].split(',')
else:
    discretizer_header = discretizer.transform(train_reader.read_example(0)["X"])[1].split(',')
cont_channels = [i for (i, x) in enumerate(discretizer_header) if "->" not in x]

normalizer = Normalizer(fields=cont_channels)  # standardize continuous channels only (vs. all)
normalizer.load_params('decomp_ts{}.input_str:previous.n1e5.start_time:zero.normalizer'.format(args.timestep))

args_dict = dict(args._get_kwargs())
args_dict['header'] = discretizer_header
args_dict['task'] = 'decomp'


# Build the model
print "==> using model {}".format(args.network)
model_module = imp.load_source(os.path.basename(args.network), args.network)
model = model_module.Network(**args_dict)
suffix = "{}.bs{}{}{}.ts{}".format("" if not args.deep_supervision else ".dsup",
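
`imp.load_source` is deprecated (the `imp` module is removed as of Python 3.12). The same load-a-module-from-a-path step can be written with `importlib.util`; a sketch, assuming `args.network` is a filesystem path to a module defining `Network`:

import os
import importlib.util

def load_source(name, path):
    # modern equivalent of imp.load_source(name, path)
    spec = importlib.util.spec_from_file_location(name, path)
    module = importlib.util.module_from_spec(spec)
    spec.loader.exec_module(module)
    return module

model_module = load_source(os.path.basename(args.network), args.network)
model = model_module.Network(**args_dict)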
Example #4
parser.set_defaults(small_part=False)
args = parser.parse_args()
print(args)

train_reader = DecompensationReader(dataset_dir='../../data/decompensation/train/',
                                    listfile='../../data/decompensation/train_listfile.csv')

val_reader = DecompensationReader(dataset_dir='../../data/decompensation/train/',
                                  listfile='../../data/decompensation/val_listfile.csv')

discretizer = Discretizer(timestep=args.timestep,
                          store_masks=True,
                          imput_strategy='previous',
                          start_time='zero')

discretizer_header = discretizer.transform(train_reader.read_example(0)[0])[1].split(',')
cont_channels = [i for (i, x) in enumerate(discretizer_header) if "->" not in x]

normalizer = Normalizer(fields=cont_channels)  # standardize continuous channels only (vs. all)
normalizer.load_params('decomp_ts0.8.input_str:previous.n1e5.start_time:zero.normalizer')  # note: state file is hard-coded for timestep 0.8

args_dict = dict(args._get_kwargs())

# dynamically import the requested network module
print("==> using network %s" % args.network)
network_module = importlib.import_module("networks." + args.network)
network = network_module.Network(**args_dict)
time_step_suffix = ".ts%.2f" % args.timestep
network_name = args.prefix + network.say_name() + time_step_suffix
print "==> network_name:", network_name
Example #5
        condensed=args.condensed)
    val_reader = DecompensationReader(
        dataset_dir=os.path.join(args.data, 'train'),
        listfile=os.path.join(args.data, 'val_listfile.csv'),
        sources=sources,
        timesteps=args.timesteps,
        condensed=args.condensed)

train_reader = DecompensationReader(
    dataset_dir=os.path.join(args.data, 'train'),
    listfile=os.path.join(args.data, 'train_listfile.csv'),
    sources=sources,
    timesteps=args.timesteps,
    condensed=args.condensed)

example = train_reader.read_example(0)
reader_header = example['header']
n_bins = len(example)

discretizer = Discretizer(timestep=args.timestep,
                          store_masks=True,
                          impute_strategy='previous',
                          start_time='zero',
                          header=reader_header,
                          sources=sources)

if args.deep_supervision:
    discretizer_header = discretizer.transform(
        train_data_loader._data["X"][0])[1].split(',')
else:
    discretizer_header = discretizer.transform(
        train_reader.read_example(0)["X"])[1].split(',')
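
For reference, readers in this style return a dict per example; the keys used above are 'X' (the raw episode matrix) and 'header' (its column names), and `transform` returns the discretized matrix plus a single comma-separated header string, which is why the `[1].split(',')` step works. A small sketch that walks a few examples through the pipeline under that assumed contract:

# Sanity-check the reader -> discretizer -> normalizer chain on a few examples.
for i in range(3):
    ex = train_reader.read_example(i)        # dict with 'X', 'header', label fields
    X_disc, disc_header = discretizer.transform(ex["X"])[:2]
    X_norm = normalizer.transform(X_disc)    # standardize the continuous channels
    print(i, X_norm.shape, len(disc_header.split(',')))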