# Target replication is only meaningful while training.
target_repl = (args.target_repl_coef > 0.0 and args.mode == 'train')

# Build readers, discretizers, normalizers.
# NOTE(review): paths are hard-coded relative to the script location;
# sibling variants of this script take them from args.data — TODO unify.
train_reader = MultitaskReader(dataset_dir='../../data/multitask/train/',
                               listfile='../../data/multitask/train_listfile.csv')
val_reader = MultitaskReader(dataset_dir='../../data/multitask/train/',
                             listfile='../../data/multitask/val_listfile.csv')

# 'imput_strategy' (sic) is the keyword this Discretizer version exposes;
# do not "fix" the spelling without changing the class as well.
discretizer = Discretizer(timestep=args.timestep,
                          store_masks=True,
                          imput_strategy='previous',
                          start_time='zero')

# Discretize one example just to learn the output header / column layout.
# NOTE(review): other variants index read_example(0)["X"] instead of [0] —
# presumably this reader version returns a tuple; confirm against the reader.
discretizer_header = discretizer.transform(train_reader.read_example(0)[0])[1].split(',')
# Continuous channels are the ones without a "->" (one-hot) marker in the header.
cont_channels = [i for (i, x) in enumerate(discretizer_header) if x.find("->") == -1]

normalizer = Normalizer(fields=cont_channels)  # choose here onlycont vs all
normalizer.load_params('mult_ts%s.input_str:%s.start_time:zero.normalizer' % (args.timestep, args.imputation))

# Collect every CLI option plus derived values into the kwargs for Network().
args_dict = dict(args._get_kwargs())
args_dict['header'] = discretizer_header
# Index of the bin that corresponds to hour 48 (in-hospital-mortality target);
# the 1e-6 guards against float rounding pushing int() to the next bin.
args_dict['ihm_pos'] = int(48.0 / args.timestep - 1e-6)
args_dict['target_repl'] = target_repl

# Build the model.
# Parenthesized print works under both Python 2 and 3 for a single argument
# (the original Python-2-only `print "..."` statement is a syntax error on 3).
print("==> using model {}".format(args.network))
model_module = imp.load_source(os.path.basename(args.network), args.network)
model = model_module.Network(**args_dict)
network = model  # alias
target_repl = (args.target_repl_coef > 0.0 and args.mode == 'train') # Build readers, discretizers, normalizers train_reader = MultitaskReader(dataset_dir='../../data/multitask/train/', listfile='../../data/multitask/train_listfile.csv') val_reader = MultitaskReader(dataset_dir='../../data/multitask/train/', listfile='../../data/multitask/val_listfile.csv') discretizer = Discretizer(timestep=args.timestep, store_masks=True, imput_strategy='previous', start_time='zero') discretizer_header = discretizer.transform(train_reader.read_example(0)["X"])[1].split(',') cont_channels = [i for (i, x) in enumerate(discretizer_header) if x.find("->") == -1] normalizer = Normalizer(fields=cont_channels) # choose here onlycont vs all normalizer.load_params('mult_ts%s.input_str:%s.start_time:zero.normalizer' % (args.timestep, args.imputation)) args_dict = dict(args._get_kwargs()) args_dict['header'] = discretizer_header args_dict['ihm_pos'] = int(48.0 / args.timestep - 1e-6) args_dict['target_repl'] = target_repl # Build the model print "==> using model {}".format(args.network) model_module = imp.load_source(os.path.basename(args.network), args.network) model = model_module.Network(**args_dict) suffix = ".bs{}{}{}.ts{}{}_partition={}_ihm={}_decomp={}_los={}_pheno={}".format(
# Tag the experiment name when running on the condensed representation.
if args.condensed:
    experiment_name += 'condensed_'
# Effectively disable periodic checkpointing for quick small-part runs.
if args.small_part:
    args.save_every = 2 ** 30

# Target replication only applies while training.
target_repl = args.target_repl_coef > 0.0 and args.mode == 'train'

# Build readers, discretizers, normalizers (val split lives under train/).
train_dir = os.path.join(args.data, 'train')
train_reader = MultitaskReader(dataset_dir=train_dir,
                               listfile=os.path.join(args.data, 'train_listfile.csv'),
                               sources=sources,
                               timesteps=args.timesteps,
                               condensed=args.condensed)
val_reader = MultitaskReader(dataset_dir=train_dir,
                             listfile=os.path.join(args.data, 'val_listfile.csv'),
                             sources=sources,
                             timesteps=args.timesteps,
                             condensed=args.condensed)

# Inspect a single example to recover the raw header.
reader_header = train_reader.read_example(0)['header']
# NOTE(review): this is the number of keys in the example dict, not an
# obvious "bin" count — confirm the intent against the reader's return type.
n_bins = len(train_reader.read_example(0))

discretizer = Discretizer(timestep=args.timestep,
                          store_masks=True,
                          impute_strategy='previous',
                          start_time='zero',
                          header=reader_header,
                          sources=sources)

# Discretize one example to learn the produced column layout.
discretizer_header = discretizer.transform(train_reader.read_example(0)["X"])[1].split(',')
# Columns without a "->" marker are continuous (not one-hot encoded).
cont_channels = [idx for (idx, col) in enumerate(discretizer_header) if "->" not in col]

normalizer = Normalizer(fields=cont_channels)  # choose here which columns to standardize
normalizer_state = args.normalizer_state
if normalizer_state is None:
    # Fall back to the default params file shipped next to this script.
    normalizer_state = 'mult_ts{}.input_str_{}.start_time_zero.normalizer'.format(args.timestep,
                                                                                  args.imputation)
    normalizer_state = os.path.join(os.path.dirname(__file__), normalizer_state)
# Build readers, discretizers, normalizers (val split lives under train/).
train_dir = os.path.join(args.data, 'train')
train_reader = MultitaskReader(dataset_dir=train_dir,
                               listfile=os.path.join(args.data, 'train_listfile.csv'))
val_reader = MultitaskReader(dataset_dir=train_dir,
                             listfile=os.path.join(args.data, 'val_listfile.csv'))

discretizer = Discretizer(timestep=args.timestep,
                          store_masks=True,
                          impute_strategy='previous',
                          start_time='zero')

# Discretize one example only to learn the produced column layout.
sample_X = train_reader.read_example(0)["X"]
discretizer_header = discretizer.transform(sample_X)[1].split(',')
# Columns without a "->" marker are continuous (not one-hot encoded).
cont_channels = [idx for (idx, col) in enumerate(discretizer_header) if "->" not in col]

normalizer = Normalizer(fields=cont_channels)  # choose here which columns to standardize
normalizer_state = args.normalizer_state
if normalizer_state is None:
    # Fall back to the default params file shipped next to this script.
    normalizer_state = 'mult_ts{}.input_str_{}.start_time_zero.normalizer'.format(args.timestep,
                                                                                  args.imputation)
    normalizer_state = os.path.join(os.path.dirname(__file__), normalizer_state)
normalizer.load_params(normalizer_state)

# Collect every CLI option into the kwargs dict handed to the model builder.
args_dict = dict(args._get_kwargs())