# Load data and prepare generators
if args.deep_supervision:
    train_data_gen = utils.BatchGenDeepSupervision(train_data_loader,
                                                   discretizer,
                                                   normalizer,
                                                   args.batch_size,
                                                   shuffle=True)
    val_data_gen = utils.BatchGenDeepSupervision(val_data_loader,
                                                 discretizer,
                                                 normalizer,
                                                 args.batch_size,
                                                 shuffle=False)
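    # Each batch from BatchGenDeepSupervision is assumed to pair the inputs
    # with a per-timestep mask (roughly ([X, mask], y)), so the loss is
    # applied only at timesteps where a target is defined.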
else:
    # Set number of batches in one epoch
    train_nbatches = 2000
    val_nbatches = 1000
    if args.small_part:
        train_nbatches = 40
        val_nbatches = 40
    train_data_gen = utils.BatchGen(train_reader, discretizer, normalizer,
                                    args.batch_size, train_nbatches, True)
    val_data_gen = utils.BatchGen(val_reader, discretizer, normalizer,
                                  args.batch_size, val_nbatches, False)
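    # Note: the BatchGen path caps an epoch at train_nbatches/val_nbatches
    # batches (cut to 40 when args.small_part is set, for quick smoke tests);
    # the trailing True/False presumably toggles shuffling, mirroring
    # shuffle= in the deep-supervision branch.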

if args.mode == 'train':

    # Prepare training
    path = os.path.join(
        args.output_dir, 'keras_states/' + model.final_name +
        '.chunk{epoch}.test{val_loss}.state')
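    # {epoch} and {val_loss} are format placeholders that Keras'
    # ModelCheckpoint fills in when it writes each checkpoint file.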

    metrics_callback = keras_utils.DecompensationMetrics(
        train_data_gen=train_data_gen,
        val_data_gen=val_data_gen,
        deep_supervision=args.deep_supervision,
        batch_size=args.batch_size,
        verbose=args.verbose)
    # make sure save directory exists
    dirname = os.path.dirname(path)
    if not os.path.exists(dirname):
        os.makedirs(dirname)
    saver = ModelCheckpoint(path, verbose=1, period=args.save_every)
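    # Note: period=N saves a checkpoint every N epochs; newer Keras releases
    # deprecate 'period' in favor of 'save_freq'.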