Example #1
File: retrain.py  Project: sambuddinc/DLTK
def tune_train(args):
    np.random.seed(42)
    tf.set_random_seed(42)

    print('Setting up...')

    # Parse csv files for file names
    patch_filenames = pd.read_csv(args.train_csv,
                                  dtype=object,
                                  keep_default_na=False,
                                  na_values=[]).values  # .as_matrix() was removed in pandas 1.0

    subj_filenames = pd.read_csv(args.val_csv,
                                 dtype=object,
                                 keep_default_na=False,
                                 na_values=[]).values

    app_json = get_config_for_app()

    val_filenames = []

    for row in subj_filenames:
        if row[4] == '1':
            val_filenames.append(row)

    # Set up a data reader to handle the file i/o.
    reader_params = {
        'n_examples': 16,
        'example_size': [1, 64, 64],
        'extract_examples': True
    }
    num_channels = app_json['num_channels']
    reader_example_shapes = {
        'features': {
            'x': reader_params['example_size'] + [
                num_channels,
            ]
        },
        'labels': {
            'y': reader_params['example_size']
        }
    }

    # module_name = 'contributions.applications.AL_framework.applications.app' + str(app_json['id']) + '.readers.'
    #
    # if app_json['reader_type'] == "Patch":
    #     module_name = module_name + 'patch_reader'
    # elif app_json['reader_type'] == "Slice":
    #     module_name = module_name + 'slice_reader'
    # elif app_json['reader_type'] == "Stack":
    #     module_name = module_name + 'stack_reader'
    # else:
    #     print("Unsupported reader type: please specify a new one")
    #     return

    # mod = import_module(module_name)
    mod = import_module('readers.tune_reader')
    read_fn = getattr(mod, 'read_fn')

    reader = Reader(read_fn, {
        'features': {
            'x': tf.float32
        },
        'labels': {
            'y': tf.int32
        }
    })

    # Get input functions and queue initialisation hooks for training and
    # validation data
    batch_size = app_json['batch_size']
    train_input_fn, train_qinit_hook = reader.get_inputs(
        file_references=patch_filenames,
        mode=tf.estimator.ModeKeys.TRAIN,
        example_shapes=reader_example_shapes,
        batch_size=batch_size,
        shuffle_cache_size=SHUFFLE_CACHE_SIZE,
        params=reader_params)

    val_input_fn, val_qinit_hook = reader.get_inputs(
        file_references=val_filenames,
        mode=tf.estimator.ModeKeys.EVAL,
        example_shapes=reader_example_shapes,
        batch_size=batch_size,
        shuffle_cache_size=SHUFFLE_CACHE_SIZE,
        params=reader_params)

    # Instantiate the neural network estimator
    nn = tf.estimator.Estimator(model_fn=model_fn,
                                model_dir=args.model_path,
                                params={"learning_rate": 0.001},
                                config=tf.estimator.RunConfig())

    # Hooks for validation summaries
    val_summary_hook = tf.contrib.training.SummaryAtEndHook(
        os.path.join(args.model_path, 'eval'))
    step_cnt_hook = tf.train.StepCounterHook(every_n_steps=EVAL_EVERY_N_STEPS,
                                             output_dir=args.model_path)

    print('Starting tuning...')
    max_steps = app_json['max_steps']
    try:
        for _ in range(max_steps // EVAL_EVERY_N_STEPS):
            nn.train(input_fn=train_input_fn,
                     hooks=[train_qinit_hook, step_cnt_hook],
                     steps=EVAL_EVERY_N_STEPS)

            if args.run_validation:
                results_val = nn.evaluate(
                    input_fn=val_input_fn,
                    hooks=[val_qinit_hook, val_summary_hook],
                    steps=EVAL_STEPS)
                print('Step = {}; val loss = {:.5f};'.format(
                    results_val['global_step'], results_val['loss']))

    except KeyboardInterrupt:
        pass

    print('Stopping now.')
    export_dir = nn.export_savedmodel(
        export_dir_base=args.model_path,
        serving_input_receiver_fn=reader.serving_input_receiver_fn(
            reader_example_shapes))
    print('Model saved to {}.'.format(export_dir))
    app_json['model_status'] = 2
    write_app_config(app_json)
    print('Updated model status in model config')
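The function above assumes several module-level names that are defined elsewhere in retrain.py: the DLTK Reader class, the network's model_fn, the get_config_for_app/write_app_config helpers, and the SHUFFLE_CACHE_SIZE, EVAL_EVERY_N_STEPS and EVAL_STEPS constants. A minimal sketch of that preamble is shown below; the constant values and the commented local imports are illustrative assumptions, not taken from the repository.

import os
from importlib import import_module

import numpy as np
import pandas as pd
import tensorflow as tf

from dltk.io.abstract_reader import Reader  # DLTK's queue-based data reader

# model_fn and the app-config helpers live elsewhere in the project (paths assumed):
# from architectures.network import model_fn
# from config_utils import get_config_for_app, write_app_config

# Illustrative values; the real script defines its own.
EVAL_EVERY_N_STEPS = 100   # run validation after this many training steps
EVAL_STEPS = 1             # number of batches evaluated per validation run
SHUFFLE_CACHE_SIZE = 64    # size of the reader's shuffle buffer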
Example #2
File: train.py  Project: sambuddinc/DLTK
def train(args):
    np.random.seed(42)
    tf.set_random_seed(42)

    print('Setting up...')

    # Parse csv files for file names
    all_filenames = pd.read_csv(args.train_csv,
                                dtype=object,
                                keep_default_na=False,
                                na_values=[]).values

    train_filenames = all_filenames[1:10]
    val_filenames = all_filenames[10:12]

    # Set up a data reader to handle the file i/o.
    reader_params = {
        'n_examples': 16,
        'example_size': [1, 64, 64],
        'extract_examples': True
    }
    reader_example_shapes = {
        'features': {
            'x': reader_params['example_size'] + [
                NUM_CHANNELS,
            ]
        },
        'labels': {
            'y': reader_params['example_size']
        }
    }
    reader = Reader(read_fn, {
        'features': {
            'x': tf.float32
        },
        'labels': {
            'y': tf.int32
        }
    })

    # Get input functions and queue initialisation hooks for training and
    # validation data
    train_input_fn, train_qinit_hook = reader.get_inputs(
        file_references=train_filenames,
        mode=tf.estimator.ModeKeys.TRAIN,
        example_shapes=reader_example_shapes,
        batch_size=BATCH_SIZE,
        shuffle_cache_size=SHUFFLE_CACHE_SIZE,
        params=reader_params)

    val_input_fn, val_qinit_hook = reader.get_inputs(
        file_references=val_filenames,
        mode=tf.estimator.ModeKeys.EVAL,
        example_shapes=reader_example_shapes,
        batch_size=BATCH_SIZE,
        shuffle_cache_size=SHUFFLE_CACHE_SIZE,
        params=reader_params)

    # Instantiate the neural network estimator
    nn = tf.estimator.Estimator(model_fn=model_fn,
                                model_dir=args.model_path,
                                params={"learning_rate": 0.001},
                                config=tf.estimator.RunConfig())

    # Hooks for validation summaries
    val_summary_hook = tf.contrib.training.SummaryAtEndHook(
        os.path.join(args.model_path, 'eval'))
    step_cnt_hook = tf.train.StepCounterHook(every_n_steps=EVAL_EVERY_N_STEPS,
                                             output_dir=args.model_path)

    print('Starting training...')
    try:
        for _ in range(MAX_STEPS // EVAL_EVERY_N_STEPS):
            nn.train(input_fn=train_input_fn,
                     hooks=[train_qinit_hook, step_cnt_hook],
                     steps=EVAL_EVERY_N_STEPS)

            if args.run_validation:
                results_val = nn.evaluate(
                    input_fn=val_input_fn,
                    hooks=[val_qinit_hook, val_summary_hook],
                    steps=EVAL_STEPS)
                print('Step = {}; val loss = {:.5f};'.format(
                    results_val['global_step'], results_val['loss']))

    except KeyboardInterrupt:
        pass

    print('Stopping now.')
    export_dir = nn.export_savedmodel(
        export_dir_base=args.model_path,
        serving_input_receiver_fn=reader.serving_input_receiver_fn(
            reader_example_shapes))
    print('Model saved to {}.'.format(export_dir))
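Unlike Example #1, this script imports read_fn directly rather than resolving it at runtime. In DLTK, read_fn is a plain Python generator that receives the file references, the estimator mode and the reader params, and yields dicts whose keys and dtypes match those declared when constructing the Reader. The sketch below is a hypothetical reader for the shapes used above; the csv column layout and the use of SimpleITK are assumptions.

import numpy as np
import SimpleITK as sitk  # assumed; DLTK readers commonly load images via SimpleITK
import tensorflow as tf


def read_fn(file_references, mode, params=None):
    # Hypothetical reader: yields one example dict per subject.
    for f in file_references:
        img_path, lbl_path = f[0], f[1]  # csv column layout is an assumption

        x = sitk.GetArrayFromImage(sitk.ReadImage(str(img_path))).astype(np.float32)
        x = x[..., np.newaxis]  # append the channel axis expected by 'x'

        if mode == tf.estimator.ModeKeys.PREDICT:
            yield {'features': {'x': x}}
            continue

        y = sitk.GetArrayFromImage(sitk.ReadImage(str(lbl_path))).astype(np.int32)

        # A real reader would usually honour params['extract_examples'] here by
        # cropping params['n_examples'] patches of params['example_size'], e.g.
        # with dltk.io.augmentation.extract_random_example_array; omitted in this sketch.
        yield {'features': {'x': x}, 'labels': {'y': y}}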
Example #3
File: train.py  Project: Mulugeta/DLTK
def train(args):
    np.random.seed(42)
    tf.set_random_seed(42)

    print('Setting up...')

    # Parse csv files for file names
    all_filenames = pd.read_csv(
        args.data_csv,
        dtype=object,
        keep_default_na=False,
        na_values=[]).values

    train_filenames = all_filenames[:100]
    val_filenames = all_filenames[100:]

    # Set up a data reader to handle the file i/o.
    reader_params = {'n_examples': 8,
                     'example_size': [32, 128, 128],
                     'extract_examples': True}
    reader_example_shapes = {'features': {'x': reader_params['example_size'] + [NUM_CHANNELS, ]}}

    reader = Reader(read_fn, {'features': {'x': tf.float32}})

    # Get input functions and queue initialisation hooks for training and
    # validation data
    train_input_fn, train_qinit_hook = reader.get_inputs(
        file_references=train_filenames,
        mode=tf.estimator.ModeKeys.TRAIN,
        example_shapes=reader_example_shapes,
        batch_size=BATCH_SIZE,
        shuffle_cache_size=SHUFFLE_CACHE_SIZE,
        params=reader_params)

    val_input_fn, val_qinit_hook = reader.get_inputs(
        file_references=val_filenames,
        mode=tf.estimator.ModeKeys.EVAL,
        example_shapes=reader_example_shapes,
        batch_size=BATCH_SIZE,
        shuffle_cache_size=SHUFFLE_CACHE_SIZE,
        params=reader_params)

    # Instantiate the neural network estimator
    nn = tf.estimator.Estimator(
        model_fn=model_fn,
        model_dir=args.model_path,
        params={'learning_rate': 0.01, 'upsampling_factor': UPSAMPLING_FACTOR},
        config=tf.estimator.RunConfig())

    # Hooks for validation summaries
    val_summary_hook = tf.contrib.training.SummaryAtEndHook(
        os.path.join(args.model_path, 'eval'))
    step_cnt_hook = tf.train.StepCounterHook(
        every_n_steps=EVAL_EVERY_N_STEPS,
        output_dir=args.model_path)

    print('Starting training...')
    try:
        for _ in range(MAX_STEPS // EVAL_EVERY_N_STEPS):
            nn.train(
                input_fn=train_input_fn,
                hooks=[train_qinit_hook, step_cnt_hook],
                steps=EVAL_EVERY_N_STEPS)

            if args.run_validation:
                results_val = nn.evaluate(
                    input_fn=val_input_fn,
                    hooks=[val_qinit_hook, val_summary_hook],
                    steps=EVAL_STEPS)
                print('Step = {}; val loss = {:.5f};'.format(
                    results_val['global_step'], results_val['loss']))

    except KeyboardInterrupt:
        pass

    print('Stopping now.')
    export_dir = nn.export_savedmodel(
        export_dir_base=args.model_path,
        serving_input_receiver_fn=reader.serving_input_receiver_fn(reader_example_shapes))
    print('Model saved to {}.'.format(export_dir))
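Each train(args) in these examples only reads a few attributes from args: the csv path(s), model_path and run_validation. A typical command-line entry point, sketched here with assumed flag names and defaults, looks like this.

import argparse

if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='DLTK example training script')
    parser.add_argument('--run_validation', default=True)
    parser.add_argument('--data_csv', default='data.csv')  # or --train_csv / --val_csv
    parser.add_argument('--model_path', default='/tmp/dltk_model/')
    args = parser.parse_args()

    train(args)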
Example #4
def train(args):
    np.random.seed(42)
    tf.set_random_seed(42)

    print('Setting up...')

    # Parse csv files for file names

    target_df = pd.read_csv('/data1/users/adoyle/IBIS/ibis_t1_qc.csv', dtype=object, keep_default_na=False)

    all_filenames = target_df.iloc[:, 0].tolist()
    all_labels = target_df.iloc[:, 1].tolist()

    skf = StratifiedKFold(n_splits=10)

    # Note: StratifiedKFold.split yields index arrays, so train_filenames and
    # val_filenames here are indices into all_filenames/all_labels; the read_fn
    # is expected to map them back to the actual file names and labels.
    for train_filenames, val_filenames in skf.split(all_filenames, all_labels):

        # Set up a data reader to handle the file i/o.
        reader_params = {'n_examples': 2,
                         'example_size': [160, 256, 224],
                         'extract_examples': True}

        reader_example_shapes = {'features': {'x': reader_params['example_size'] + [NUM_CHANNELS]},
                                 'labels': {'y': [1]}}
        reader = Reader(read_fn,
                        {'features': {'x': tf.float32},
                         'labels': {'y': tf.int32}})

        # Get input functions and queue initialisation hooks for training and
        # validation data
        train_input_fn, train_qinit_hook = reader.get_inputs(
            file_references=train_filenames,
            mode=tf.estimator.ModeKeys.TRAIN,
            example_shapes=reader_example_shapes,
            batch_size=BATCH_SIZE,
            shuffle_cache_size=SHUFFLE_CACHE_SIZE,
            params=reader_params)

        val_input_fn, val_qinit_hook = reader.get_inputs(
            file_references=val_filenames,
            mode=tf.estimator.ModeKeys.EVAL,
            example_shapes=reader_example_shapes,
            batch_size=BATCH_SIZE,
            shuffle_cache_size=SHUFFLE_CACHE_SIZE,
            params=reader_params)

        # Instantiate the neural network estimator
        nn = tf.estimator.Estimator(
            model_fn=model_fn,
            model_dir=args.model_path,
            params={"learning_rate": 0.001},
            config=tf.estimator.RunConfig())

        # Hooks for validation summaries
        val_summary_hook = tf.contrib.training.SummaryAtEndHook(
            os.path.join(args.model_path, 'eval'))
        step_cnt_hook = tf.train.StepCounterHook(every_n_steps=EVAL_EVERY_N_STEPS,
                                                 output_dir=args.model_path)

        print('Starting training...')
        try:
            for _ in range(MAX_STEPS // EVAL_EVERY_N_STEPS):
                nn.train(
                    input_fn=train_input_fn,
                    hooks=[train_qinit_hook, step_cnt_hook],
                    steps=EVAL_EVERY_N_STEPS)

                if args.run_validation:
                    results_val = nn.evaluate(
                        input_fn=val_input_fn,
                        hooks=[val_qinit_hook, val_summary_hook],
                        steps=EVAL_STEPS)
                    print('Step = {}; val loss = {:.5f};'.format(
                        results_val['global_step'],
                        results_val['loss']))

        except KeyboardInterrupt:
            pass

        # When exporting we set the expected input shape to be arbitrary.
        export_dir = nn.export_savedmodel(
            export_dir_base=args.model_path,
            serving_input_receiver_fn=reader.serving_input_receiver_fn(
                {'features': {'x': [None, None, None, NUM_CHANNELS]},
                 'labels': {'y': [1]}}))
        print('Model saved to {}.'.format(export_dir))
Example #5
def train(args):
    np.random.seed(8)
    tf.set_random_seed(8)

    print('Setting Up...')

    # Read Training-Fold.csv
    train_filenames = pd.read_csv(args.train_csv,
                                  dtype=object,
                                  keep_default_na=False,
                                  na_values=[]).values

    # Read Validation-Fold.csv
    val_filenames = pd.read_csv(args.val_csv,
                                dtype=object,
                                keep_default_na=False,
                                na_values=[]).values

    # Set DLTK Reader Parameters (No. of Patches, Patch Size)
    reader_params = {
        'n_patches': NUM_PATCHES,
        'patch_size': [PATCH_Z, PATCH_XY, PATCH_XY],  # Target Patch Size
        'extract_patches': True
    }  # Enable Training Mode Patch Extraction

    # Set Patch Dimensions
    reader_patch_shapes = {
        'features': {
            'x': reader_params['patch_size'] + [
                NUM_CHANNELS,
            ]
        },
        'labels': {
            'y': reader_params['patch_size']
        }
    }

    # Initiate Data Reader + Patch Extraction
    reader = Reader(read_fn, {
        'features': {
            'x': tf.float32
        },
        'labels': {
            'y': tf.int32
        }
    })

    # Create Input Functions + Queue Initialisation Hooks for Training/Validation Data
    train_input_fn, train_qinit_hook = reader.get_inputs(
        file_references=train_filenames,
        mode=tf.estimator.ModeKeys.TRAIN,
        example_shapes=reader_patch_shapes,
        batch_size=BATCH_SIZE,
        shuffle_cache_size=SHUFFLE_CACHE_SIZE,
        prefetch_cache_size=PREFETCH_CACHE_SIZE,
        params=reader_params)

    val_input_fn, val_qinit_hook = reader.get_inputs(
        file_references=val_filenames,
        mode=tf.estimator.ModeKeys.EVAL,
        example_shapes=reader_patch_shapes,
        batch_size=BATCH_SIZE,
        shuffle_cache_size=SHUFFLE_CACHE_SIZE,
        prefetch_cache_size=PREFETCH_CACHE_SIZE,
        params=reader_params)

    # Instantiate Neural Network Estimator
    nn = tf.estimator.Estimator(model_fn=model_fn,
                                model_dir=args.model_path,
                                config=tf.estimator.RunConfig())

    # Hooks for Validation Summaries
    val_summary_hook = tf.contrib.training.SummaryAtEndHook(
        os.path.join(args.model_path, 'eval'))
    step_cnt_hook = tf.train.StepCounterHook(every_n_steps=EVAL_EVERY_N_STEPS,
                                             output_dir=args.model_path)

    print('Begin Training...')
    try:
        for _ in range(MAX_STEPS // EVAL_EVERY_N_STEPS):
            nn.train(input_fn=train_input_fn,
                     hooks=[train_qinit_hook, step_cnt_hook],
                     steps=EVAL_EVERY_N_STEPS)

            if args.run_validation:
                results_val = nn.evaluate(
                    input_fn=val_input_fn,
                    hooks=[val_qinit_hook, val_summary_hook],
                    steps=EVAL_STEPS)

                EPOCH_DISPLAY = int(
                    int(results_val['global_step']) /
                    (TRAIN_SIZE / BATCH_SIZE))
                print('Epoch = {}; Step = {} / ValLoss = {:.5f};'.format(
                    EPOCH_DISPLAY, results_val['global_step'],
                    results_val['loss']))

                dim = args.model_path + 'Step{}ValLoss{:.5f}'.format(
                    results_val['global_step'], results_val['loss'])
                export_dir = nn.export_savedmodel(
                    export_dir_base=dim,
                    serving_input_receiver_fn=reader.serving_input_receiver_fn(
                        reader_patch_shapes))
                print('Model saved to {}.'.format(export_dir))
                count_steps.append(results_val['global_step'])
                count_loss.append(results_val['loss'])

    except KeyboardInterrupt:
        pass

    # Arbitrary Input Shape during Export
    export_dir = nn.export_savedmodel(
        export_dir_base=args.model_path,
        serving_input_receiver_fn=reader.serving_input_receiver_fn(
            reader_patch_shapes))
    print('Model saved to {}.'.format(export_dir))

    step_Loss = pd.DataFrame(list(zip(count_steps, count_loss)),
                             columns=['steps', 'val_loss'])
    step_Loss.to_csv("RU_Validation_Loss.csv", encoding='utf-8', index=False)
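This variant additionally relies on patch-extraction constants and on two module-level lists that accumulate the per-validation statistics written to RU_Validation_Loss.csv. Only the names below come from the code; all values are placeholders.

# Placeholder values; only the names are used by the function above.
NUM_PATCHES = 8
PATCH_Z, PATCH_XY = 32, 128
NUM_CHANNELS = 1
BATCH_SIZE = 4
TRAIN_SIZE = 100            # training examples per epoch, used for the epoch display
SHUFFLE_CACHE_SIZE = 32
PREFETCH_CACHE_SIZE = 8
MAX_STEPS = 100000
EVAL_EVERY_N_STEPS = 1000
EVAL_STEPS = 1

# Filled in at every validation run and dumped to csv at the end of training.
count_steps = []
count_loss = []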
Example #6
def train(args):
    np.random.seed(42)
    tf.set_random_seed(42)

    print('Setting up...')

    # Parse csv files for file names
    all_filenames = pd.read_csv(args.data_csv,
                                dtype=object,
                                keep_default_na=False,
                                na_values=[]).values

    # 3300
    train_filenames = all_filenames[:3300]
    val_filenames = all_filenames[3300:]

    # Set up a data reader to handle the file i/o.
    reader_params = {
        'n_examples': 1,
        'example_size': RESIZE_SIZE,
        'extract_examples': True
    }

    reader_example_shapes = {
        'features': {
            'x': reader_params['example_size'] + [NUM_CHANNELS]
        },
        'labels': {
            'y': []
        }
    }
    reader = Reader(read_fn, {
        'features': {
            'x': tf.float32
        },
        'labels': {
            'y': tf.int32
        }
    })

    # Get input functions and queue initialisation hooks for training and
    # validation data
    train_input_fn, train_qinit_hook = reader.get_inputs(
        file_references=train_filenames,
        mode=tf.estimator.ModeKeys.TRAIN,
        example_shapes=reader_example_shapes,
        batch_size=BATCH_SIZE,
        shuffle_cache_size=SHUFFLE_CACHE_SIZE,
        params=reader_params)

    val_input_fn, val_qinit_hook = reader.get_inputs(
        file_references=val_filenames,
        mode=tf.estimator.ModeKeys.EVAL,
        example_shapes=reader_example_shapes,
        batch_size=BATCH_SIZE,
        shuffle_cache_size=SHUFFLE_CACHE_SIZE,
        params=reader_params)

    # Instantiate the neural network estimator
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    nn = tf.estimator.Estimator(
        model_fn=model_fn,
        model_dir=args.model_path,
        params={"learning_rate": 1e-3},
        config=tf.estimator.RunConfig(session_config=config))

    # Hooks for validation summaries
    val_summary_hook = tf.contrib.training.SummaryAtEndHook(
        os.path.join(args.model_path, 'eval'))
    train_summary_hook = tf.contrib.training.SummaryAtEndHook(
        os.path.join(args.model_path, 'train'))
    # train_summary_hook = tf.train.SummarySaverHook(save_steps=1,
    #                                                output_dir=os.path.join(args.model_path, 'train'),
    #                                                summary_op=tf.summary.)

    step_cnt_hook = tf.train.StepCounterHook(every_n_steps=EVAL_EVERY_N_STEPS,
                                             output_dir=args.model_path)

    print('Starting training...')
    try:
        for _ in tqdm(range(MAX_STEPS // EVAL_EVERY_N_STEPS)):
            nn.train(
                input_fn=train_input_fn,
                hooks=[train_qinit_hook, step_cnt_hook, train_summary_hook],
                steps=EVAL_EVERY_N_STEPS)

            if args.run_validation:
                results_val = nn.evaluate(
                    input_fn=val_input_fn,
                    hooks=[val_qinit_hook, val_summary_hook],
                    steps=EVAL_STEPS)
                print('Step = {}; val loss = {:.5f}; val acc = {:.5f}'.format(
                    results_val['global_step'], results_val['loss'],
                    results_val['accuracy_val']))

    except KeyboardInterrupt:
        pass

    # When exporting we set the expected input shape to be arbitrary.
    export_dir = nn.export_savedmodel(
        export_dir_base=args.model_path,
        serving_input_receiver_fn=reader.serving_input_receiver_fn({
            'features': {
                'x': [None, None, None, NUM_CHANNELS]
            },
            'labels': {
                'y': [1]
            }
        }))
    print('Model saved to {}.'.format(export_dir))
Example #7
def train(args):
    np.random.seed(42)
    tf.set_random_seed(42)

    print('Setting up...')

    # Parse csv files for file names

    train_df = pd.read_csv(args.train_csv)
    val_df = pd.read_csv(args.val_csv)

    # Set up a data reader to handle the file i/o.
    reader_params = {
        'n_examples': 2,
        'example_size': [121, 145, 121],
        'extract_examples': False
    }

    reader_example_shapes = {
        'features': {
            'x': reader_params['example_size'] + [NUM_CHANNELS]
        },
        'labels': {
            'y': [1]
        }
    }
    reader = Reader(read_fn, {
        'features': {
            'x': tf.float32
        },
        'labels': {
            'y': tf.float32
        }
    })

    # Get input functions and queue initialisation hooks for training and
    # validation data
    train_input_fn, train_qinit_hook = reader.get_inputs(
        file_references=train_df,
        mode=tf.estimator.ModeKeys.TRAIN,
        example_shapes=reader_example_shapes,
        batch_size=BATCH_SIZE,
        shuffle_cache_size=SHUFFLE_CACHE_SIZE,
        params=reader_params)

    val_input_fn, val_qinit_hook = reader.get_inputs(
        file_references=val_df,
        mode=tf.estimator.ModeKeys.EVAL,
        example_shapes=reader_example_shapes,
        batch_size=BATCH_SIZE,
        shuffle_cache_size=SHUFFLE_CACHE_SIZE,
        params=reader_params)

    # Instantiate the neural network estimator
    nn = tf.estimator.Estimator(model_fn=model_fn,
                                model_dir=args.model_path,
                                params={"learning_rate": 0.001},
                                config=tf.estimator.RunConfig())

    # Hooks for validation summaries
    val_summary_hook = tf.contrib.training.SummaryAtEndHook(
        os.path.join(args.model_path, 'eval'))
    step_cnt_hook = tf.train.StepCounterHook(every_n_steps=EVAL_EVERY_N_STEPS,
                                             output_dir=args.model_path)

    print('Starting training...')

    best_model_path = os.path.join(args.model_path, 'best')
    best_val_loss = None

    try:
        for epoch in range(MAX_STEPS // EVAL_EVERY_N_STEPS):
            print('Training epoch {}/{}'.format(
                epoch + 1, MAX_STEPS // EVAL_EVERY_N_STEPS))
            nn.train(input_fn=train_input_fn,
                     hooks=[train_qinit_hook, step_cnt_hook],
                     steps=EVAL_EVERY_N_STEPS)

            if args.run_validation:
                results_val = nn.evaluate(
                    input_fn=val_input_fn,
                    hooks=[val_qinit_hook, val_summary_hook],
                    steps=EVAL_STEPS)
                print('Step = {}; val loss = {:.5f};'.format(
                    results_val['global_step'], results_val['loss']))
                if best_val_loss is None or results_val['loss'] < best_val_loss:
                    # Clear out the previous best export (shell glob; assumes a POSIX shell).
                    os.system('rm -rf {}/{}'.format(best_model_path, '*'))
                    export_dir = nn.export_savedmodel(
                        export_dir_base=os.path.join(args.model_path, 'best'),
                        serving_input_receiver_fn=reader.
                        serving_input_receiver_fn({
                            'features': {
                                'x': [None, None, None, NUM_CHANNELS]
                            },
                            'labels': {
                                'y': [1]
                            }
                        }))
                    print('Best Model saved to {}.'.format(export_dir))
                    best_val_loss = results_val['loss']

    except KeyboardInterrupt:
        pass

    # When exporting we set the expected input shape to be arbitrary.
    export_dir = nn.export_savedmodel(
        export_dir_base=args.model_path,
        serving_input_receiver_fn=reader.serving_input_receiver_fn({
            'features': {
                'x': [None, None, None, NUM_CHANNELS]
            },
            'labels': {
                'y': [1]
            }
        }))
    print('Model saved to {}.'.format(export_dir))
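All of these examples finish by exporting a SavedModel with export_savedmodel. Under TensorFlow 1.x the exported directory can be loaded back for inference with tf.contrib.predictor; the feed key 'x' matches the serving input receiver used above, but the output key names depend on the EstimatorSpec returned by model_fn, so the snippet below is only an illustrative sketch.

import numpy as np
import tensorflow as tf

# export_dir is the path printed by 'Model saved to ...' above.
predictor = tf.contrib.predictor.from_saved_model(export_dir)

# Dummy input with the serving shape [batch, depth, height, width, channels];
# the spatial size and channel count are illustrative.
volume = np.zeros([1, 121, 145, 121, 1], dtype=np.float32)

outputs = predictor({'x': volume})
print(outputs.keys())  # the available output tensors depend on model_fn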