Example #1
def setup_reader():
    NUM_CHANNELS = 1

    # Set up a data reader to handle the file i/o.
    reader_params = {
        'n_examples': 32,
        'example_size': [64, 64, 64],
        'extract_examples': True
    }

    reader_example_shapes = {
        'features': {
            'x': reader_params['example_size'] + [
                NUM_CHANNELS,
            ]
        },
        'labels': {
            'y': reader_params['example_size']
        }
    }

    reader = Reader(read_fn, {
        'features': {
            'x': tf.float32
        },
        'labels': {
            'y': tf.int32
        }
    })

    return (reader, reader_example_shapes, reader_params)
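All of these examples pass an externally defined read_fn into the DLTK-style Reader (see the dltk.io.abstract_reader reference in the docstrings further down). A minimal sketch of such a read_fn is shown below; the generator contract (file_references, mode, params, yielding feature/label dicts) follows DLTK's examples, while the CSV column layout and the SimpleITK loading are illustrative assumptions, not taken from the code above.

import numpy as np
import SimpleITK as sitk
import tensorflow as tf


def read_fn(file_references, mode, params=None):
    """Yields DLTK-style example dicts matching the Reader dtypes above (sketch)."""
    for ref in file_references:
        # Assumed layout: first column is the image path, second the label path.
        img = sitk.GetArrayFromImage(sitk.ReadImage(str(ref[0])))
        img = np.expand_dims(img, -1).astype(np.float32)  # add a channel axis

        if mode == tf.estimator.ModeKeys.PREDICT:
            yield {'features': {'x': img}}
            continue

        lbl = sitk.GetArrayFromImage(sitk.ReadImage(str(ref[1]))).astype(np.int32)

        # A full reader would additionally crop params['n_examples'] patches of
        # params['example_size'] here when params['extract_examples'] is set.
        yield {'features': {'x': img}, 'labels': {'y': lbl}}
    return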
Example #2
def call_zac(txt_path, model_path):
    # Read in the text file listing the file names you want to predict on
    filepath = txt_path
    with open(filepath) as fp:
        all_filenames = fp.readlines()
    file_names = [x.strip() for x in all_filenames]
    # file_names = np.loadtxt(txt_path, dtype=np.str)
    # file_names = pd.read_csv(
    #     '/media/data/Track_2/New_Labels_For_Track_2.csv',
    #     dtype=object,
    #     keep_default_na=False,
    #     na_values=[]).as_matrix()

    # We trained on the first 4 subjects, so we predict on the rest
    # file_names = file_names[-N_VALIDATION_SUBJECTS:]

    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    nn = tf.estimator.Estimator(
        model_fn=model_fn,
        model_dir=model_path,
        params={"learning_rate": 1e-3},
        config=tf.estimator.RunConfig(session_config=config))

    reader_params = {
        'n_examples': 1,
        'example_size': RESIZE_SIZE,
        'extract_examples': True
    }
    reader_example_shapes = {
        'features': {
            'x': reader_params['example_size'] + [1]
        },
        'labels': {
            'y': []
        }
    }
    reader = Reader(read_fn, {
        'features': {
            'x': tf.float32
        },
        'labels': {
            'y': tf.int32
        }
    })
    input_fn, qinit_hook = reader.get_inputs(
        file_references=file_names,
        mode=tf.estimator.ModeKeys.PREDICT,
        example_shapes=reader_example_shapes,
        batch_size=1,
        shuffle_cache_size=1,
        params=reader_params)

    features_to_save = []
    labels_to_save = []
    for i in tqdm(nn.predict(input_fn, hooks=[qinit_hook])):
        features_to_save.append(i['features'])
        labels_to_save.append(i['y_'])

    return np.array(features_to_save), np.array(labels_to_save)
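A minimal way to call this helper and keep its outputs around for later analysis; the text file and model directory paths below are placeholders, not from the original code.

import numpy as np

# Hypothetical invocation of call_zac(); both paths are placeholders.
features, labels = call_zac('validation_files.txt', '/tmp/zac_model')
print(features.shape, labels.shape)

# Persist the extracted features and labels.
np.save('zac_features.npy', features)
np.save('zac_labels.npy', labels)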
Example #3
def train(args):
    np.random.seed(42)
    tf.set_random_seed(42)

    print('Setting up...')

    # Parse csv files for file names
    all_filenames = pd.read_csv(args.train_csv,
                                dtype=object,
                                keep_default_na=False,
                                na_values=[]).as_matrix()

    train_filenames = all_filenames[1:10]
    val_filenames = all_filenames[10:12]

    # Set up a data reader to handle the file i/o.
    reader_params = {
        'n_examples': 16,
        'example_size': [1, 64, 64],
        'extract_examples': True
    }
    reader_example_shapes = {
        'features': {
            'x': reader_params['example_size'] + [
                NUM_CHANNELS,
            ]
        },
        'labels': {
            'y': reader_params['example_size']
        }
    }
    reader = Reader(read_fn, {
        'features': {
            'x': tf.float32
        },
        'labels': {
            'y': tf.int32
        }
    })

    # Get input functions and queue initialisation hooks for training and
    # validation data
    train_input_fn, train_qinit_hook = reader.get_inputs(
        file_references=train_filenames,
        mode=tf.estimator.ModeKeys.TRAIN,
        example_shapes=reader_example_shapes,
        batch_size=BATCH_SIZE,
        shuffle_cache_size=SHUFFLE_CACHE_SIZE,
        params=reader_params)

    val_input_fn, val_qinit_hook = reader.get_inputs(
        file_references=val_filenames,
        mode=tf.estimator.ModeKeys.EVAL,
        example_shapes=reader_example_shapes,
        batch_size=BATCH_SIZE,
        shuffle_cache_size=SHUFFLE_CACHE_SIZE,
        params=reader_params)

    # Instantiate the neural network estimator
    nn = tf.estimator.Estimator(model_fn=model_fn,
                                model_dir=args.model_path,
                                params={"learning_rate": 0.001},
                                config=tf.estimator.RunConfig())

    # Hooks for validation summaries
    val_summary_hook = tf.contrib.training.SummaryAtEndHook(
        os.path.join(args.model_path, 'eval'))
    step_cnt_hook = tf.train.StepCounterHook(every_n_steps=EVAL_EVERY_N_STEPS,
                                             output_dir=args.model_path)

    print('Starting training...')
    try:
        for _ in range(MAX_STEPS // EVAL_EVERY_N_STEPS):
            nn.train(input_fn=train_input_fn,
                     hooks=[train_qinit_hook, step_cnt_hook],
                     steps=EVAL_EVERY_N_STEPS)

            if args.run_validation:
                results_val = nn.evaluate(
                    input_fn=val_input_fn,
                    hooks=[val_qinit_hook, val_summary_hook],
                    steps=EVAL_STEPS)
                print('Step = {}; val loss = {:.5f};'.format(
                    results_val['global_step'], results_val['loss']))

    except KeyboardInterrupt:
        pass

    print('Stopping now.')
    export_dir = nn.export_savedmodel(
        export_dir_base=args.model_path,
        serving_input_receiver_fn=reader.serving_input_receiver_fn(
            reader_example_shapes))
    print('Model saved to {}.'.format(export_dir))
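The train() functions in these examples expect an args namespace carrying at least the CSV path, a model directory, and a run_validation flag. A sketch of the kind of driver that would call the function above; the argument names are inferred from the attribute accesses in the code, not from an original __main__ block.

import argparse

if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='DLTK example training driver (sketch)')
    parser.add_argument('--train_csv', required=True)
    parser.add_argument('--model_path', default='/tmp/dltk_model')
    parser.add_argument('--run_validation', action='store_true')
    args = parser.parse_args()

    train(args)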
Example #4
def tune_train(args):
    np.random.seed(42)
    tf.set_random_seed(42)

    print('Setting up...')

    # Parse csv files for file names
    patch_filenames = pd.read_csv(args.train_csv,
                                  dtype=object,
                                  keep_default_na=False,
                                  na_values=[]).as_matrix()

    subj_filenames = pd.read_csv(args.val_csv,
                                 dtype=object,
                                 keep_default_na=False,
                                 na_values=[]).as_matrix()

    app_json = get_config_for_app()

    val_filenames = []

    for row in subj_filenames:
        if row[4] == '1':
            val_filenames.append(row)

    # Set up a data reader to handle the file i/o.
    reader_params = {
        'n_examples': 16,
        'example_size': [1, 64, 64],
        'extract_examples': True
    }
    num_channels = app_json['num_channels']
    reader_example_shapes = {
        'features': {
            'x': reader_params['example_size'] + [
                num_channels,
            ]
        },
        'labels': {
            'y': reader_params['example_size']
        }
    }

    # module_name = 'contributions.applications.AL_framework.applications.app' + str(app_json['id']) + '.readers.'
    #
    # if app_json['reader_type'] == "Patch":
    #     module_name = module_name + 'patch_reader'
    # elif app_json['reader_type'] == "Slice":
    #     module_name = module_name + 'slice_reader'
    # elif app_json['reader_type'] == "Stack":
    #     module_name = module_name + 'stack_reader'
    # else:
    #     print("Unsupported reader type: please specify a new one")
    #     return

    # mod = import_module(module_name)
    mod = import_module('readers.tune_reader')
    read_fn = vars(mod)['read_fn']

    reader = Reader(read_fn, {
        'features': {
            'x': tf.float32
        },
        'labels': {
            'y': tf.int32
        }
    })

    # Get input functions and queue initialisation hooks for training and
    # validation data
    batch_size = app_json['batch_size']
    train_input_fn, train_qinit_hook = reader.get_inputs(
        file_references=patch_filenames,
        mode=tf.estimator.ModeKeys.TRAIN,
        example_shapes=reader_example_shapes,
        batch_size=batch_size,
        shuffle_cache_size=SHUFFLE_CACHE_SIZE,
        params=reader_params)

    val_input_fn, val_qinit_hook = reader.get_inputs(
        file_references=val_filenames,
        mode=tf.estimator.ModeKeys.EVAL,
        example_shapes=reader_example_shapes,
        batch_size=batch_size,
        shuffle_cache_size=SHUFFLE_CACHE_SIZE,
        params=reader_params)

    # Instantiate the neural network estimator
    nn = tf.estimator.Estimator(model_fn=model_fn,
                                model_dir=args.model_path,
                                params={"learning_rate": 0.001},
                                config=tf.estimator.RunConfig())

    # Hooks for validation summaries
    val_summary_hook = tf.contrib.training.SummaryAtEndHook(
        os.path.join(args.model_path, 'eval'))
    step_cnt_hook = tf.train.StepCounterHook(every_n_steps=EVAL_EVERY_N_STEPS,
                                             output_dir=args.model_path)

    print('Starting tuning...')
    max_steps = app_json['max_steps']
    try:
        for _ in range(max_steps // EVAL_EVERY_N_STEPS):
            nn.train(input_fn=train_input_fn,
                     hooks=[train_qinit_hook, step_cnt_hook],
                     steps=EVAL_EVERY_N_STEPS)

            if args.run_validation:
                results_val = nn.evaluate(
                    input_fn=val_input_fn,
                    hooks=[val_qinit_hook, val_summary_hook],
                    steps=EVAL_STEPS)
                print('Step = {}; val loss = {:.5f};'.format(
                    results_val['global_step'], results_val['loss']))

    except KeyboardInterrupt:
        pass

    print('Stopping now.')
    export_dir = nn.export_savedmodel(
        export_dir_base=args.model_path,
        serving_input_receiver_fn=reader.serving_input_receiver_fn(
            reader_example_shapes))
    print('Model saved to {}.'.format(export_dir))
    app_json['model_status'] = 2
    write_app_config(app_json)
    print('Updated model status in model config')
Example #5
def train(args):
    np.random.seed(42)
    tf.set_random_seed(42)

    print('Setting up...')

    # Parse csv files for file names

    target_df = pd.read_csv('/data1/users/adoyle/IBIS/ibis_t1_qc.csv', dtype=object, keep_default_na=False)

    all_filenames = target_df.iloc[:, 0].tolist()
    all_labels = target_df.iloc[:, 1].tolist()

    skf = StratifiedKFold(n_splits=10)

    for train_idx, val_idx in skf.split(all_filenames, all_labels):
        # StratifiedKFold yields index arrays; map them back to the file names
        train_filenames = [all_filenames[i] for i in train_idx]
        val_filenames = [all_filenames[i] for i in val_idx]

        # Set up a data reader to handle the file i/o.
        reader_params = {'n_examples': 2,
                         'example_size': [160, 256, 224],
                         'extract_examples': True}

        reader_example_shapes = {'features': {'x': reader_params['example_size'] + [NUM_CHANNELS]},
                                 'labels': {'y': [1]}}
        reader = Reader(read_fn,
                        {'features': {'x': tf.float32},
                         'labels': {'y': tf.int32}})

        # Get input functions and queue initialisation hooks for training and
        # validation data
        train_input_fn, train_qinit_hook = reader.get_inputs(
            file_references=train_filenames,
            mode=tf.estimator.ModeKeys.TRAIN,
            example_shapes=reader_example_shapes,
            batch_size=BATCH_SIZE,
            shuffle_cache_size=SHUFFLE_CACHE_SIZE,
            params=reader_params)

        val_input_fn, val_qinit_hook = reader.get_inputs(
            file_references=val_filenames,
            mode=tf.estimator.ModeKeys.EVAL,
            example_shapes=reader_example_shapes,
            batch_size=BATCH_SIZE,
            shuffle_cache_size=SHUFFLE_CACHE_SIZE,
            params=reader_params)

        # Instantiate the neural network estimator
        nn = tf.estimator.Estimator(
            model_fn=model_fn,
            model_dir=args.model_path,
            params={"learning_rate": 0.001},
            config=tf.estimator.RunConfig())

        # Hooks for validation summaries
        val_summary_hook = tf.contrib.training.SummaryAtEndHook(
            os.path.join(args.model_path, 'eval'))
        step_cnt_hook = tf.train.StepCounterHook(every_n_steps=EVAL_EVERY_N_STEPS,
                                                 output_dir=args.model_path)

        print('Starting training...')
        try:
            for _ in range(MAX_STEPS // EVAL_EVERY_N_STEPS):
                nn.train(
                    input_fn=train_input_fn,
                    hooks=[train_qinit_hook, step_cnt_hook],
                    steps=EVAL_EVERY_N_STEPS)

                if args.run_validation:
                    results_val = nn.evaluate(
                        input_fn=val_input_fn,
                        hooks=[val_qinit_hook, val_summary_hook],
                        steps=EVAL_STEPS)
                    print('Step = {}; val loss = {:.5f};'.format(
                        results_val['global_step'],
                        results_val['loss']))

        except KeyboardInterrupt:
            pass

        # When exporting we set the expected input shape to be arbitrary.
        export_dir = nn.export_savedmodel(
            export_dir_base=args.model_path,
            serving_input_receiver_fn=reader.serving_input_receiver_fn(
                {'features': {'x': [None, None, None, NUM_CHANNELS]},
                 'labels': {'y': [1]}}))
        print('Model saved to {}.'.format(export_dir))
Example #6
def train(args):
    np.random.seed(8)
    tf.set_random_seed(8)

    print('Setting Up...')

    # Read Training-Fold.csv
    train_filenames = pd.read_csv(args.train_csv,
                                  dtype=object,
                                  keep_default_na=False,
                                  na_values=[]).values

    # Read Validation-Fold.csv
    val_filenames = pd.read_csv(args.val_csv,
                                dtype=object,
                                keep_default_na=False,
                                na_values=[]).values

    # Set DLTK Reader Parameters (No. of Patches, Patch Size)
    reader_params = {
        'n_patches': NUM_PATCHES,
        'patch_size': [PATCH_Z, PATCH_XY, PATCH_XY],  # Target Patch Size
        'extract_patches': True
    }  # Enable Training Mode Patch Extraction

    # Set Patch Dimensions
    reader_patch_shapes = {
        'features': {
            'x': reader_params['patch_size'] + [
                NUM_CHANNELS,
            ]
        },
        'labels': {
            'y': reader_params['patch_size']
        }
    }

    # Initiate Data Reader + Patch Extraction
    reader = Reader(read_fn, {
        'features': {
            'x': tf.float32
        },
        'labels': {
            'y': tf.int32
        }
    })

    # Create Input Functions + Queue Initialisation Hooks for Training/Validation Data
    train_input_fn, train_qinit_hook = reader.get_inputs(
        file_references=train_filenames,
        mode=tf.estimator.ModeKeys.TRAIN,
        example_shapes=reader_patch_shapes,
        batch_size=BATCH_SIZE,
        shuffle_cache_size=SHUFFLE_CACHE_SIZE,
        prefetch_cache_size=PREFETCH_CACHE_SIZE,
        params=reader_params)

    val_input_fn, val_qinit_hook = reader.get_inputs(
        file_references=val_filenames,
        mode=tf.estimator.ModeKeys.EVAL,
        example_shapes=reader_patch_shapes,
        batch_size=BATCH_SIZE,
        shuffle_cache_size=SHUFFLE_CACHE_SIZE,
        prefetch_cache_size=PREFETCH_CACHE_SIZE,
        params=reader_params)

    # Instantiate Neural Network Estimator
    nn = tf.estimator.Estimator(model_fn=model_fn,
                                model_dir=args.model_path,
                                config=tf.estimator.RunConfig())

    # Hooks for Validation Summaries
    val_summary_hook = tf.contrib.training.SummaryAtEndHook(
        os.path.join(args.model_path, 'eval'))
    step_cnt_hook = tf.train.StepCounterHook(every_n_steps=EVAL_EVERY_N_STEPS,
                                             output_dir=args.model_path)

    # Collect validation steps/losses for the CSV written at the end
    count_steps = []
    count_loss = []

    print('Begin Training...')
    try:
        for _ in range(MAX_STEPS // EVAL_EVERY_N_STEPS):
            nn.train(input_fn=train_input_fn,
                     hooks=[train_qinit_hook, step_cnt_hook],
                     steps=EVAL_EVERY_N_STEPS)

            if args.run_validation:
                results_val = nn.evaluate(
                    input_fn=val_input_fn,
                    hooks=[val_qinit_hook, val_summary_hook],
                    steps=EVAL_STEPS)

                EPOCH_DISPLAY = int(
                    int(results_val['global_step']) /
                    (TRAIN_SIZE / BATCH_SIZE))
                print('Epoch = {}; Step = {} / ValLoss = {:.5f};'.format(
                    EPOCH_DISPLAY, results_val['global_step'],
                    results_val['loss']))

                dim = args.model_path + 'Step{}ValLoss{:.5f}'.format(
                    results_val['global_step'], results_val['loss'])
                export_dir = nn.export_savedmodel(
                    export_dir_base=dim,
                    serving_input_receiver_fn=reader.serving_input_receiver_fn(
                        reader_patch_shapes))
                print('Model saved to {}.'.format(export_dir))
                count_steps.append(results_val['global_step'])
                count_loss.append(results_val['loss'])

    except KeyboardInterrupt:
        pass

    # Final Export using the Training Patch Shapes
    export_dir = nn.export_savedmodel(
        export_dir_base=args.model_path,
        serving_input_receiver_fn=reader.serving_input_receiver_fn(
            reader_patch_shapes))
    print('Model saved to {}.'.format(export_dir))

    step_Loss = pd.DataFrame(list(zip(count_steps, count_loss)),
                             columns=['steps', 'val_loss'])
    step_Loss.to_csv("RU_Validation_Loss.csv", encoding='utf-8', index=False)
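The step/loss CSV written at the end makes it easy to inspect the validation curve offline. A small sketch of that; the matplotlib usage is an assumption, not part of the original script.

import pandas as pd
import matplotlib.pyplot as plt

# Load the CSV written by train() above and plot the validation curve.
log = pd.read_csv('RU_Validation_Loss.csv')
plt.plot(log['steps'], log['val_loss'], marker='o')
plt.xlabel('global step')
plt.ylabel('validation loss')
plt.tight_layout()
plt.savefig('ru_validation_loss.png')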
Example #7
def train(args):
    np.random.seed(42)
    tf.set_random_seed(42)

    print('Setting up...')

    # Parse csv files for file names
    all_filenames = pd.read_csv(args.data_csv,
                                dtype=object,
                                keep_default_na=False,
                                na_values=[]).as_matrix()

    # Use the first 3300 files for training and the remainder for validation
    train_filenames = all_filenames[:3300]
    val_filenames = all_filenames[3300:]

    # Set up a data reader to handle the file i/o.
    reader_params = {
        'n_examples': 1,
        'example_size': RESIZE_SIZE,
        'extract_examples': True
    }

    reader_example_shapes = {
        'features': {
            'x': reader_params['example_size'] + [NUM_CHANNELS]
        },
        'labels': {
            'y': []
        }
    }
    reader = Reader(read_fn, {
        'features': {
            'x': tf.float32
        },
        'labels': {
            'y': tf.int32
        }
    })

    # Get input functions and queue initialisation hooks for training and
    # validation data
    train_input_fn, train_qinit_hook = reader.get_inputs(
        file_references=train_filenames,
        mode=tf.estimator.ModeKeys.TRAIN,
        example_shapes=reader_example_shapes,
        batch_size=BATCH_SIZE,
        shuffle_cache_size=SHUFFLE_CACHE_SIZE,
        params=reader_params)

    val_input_fn, val_qinit_hook = reader.get_inputs(
        file_references=val_filenames,
        mode=tf.estimator.ModeKeys.EVAL,
        example_shapes=reader_example_shapes,
        batch_size=BATCH_SIZE,
        shuffle_cache_size=SHUFFLE_CACHE_SIZE,
        params=reader_params)

    # Instantiate the neural network estimator
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    nn = tf.estimator.Estimator(
        model_fn=model_fn,
        model_dir=args.model_path,
        params={"learning_rate": 1e-3},
        config=tf.estimator.RunConfig(session_config=config))

    # Hooks for validation summaries
    val_summary_hook = tf.contrib.training.SummaryAtEndHook(
        os.path.join(args.model_path, 'eval'))
    train_summary_hook = tf.contrib.training.SummaryAtEndHook(
        os.path.join(args.model_path, 'train'))
    # train_summary_hook = tf.train.SummarySaverHook(save_steps=1,
    #                                                output_dir=os.path.join(args.model_path, 'train'),
    #                                                summary_op=tf.summary.)

    step_cnt_hook = tf.train.StepCounterHook(every_n_steps=EVAL_EVERY_N_STEPS,
                                             output_dir=args.model_path)

    print('Starting training...')
    try:
        for _ in tqdm(range(MAX_STEPS // EVAL_EVERY_N_STEPS)):
            nn.train(
                input_fn=train_input_fn,
                hooks=[train_qinit_hook, step_cnt_hook, train_summary_hook],
                steps=EVAL_EVERY_N_STEPS)

            if args.run_validation:
                results_val = nn.evaluate(
                    input_fn=val_input_fn,
                    hooks=[val_qinit_hook, val_summary_hook],
                    steps=EVAL_STEPS)
                print('Step = {}; val loss = {:.5f}; val acc = {:.5f}'.format(
                    results_val['global_step'], results_val['loss'],
                    results_val['accuracy_val']))

    except KeyboardInterrupt:
        pass

    # When exporting we set the expected input shape to be arbitrary.
    export_dir = nn.export_savedmodel(
        export_dir_base=args.model_path,
        serving_input_receiver_fn=reader.serving_input_receiver_fn({
            'features': {
                'x': [None, None, None, NUM_CHANNELS]
            },
            'labels': {
                'y': [1]
            }
        }))
    print('Model saved to {}.'.format(export_dir))
Example #8
def train(args):
    np.random.seed(42)
    tf.set_random_seed(42)

    print('Setting up...')

    # Parse csv files for file names
    all_filenames = pd.read_csv(args.data_csv,
                                dtype=object,
                                keep_default_na=False,
                                na_values=[]).as_matrix()

    train_filenames = all_filenames

    # Set up a data reader to handle the file i/o.
    reader_params = {
        'n_examples': 10,
        'example_size': [4, 224, 224],
        'extract_examples': True
    }

    reader_example_shapes = {
        'labels': [4, 64, 64, 1],
        'features': {
            'noise': [1, 1, 1, 100]
        }
    }

    reader = Reader(read_fn, {
        'features': {
            'noise': tf.float32
        },
        'labels': tf.float32
    })

    # Get input functions and queue initialisation hooks for data
    train_input_fn, train_qinit_hook = reader.get_inputs(
        file_references=train_filenames,
        mode=tf.estimator.ModeKeys.TRAIN,
        example_shapes=reader_example_shapes,
        batch_size=BATCH_SIZE,
        params=reader_params)

    # See TFGAN's `train.py` for a description of the generator and
    # discriminator API.
    def generator_fn(generator_inputs):
        """Generator function to build fake data samples. It creates a network
        given input features (e.g. from a dltk.io.abstract_reader). Further,
        custom Tensorboard summary ops can be added. For additional
        information, please refer to https://www.tensorflow.org/versions/master/api_docs/python/tf/contrib/gan/estimator/GANEstimator.

        Args:
            generator_inputs (tf.Tensor): Noise input to generate samples from.

        Returns:
            tf.Tensor: Generated data samples
        """
        gen = dcgan_generator_3d(inputs=generator_inputs['noise'],
                                 mode=tf.estimator.ModeKeys.TRAIN)
        gen = gen['gen']
        gen = tf.nn.tanh(gen)
        return gen

    def discriminator_fn(data, conditioning):
        """Discriminator function to discriminate real and fake data. It creates
        a network given input features (e.g. from a dltk.io.abstract_reader).
        Further, custom Tensorboard summary ops can be added. For additional
        information, please refer to https://www.tensorflow.org/versions/master/api_docs/python/tf/contrib/gan/estimator/GANEstimator.

        Args:
            data (tf.Tensor): Real or generated data samples to classify.
            conditioning: Optional conditioning input (unused here).

        Returns:
            tf.Tensor: Discriminator logits
        """
        tf.summary.image('data', data[:, 0])

        disc = dcgan_discriminator_3d(inputs=data,
                                      mode=tf.estimator.ModeKeys.TRAIN)

        return disc['logits']

    # get input tensors from queue
    features, labels = train_input_fn()

    # build generator
    with tf.variable_scope('generator'):
        gen = generator_fn(features)

    # build discriminator on fake data
    with tf.variable_scope('discriminator'):
        disc_fake = discriminator_fn(gen, None)

    # build discriminator on real data, reusing the previously created variables
    with tf.variable_scope('discriminator', reuse=True):
        disc_real = discriminator_fn(labels, None)

    # building an LSGAN loss for the real examples
    d_loss_real = tf.losses.mean_squared_error(disc_real,
                                               tf.ones_like(disc_real))

    # calculating a pseudo accuracy for the discriminator detecting a real
    # sample and logging that
    d_pred_real = tf.cast(tf.greater(disc_real, 0.5), tf.float32)
    _, d_acc_real = tf.metrics.accuracy(tf.ones_like(disc_real), d_pred_real)
    tf.summary.scalar('disc/real_acc', d_acc_real)

    # building an LSGAN loss for the fake examples
    d_loss_fake = tf.losses.mean_squared_error(disc_fake,
                                               tf.zeros_like(disc_fake))

    # calculating a pseudo accuracy for the discriminator detecting a fake
    # sample and logging that
    d_pred_fake = tf.cast(tf.greater(disc_fake, 0.5), tf.float32)
    _, d_acc_fake = tf.metrics.accuracy(tf.zeros_like(disc_fake), d_pred_fake)
    tf.summary.scalar('disc/fake_acc', d_acc_fake)

    # building an LSGAN loss for the generator
    g_loss = tf.losses.mean_squared_error(disc_fake, tf.ones_like(disc_fake))
    tf.summary.scalar('loss/gen', g_loss)

    # combining the discriminator losses
    d_loss = d_loss_fake + d_loss_real
    tf.summary.scalar('loss/disc', d_loss)

    # getting the list of discriminator variables
    d_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES,
                               'discriminator')

    # building the discriminator optimizer
    d_opt = tf.train.AdamOptimizer(0.001, 0.5,
                                   epsilon=1e-5).minimize(d_loss,
                                                          var_list=d_vars)

    # getting the list of generator variables
    g_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, 'generator')

    # building the generator optimizer
    g_opt = tf.train.AdamOptimizer(0.001, 0.5,
                                   epsilon=1e-5).minimize(g_loss,
                                                          var_list=g_vars)

    # getting a variable to hold the global step
    global_step = tf.train.get_or_create_global_step()
    # build op to increment the global step - important for TensorBoard logging
    inc_step = global_step.assign_add(1)

    # build the training session.
    # NOTE: we are not using a tf.estimator here, because they prevent some
    # flexibility in the training procedure
    s = tf.train.MonitoredTrainingSession(checkpoint_dir=args.model_path,
                                          save_summaries_steps=100,
                                          save_summaries_secs=None,
                                          hooks=[train_qinit_hook])

    # build dummy logging string
    log = 'Step {} with Loss D: {}, Loss G: {}, Acc Real: {} Acc Fake: {}'

    # start training
    print('Starting training...')
    loss_d = 0
    loss_g = 0
    try:
        for step in range(MAX_STEPS):
            # if discriminator is too good, only train generator
            if loss_g <= 3 * loss_d:
                s.run(d_opt)

            # if generator is too good, only train discriminator
            if loss_d <= 3 * loss_g:
                s.run(g_opt)

            # increment global step for logging hooks
            s.run(inc_step)

            # get statistics for training scheduling
            loss_d, loss_g, acc_d, acc_g = s.run(
                [d_loss, g_loss, d_acc_real, d_acc_fake])

            # print stats for information
            if step % SAVE_SUMMARY_STEPS == 0:
                print(log.format(step, loss_d, loss_g, acc_d, acc_g))
    except KeyboardInterrupt:
        pass
    print('Stopping now.')
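After training stops, the MonitoredTrainingSession has written checkpoints to args.model_path, so new volumes can be drawn by rebuilding the generator under the same 'generator' variable scope and restoring those weights. A sketch under that assumption: it reuses dcgan_generator_3d from the example above, keeps the same TRAIN-mode graph construction as during training, and assumes a uniform noise distribution with the [1, 1, 1, 100] noise shape from reader_example_shapes.

import numpy as np
import tensorflow as tf


def sample_generator(model_path, n_samples=4):
    """Restore the trained generator from its checkpoints and draw samples (sketch)."""
    tf.reset_default_graph()
    noise = tf.placeholder(tf.float32, [None, 1, 1, 1, 100], name='noise')

    # Rebuild the generator under the same variable scope used during training.
    with tf.variable_scope('generator'):
        gen = dcgan_generator_3d(inputs=noise, mode=tf.estimator.ModeKeys.TRAIN)
        samples = tf.nn.tanh(gen['gen'])

    saver = tf.train.Saver(
        tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, 'generator'))
    with tf.Session() as sess:
        saver.restore(sess, tf.train.latest_checkpoint(model_path))
        z = np.random.uniform(-1., 1., [n_samples, 1, 1, 1, 100]).astype(np.float32)
        return sess.run(samples, feed_dict={noise: z})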
Example #9
def predict(args):
    # Read in the csv with the file names you want to predict on
    file_names = pd.read_csv(args.csv,
                             dtype=object,
                             keep_default_na=False,
                             na_values=[]).as_matrix()

    # We trained on the first 4 subjects, so we predict on the rest
    # file_names = file_names[-N_VALIDATION_SUBJECTS:]

    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    nn = tf.estimator.Estimator(
        model_fn=model_fn,
        model_dir=args.model_path,
        params={"learning_rate": 1e-3},
        config=tf.estimator.RunConfig(session_config=config))

    reader_params = {
        'n_examples': 1,
        'example_size': RESIZE_SIZE,
        'extract_examples': True
    }
    reader_example_shapes = {
        'features': {
            'x': reader_params['example_size'] + [1]
        },
        'labels': {
            'y': []
        }
    }
    reader = Reader(read_fn, {
        'features': {
            'x': tf.float32
        },
        'labels': {
            'y': tf.int32
        }
    })
    input_fn, qinit_hook = reader.get_inputs(
        file_references=file_names,
        mode=tf.estimator.ModeKeys.EVAL,
        example_shapes=reader_example_shapes,
        batch_size=1,
        shuffle_cache_size=1,
        params=reader_params)

    features_to_save = []
    labels_to_save = []
    for i in tqdm(nn.predict(input_fn, hooks=[qinit_hook])):
        features_to_save.append(i['features'])
        labels_to_save.append(i['y_'])

    np.savetxt('embeds.csv',
               np.array(features_to_save),
               delimiter=',',
               newline='\n')
    np.savetxt('labels.csv',
               np.array(labels_to_save),
               delimiter=',',
               newline='\n')
Example #10
def train(args):
    np.random.seed(42)
    tf.set_random_seed(42)

    print('Setting up...')

    # Parse csv files for file names

    train_df = pd.read_csv(args.train_csv)
    val_df = pd.read_csv(args.val_csv)

    # Set up a data reader to handle the file i/o.
    reader_params = {
        'n_examples': 2,
        'example_size': [121, 145, 121],
        'extract_examples': False
    }

    reader_example_shapes = {
        'features': {
            'x': reader_params['example_size'] + [NUM_CHANNELS]
        },
        'labels': {
            'y': [1]
        }
    }
    reader = Reader(read_fn, {
        'features': {
            'x': tf.float32
        },
        'labels': {
            'y': tf.float32
        }
    })

    # Get input functions and queue initialisation hooks for training and
    # validation data
    train_input_fn, train_qinit_hook = reader.get_inputs(
        file_references=train_df,
        mode=tf.estimator.ModeKeys.TRAIN,
        example_shapes=reader_example_shapes,
        batch_size=BATCH_SIZE,
        shuffle_cache_size=SHUFFLE_CACHE_SIZE,
        params=reader_params)

    val_input_fn, val_qinit_hook = reader.get_inputs(
        file_references=val_df,
        mode=tf.estimator.ModeKeys.EVAL,
        example_shapes=reader_example_shapes,
        batch_size=BATCH_SIZE,
        shuffle_cache_size=SHUFFLE_CACHE_SIZE,
        params=reader_params)

    # Instantiate the neural network estimator
    nn = tf.estimator.Estimator(model_fn=model_fn,
                                model_dir=args.model_path,
                                params={"learning_rate": 0.001},
                                config=tf.estimator.RunConfig())

    # Hooks for validation summaries
    val_summary_hook = tf.contrib.training.SummaryAtEndHook(
        os.path.join(args.model_path, 'eval'))
    step_cnt_hook = tf.train.StepCounterHook(every_n_steps=EVAL_EVERY_N_STEPS,
                                             output_dir=args.model_path)

    print('Starting training...')

    best_model_path = os.path.join(args.model_path, 'best')
    best_val_loss = None

    try:
        for _ in range(MAX_STEPS // EVAL_EVERY_N_STEPS):
            print('Training epoch {}/{}'.format(
                _, MAX_STEPS // EVAL_EVERY_N_STEPS))
            nn.train(input_fn=train_input_fn,
                     hooks=[train_qinit_hook, step_cnt_hook],
                     steps=EVAL_EVERY_N_STEPS)

            if args.run_validation:
                results_val = nn.evaluate(
                    input_fn=val_input_fn,
                    hooks=[val_qinit_hook, val_summary_hook],
                    steps=EVAL_STEPS)
                print('Step = {}; val loss = {:.5f};'.format(
                    results_val['global_step'], results_val['loss']))
                if best_val_loss is None or results_val['loss'] < best_val_loss:
                    os.system('rm -rf {}/{}'.format(best_model_path, '*'))
                    export_dir = nn.export_savedmodel(
                        export_dir_base=os.path.join(args.model_path, 'best'),
                        serving_input_receiver_fn=reader.
                        serving_input_receiver_fn({
                            'features': {
                                'x': [None, None, None, NUM_CHANNELS]
                            },
                            'labels': {
                                'y': [1]
                            }
                        }))
                    print('Best Model saved to {}.'.format(export_dir))
                    best_val_loss = results_val['loss']

    except KeyboardInterrupt:
        pass

    # When exporting we set the expected input shape to be arbitrary.
    export_dir = nn.export_savedmodel(
        export_dir_base=args.model_path,
        serving_input_receiver_fn=reader.serving_input_receiver_fn({
            'features': {
                'x': [None, None, None, NUM_CHANNELS]
            },
            'labels': {
                'y': [1]
            }
        }))
    print('Model saved to {}.'.format(export_dir))
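Each example ends with export_savedmodel(), so the exported directory can be reloaded for inference without rebuilding the Estimator. A sketch using TF 1.x's contrib predictor; the export path is a placeholder, the feed key 'x' is assumed to match the feature name in the serving signature, NUM_CHANNELS is assumed to be 1, and the input is assumed to carry a leading batch dimension.

import numpy as np
import tensorflow as tf

# export_savedmodel() returns a timestamped subdirectory; this path is a placeholder.
export_dir = '/tmp/dltk_model/1554812345'

predict_fn = tf.contrib.predictor.from_saved_model(export_dir)

# The export above allowed arbitrary spatial dimensions: [None, None, None, NUM_CHANNELS].
volume = np.zeros([1, 121, 145, 121, 1], dtype=np.float32)
outputs = predict_fn({'x': volume})
print({k: v.shape for k, v in outputs.items()})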