def optimize(nn_last_layer, correct_label, learning_rate, num_classes):
    """
    Build the TensorFlow loss and optimizer operations.
    :param nn_last_layer: TF Tensor of the last layer in the neural network
    :param correct_label: TF Placeholder for the correct label image
    :param learning_rate: TF Placeholder for the learning rate
    :param num_classes: Number of classes to classify
    :return: Tuple of (logits, train_op, cross_entropy_loss)
    """
    # DONE: Implement function
    logits = tf.reshape(nn_last_layer, (-1, num_classes))
    correct_label = tf.reshape(correct_label, (-1, num_classes))
    # Define the loss function: mean softmax cross-entropy over all pixels
    cross_entropy_loss = tf.reduce_mean(
        tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=correct_label))
    # Define the training operation
    optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
    train_op = optimizer.minimize(cross_entropy_loss)
    return logits, train_op, cross_entropy_loss
tests.test_optimize(optimize)
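
For intuition, the reshape in optimize flattens the 4-D decoder output so that every pixel becomes one row of class scores, the layout tf.nn.softmax_cross_entropy_with_logits expects. A minimal sketch with hypothetical shapes (not part of the original example):

import tensorflow as tf

dummy_output = tf.zeros((4, 160, 576, 2))    # hypothetical (batch, height, width, num_classes)
flat_logits = tf.reshape(dummy_output, (-1, 2))
print(flat_logits.shape)                     # (368640, 2): one row of class scores per pixel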


def train_nn(sess, epochs, batch_size, get_batches_fn, train_op, cross_entropy_loss, input_image,
             correct_label, keep_prob, learning_rate):
    """
    Train neural network and print out the loss during training.
    :param sess: TF Session
    :param epochs: Number of epochs
    :param batch_size: Batch size
    :param get_batches_fn: Function to get batches of training data.  Call using get_batches_fn(batch_size)
    :param train_op: TF Operation to train the neural network
    :param cross_entropy_loss: TF Tensor for the amount of loss
    :param input_image: TF Placeholder for input images
    :param correct_label: TF Placeholder for label images
    :param keep_prob: TF Placeholder for dropout keep probability
    :param learning_rate: TF Placeholder for learning rate
    """
    # The listing was truncated here; the loop below is the canonical training
    # loop for this project, with example feed values for keep_prob and the
    # learning rate.
    for epoch in range(epochs):
        for image, label in get_batches_fn(batch_size):
            _, loss = sess.run(
                [train_op, cross_entropy_loss],
                feed_dict={input_image: image,
                           correct_label: label,
                           keep_prob: 0.5,
                           learning_rate: 0.0001})
        print('Epoch {}/{}: loss = {:.4f}'.format(epoch + 1, epochs, loss))
Example #3
def perform_tests():
    tests.test_for_kitti_dataset(data_dir)
    tests.test_load_vgg(load_vgg, tf)
    tests.test_layers(layers)
    tests.test_optimize(optimize)
    tests.test_train_nn(train_nn)
Example #4
def run_tests():
    tests.test_load_vgg(load_vgg, tf)
    tests.test_layers(layers)
    tests.test_optimize(optimize)
    tests.test_train_nn(train_nn)
Example #5
def run_tests(data_dir):
    tests.test_for_kitti_dataset(data_dir)
    tests.test_load_vgg(load_vgg, tf)
    tests.test_layers(layers)
    tests.test_optimize(optimize)
    tests.test_train_nn(train_nn)
Example #6
def run_tests(self):
    tests.test_load_vgg(self.load_vgg, tf)
    tests.test_layers(self.layers)
    tests.test_optimize(self.optimize_cross_entropy)
    tests.test_train_nn(self.train_nn)
Example #7
def run_tests():
    tests.test_layers(layers)
    tests.test_optimize(optimize)
    tests.test_for_kitti_dataset(DATA_DIRECTORY)
    tests.test_train_nn(train_nn)
Example #8
def execute_test_suite():
    tests.test_layers(layers)
    tests.test_optimize(optimize)
    tests.test_for_kitti_dataset(DATA_DIRECTORY)
    tests.test_train_nn(train_nn)
Example #9
def run():
    num_classes = 2
    image_shape = (160, 576)
    data_dir = './data'
    runs_dir = './runs'

    # Hyperparameters
    learning_rate = 0.0001
    lr = tf.constant(learning_rate)
    epochs = 100
    batch_size = 10

    # Check TensorFlow Version
    assert LooseVersion(tf.__version__) >= LooseVersion('1.0'), \
        'Please use TensorFlow version 1.0 or newer.  You are using {}'.format(tf.__version__)
    print('TensorFlow Version: {}'.format(tf.__version__))

    # Check for a GPU
    if not tf.test.gpu_device_name():
        warnings.warn(
            'No GPU found. Please use a GPU to train your neural network.')
    else:
        print('Default GPU Device: {}'.format(tf.test.gpu_device_name()))

    # Perform unit tests
    tests.test_for_kitti_dataset(data_dir)
    tests.test_load_vgg(load_vgg, tf)
    tests.test_layers(layers)
    tests.test_optimize(optimize)
    tests.test_train_nn(train_nn)

    # Download pretrained vgg model
    helper.maybe_download_pretrained_vgg(data_dir)

    # OPTIONAL: Train and run inference on the Cityscapes dataset instead of the Kitti dataset.
    # You'll need a GPU with at least 10 teraFLOPS to train on.
    #  https://www.cityscapes-dataset.com/

    with tf.Session() as sess:
        # Path to vgg model
        vgg_path = os.path.join(data_dir, 'vgg')
        # Create function to get batches
        get_batches_fn = helper.gen_batch_function(
            os.path.join(data_dir, 'data_road/training'), image_shape)

        # OPTIONAL: Augment Images for better results
        #  https://datascience.stackexchange.com/questions/5224/how-to-prepare-augment-images-for-neural-network

        # Build NN using load_vgg, layers, and optimize
        correct_label = tf.placeholder(tf.float32,
                                       [None, None, None, num_classes])

        image_input, keep_prob, layer3_out, layer4_out, layer7_out = load_vgg(
            sess, vgg_path)
        output_layer = layers(layer3_out, layer4_out, layer7_out, num_classes)
        logits, train_op, cross_entropy_loss = optimize(
            output_layer, correct_label, lr, num_classes)

        # Train NN using the train_nn function
        sess.run(tf.global_variables_initializer())
        train_nn(sess, epochs, batch_size, get_batches_fn, train_op,
                 cross_entropy_loss, image_input, correct_label, keep_prob, lr)

        # Save inference data using helper.save_inference_samples
        helper.save_inference_samples(runs_dir, data_dir, sess, image_shape,
                                      logits, keep_prob, image_input)
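
helper.save_inference_samples itself is not part of this listing; as a hedged sketch, its per-image inference step presumably looks like the function below (an assumption based on the standard project helper; infer_road_mask and the argument `image`, a single test frame resized to image_shape, are hypothetical names). Dropout is disabled at test time by feeding keep_prob as 1.0.

def infer_road_mask(sess, logits, keep_prob, image_input, image, image_shape):
    # Softmax over the flattened logits gives per-pixel class probabilities.
    im_softmax = sess.run(tf.nn.softmax(logits),
                          {keep_prob: 1.0, image_input: [image]})
    # Column 1 holds the "road" class; reshape back to image dimensions.
    road_prob = im_softmax[:, 1].reshape(image_shape[0], image_shape[1])
    # Threshold at 0.5 to obtain a binary segmentation mask.
    return (road_prob > 0.5).reshape(image_shape[0], image_shape[1], 1)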
Example #10
def run():
    num_classes = 2
    image_shape = (160, 576)
    data_dir = './data'
    runs_dir = './runs'
    tests.test_for_kitti_dataset(data_dir)

    # Download pretrained vgg model
    helper.maybe_download_pretrained_vgg(data_dir)

    # tests.test_load_vgg(load_vgg, tf)
    tests.test_layers(layers)
    tests.test_optimize(optimize)
    tests.test_train_nn(train_nn)

    # OPTIONAL: Train and run inference on the Cityscapes dataset instead of the Kitti dataset.
    # You'll need a GPU with at least 10 teraFLOPS to train on.
    #  https://www.cityscapes-dataset.com/

    with tf.Session() as sess:
        # Path to vgg model
        vgg_path = os.path.join(data_dir, 'vgg')
        # Create function to get batches
        get_batches_fn = helper.gen_batch_function(
            os.path.join(data_dir, 'data_road/training'), image_shape)

        # OPTIONAL: Augment Images for better results
        #  https://datascience.stackexchange.com/questions/5224/how-to-prepare-augment-images-for-neural-network

        epochs = 5
        batch_size = 5

        # TF placeholders; labels are float32, as required by
        # tf.nn.softmax_cross_entropy_with_logits
        correct_label = tf.placeholder(tf.float32,
                                       [None, None, None, num_classes],
                                       name='correct_label')
        learning_rate = tf.placeholder(tf.float32, name='learning_rate')

        input_image, keep_prob, vgg_layer3_out, vgg_layer4_out, vgg_layer7_out = load_vgg(
            sess, vgg_path)

        nn_last_layer = layers(vgg_layer3_out, vgg_layer4_out, vgg_layer7_out,
                               num_classes)

        logits, train_op, cross_entropy_loss = optimize(
            nn_last_layer, correct_label, learning_rate, num_classes)

        saver = tf.train.Saver()

        sess.run(tf.global_variables_initializer())

        if os.path.isfile('./data/model/checkpoint'):
            print("loading check point file...")
            saver.restore(sess, './data/model/model.ckpt')

        # Set training to True to (re)train the model; with False, the model
        # restored from the checkpoint is used for inference only.
        training = False
        if training:
            train_nn(sess, epochs, batch_size, get_batches_fn, train_op,
                     cross_entropy_loss, input_image, correct_label, keep_prob,
                     learning_rate)
            saver.save(sess, "./data/model/model.ckpt")

        # Save inference data using helper.save_inference_samples
        helper.save_inference_samples(runs_dir, data_dir, sess, image_shape,
                                      logits, keep_prob, input_image)
Example #11
        # Reconstructed call (the snippet was truncated here): build the decoder
        # from the VGG skip connections with L2 regularization.
        graph_out = layers_regularizer(vgg_layer3_out, vgg_layer4_out,
                                       vgg_layer7_out, num_classes,
                                       one_by_one_channels, l2_scale)
        #graph_out = layers_dropout(vgg_layer3_out, vgg_layer4_out, vgg_layer7_out, vgg_keep_prob, num_classes, one_by_one_channels)
        #graph_out = layers_deep(vgg_layer3_out, vgg_layer4_out, vgg_layer7_out, vgg_keep_prob, num_classes, one_by_one_channels)

        labels = tf.placeholder(tf.bool, name='correct_label')
        learning_rate = tf.placeholder(tf.float32, name='learning_rate')

        logits, train_op, cross_entropy_loss = optimize(
            graph_out, labels, learning_rate)

        # Train NN using the train_nn function
        train_nn(sess, epochs, batch_size, get_batches_fn, train_op,
                 cross_entropy_loss, vgg_input, labels, vgg_keep_prob,
                 learning_rate)

        # Save inference data using helper.save_inference_samples
        helper.save_inference_samples(runs_dir, data_dir, sess, image_shape,
                                      logits, vgg_keep_prob, vgg_input,
                                      num_classes)
        # OPTIONAL: Apply the trained model to a video


tests.test_load_vgg(load_vgg, tf)
tests.test_layers(layers_regularizer, num_classes)
tests.test_optimize(optimize, num_classes)
tests.test_train_nn(train_nn)

if __name__ == '__main__':
    run()
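
layers_regularizer is not included in this listing; below is a minimal sketch of the kind of L2-regularized 1x1 convolution it presumably applies to each VGG skip connection (conv_1x1_l2 is a hypothetical helper, not the repository's actual code):

def conv_1x1_l2(x, num_outputs, l2_scale):
    # 1x1 convolution with L2 weight regularization (assumed building block).
    return tf.layers.conv2d(
        x, num_outputs, kernel_size=1, padding='same',
        kernel_regularizer=tf.contrib.layers.l2_regularizer(l2_scale))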
Example #12
def train(epochs: int = None, save_model_freq: int = None, batch_size: int = None, learning_rate: float = None,
          keep_prob: float = None, dataset: str = None):
    """
    Performs the FCN training from beginning to end, that is, downloads the required datasets and pretrained
    models, constructs the FCN architecture, trains it, and saves the trained model.
    :param epochs: number of epochs for training
    :param save_model_freq: save model each save_model_freq epoch
    :param batch_size: batch size for training
    :param learning_rate: learning rate for training
    :param keep_prob: keep probability for dropout layers for training
    :param dataset: dataset name
    """
    if None in [epochs, save_model_freq, batch_size, learning_rate, keep_prob, dataset]:
        raise ValueError('some parameters were not specified for function "%s"' % train.__name__)

    dataset = DATASETS[dataset]

    if not os.path.exists(dataset.data_root_dir):
        os.makedirs(dataset.data_root_dir)

    # Download Kitti Road dataset
    helper.maybe_download_dataset_from_yandex_disk(dataset)

    # Download pretrained vgg model
    helper.maybe_download_pretrained_vgg_from_yandex_disk(dataset.data_root_dir)

    # Run tests to check that environment is ready to execute the semantic segmentation pipeline
    if dataset.name == 'kitti_road':
        tests.test_for_kitti_dataset(dataset.data_root_dir)
    tests.test_load_vgg(load_vgg, tf)
    tests.test_layers(layers)
    tests.test_optimize(optimize)
    tests.test_train_nn(train_nn, dataset)

    # TODO: Train and run inference on the Cityscapes dataset instead of the Kitti dataset.
    #  https://www.cityscapes-dataset.com/

    with tf.Session(config=tf.ConfigProto(log_device_placement=True)) as sess:
        # Path to vgg model
        vgg_path = os.path.join(dataset.data_root_dir, 'vgg')
        # Create function to get batches
        get_batches_fn = helper.gen_batch_function(dataset.data_training_dir, dataset.image_shape)

        # TODO: Augment Images for better results
        #  https://datascience.stackexchange.com/questions/5224/how-to-prepare-augment-images-for-neural-network

        image_input_tensor, keep_prob_tensor, layer3_out_tensor, layer4_out_tensor, layer7_out_tensor = \
            load_vgg(sess, vgg_path)
        output_layer_tensor = layers(layer3_out_tensor, layer4_out_tensor, layer7_out_tensor, dataset.num_classes)
        correct_label_tensor = tf.placeholder(tf.float32, (None, None, None, dataset.num_classes))
        learning_rate_tensor = tf.placeholder(tf.float32)
        logits_tensor, train_op_tensor, cross_entropy_loss_tensor, softmax_tensor = \
            optimize(output_layer_tensor, correct_label_tensor, learning_rate_tensor, dataset.num_classes)

        iou_tensor, iou_op_tensor = mean_iou(softmax_tensor, correct_label_tensor, dataset.num_classes)

        train_nn(sess, dataset, epochs, save_model_freq, batch_size, learning_rate, keep_prob,
                 get_batches_fn, train_op_tensor, cross_entropy_loss_tensor, image_input_tensor, correct_label_tensor,
                 keep_prob_tensor, learning_rate_tensor, iou_tensor, iou_op_tensor)

        save_model(sess, 'fcn8-final', dataset,
                   epochs=epochs, batch_size=batch_size, learning_rate=learning_rate, keep_prob=keep_prob)
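
The mean_iou helper called in train() is not shown in this listing; a minimal sketch using tf.metrics.mean_iou (an assumption about the implementation, not the repository's actual code):

def mean_iou(softmax, correct_label, num_classes):
    # Collapse one-hot/probability tensors to per-pixel class indices.
    prediction = tf.argmax(tf.reshape(softmax, (-1, num_classes)), axis=1)
    ground_truth = tf.argmax(tf.reshape(correct_label, (-1, num_classes)), axis=1)
    # tf.metrics.mean_iou returns (metric, update_op); run the update op on each
    # batch after tf.local_variables_initializer().
    iou, iou_op = tf.metrics.mean_iou(ground_truth, prediction, num_classes)
    return iou, iou_op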