def run():
    num_classes = 2
    image_shape = (160, 576)
    data_dir = './data'
    runs_dir = './runs'
    tests.test_for_kitti_dataset(data_dir)

    # Download pretrained vgg model
    helper.maybe_download_pretrained_vgg(data_dir)

    with tf.Session() as sess:
        # Path to vgg model
        vgg_path = os.path.join(data_dir, 'vgg')
        # Create function to get batches
        get_batches_fn = helper.gen_batch_function(os.path.join(data_dir, 'data_road/training'), image_shape)

        epochs = 50
        batch_size = 5

        # TF placeholders
        correct_label = tf.placeholder(tf.int32, [None, None, None, num_classes], name='correct_label')
        learning_rate = tf.placeholder(tf.float32, name='learning_rate')

        input_image, keep_prob, vgg_layer3_out, vgg_layer4_out, vgg_layer7_out = load_vgg(sess, vgg_path)

        nn_last_layer = layers(vgg_layer3_out, vgg_layer4_out, vgg_layer7_out, num_classes)

        logits, train_op, cross_entropy_loss = optimize(nn_last_layer, correct_label, learning_rate, num_classes)

        train_nn(sess, epochs, batch_size, get_batches_fn, train_op,
                 cross_entropy_loss, input_image, correct_label, keep_prob,
                 learning_rate)

        helper.save_inference_samples(runs_dir, data_dir, sess, image_shape, logits, keep_prob, input_image)
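
All of these snippets call the same four helpers (load_vgg, layers, optimize, train_nn) without showing them. For orientation, here is a minimal sketch of load_vgg, assuming the Udacity VGG16 SavedModel with its conventional 'vgg16' tag and tensor names; adjust the names if your export differs.

import tensorflow as tf

def load_vgg(sess, vgg_path):
    # Load the frozen VGG16 SavedModel into the current session.
    tf.saved_model.loader.load(sess, ['vgg16'], vgg_path)
    graph = tf.get_default_graph()
    # Grab the input, dropout, and the three taps used by the decoder.
    image_input = graph.get_tensor_by_name('image_input:0')
    keep_prob = graph.get_tensor_by_name('keep_prob:0')
    layer3_out = graph.get_tensor_by_name('layer3_out:0')
    layer4_out = graph.get_tensor_by_name('layer4_out:0')
    layer7_out = graph.get_tensor_by_name('layer7_out:0')
    return image_input, keep_prob, layer3_out, layer4_out, layer7_out
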
Example #2
def run():
    num_classes = 2
    image_shape = (160, 576)
    data_dir = './data'
    runs_dir = './runs'
    tests.test_for_kitti_dataset(data_dir)

    # Download pretrained vgg model
    helper.maybe_download_pretrained_vgg(data_dir)

    # OPTIONAL: Train and Inference on the cityscapes dataset instead of the Kitti dataset.
    # You'll need a GPU with at least 10 teraFLOPS to train on.
    #  https://www.cityscapes-dataset.com/

    with tf.Session() as sess:
        # Path to vgg model
        vgg_path = os.path.join(data_dir, 'vgg')
        # Create function to get batches
        get_batches_fn = helper.gen_batch_function(
            os.path.join(data_dir, 'data_road/training'), image_shape)

        # OPTIONAL: Augment Images for better results
        #  https://datascience.stackexchange.com/questions/5224/how-to-prepare-augment-images-for-neural-network

        # TODO: Build NN using load_vgg, layers, and optimize function
        input_image, keep_prob, layer3_out, layer4_out, layer7_out = load_vgg(
            sess, vgg_path)

        layer_output = layers(layer3_out, layer4_out, layer7_out, num_classes)

        correct_label = tf.placeholder(tf.int32,
                                       shape=[None, None, None, num_classes],
                                       name='correct_label')
        learning_rate = tf.placeholder(tf.float32)

        logits, train_op, cross_entropy_loss = optimize(
            layer_output, correct_label, learning_rate, num_classes)
        sess.run(tf.global_variables_initializer())

        epochs = 30
        batch_size = 5

        # TODO: Train NN using the train_nn function
        train_nn(sess, epochs, batch_size, get_batches_fn, train_op,
                 cross_entropy_loss, input_image, correct_label, keep_prob,
                 learning_rate)

        # TODO: Save inference data using helper.save_inference_samples
        helper.save_inference_samples(runs_dir, data_dir, sess, image_shape,
                                      logits, keep_prob, input_image)
Example #3
def run():
    num_classes = 2
    image_shape = (160, 576)
    data_dir = './data'
    runs_dir = './runs'
    tests.test_for_kitti_dataset(data_dir)

    # Download pretrained vgg model
    helper.maybe_download_pretrained_vgg(data_dir)

    # OPTIONAL: Train and Inference on the cityscapes dataset instead of the Kitti dataset.
    # You'll need a GPU with at least 10 teraFLOPS to train on.
    #  https://www.cityscapes-dataset.com/

    with tf.Session() as sess:

        # path to vgg model
        vgg_path = os.path.join(data_dir, 'vgg')
        # create function to get batches
        get_batches_fn = helper.gen_batch_function(
            os.path.join(data_dir, 'data_road/training'), image_shape)

        # placeholders
        correct_label = tf.placeholder(tf.int32,
                                       [None, None, None, num_classes],
                                       name='correct_label')
        learning_rate = tf.placeholder(tf.float32, name='learning_rate')

        # load pre-trained VGG16 model
        input_image, keep_prob, layer3_out, layer4_out, layer7_out = load_vgg(
            sess, vgg_path)

        # build fully convolutional layers and get the output layer
        layer_output = layers(layer3_out, layer4_out, layer7_out, num_classes)

        # optimizer and loss
        logits, train_op, loss = optimize(layer_output, correct_label,
                                          learning_rate, num_classes)

        # train network
        train_nn(sess, EPOCHS, BATCH_SIZE, get_batches_fn, train_op, loss,
                 input_image, correct_label, keep_prob, learning_rate)

        # save images after segmentation
        output_dir = helper.save_inference_samples(runs_dir, data_dir, sess,
                                                   image_shape, logits,
                                                   keep_prob, input_image)

        # make video from images
        helper.save_inference_video_from_images(output_dir)
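
The layers() helper fuses the three VGG taps into an FCN-8 decoder. A minimal sketch, assuming 1x1 projections followed by transposed-convolution upsampling with skip connections; the kernel sizes are the usual FCN-8 choices, not necessarily each author's exact ones.

def layers(vgg_layer3_out, vgg_layer4_out, vgg_layer7_out, num_classes):
    # Project each VGG tap down to num_classes channels with 1x1 convolutions.
    l7 = tf.layers.conv2d(vgg_layer7_out, num_classes, 1, padding='same')
    l4 = tf.layers.conv2d(vgg_layer4_out, num_classes, 1, padding='same')
    l3 = tf.layers.conv2d(vgg_layer3_out, num_classes, 1, padding='same')
    # Upsample 2x and add the pool4 skip connection.
    up4 = tf.layers.conv2d_transpose(l7, num_classes, 4, strides=2, padding='same')
    fuse4 = tf.add(up4, l4)
    # Upsample 2x again and add the pool3 skip connection.
    up3 = tf.layers.conv2d_transpose(fuse4, num_classes, 4, strides=2, padding='same')
    fuse3 = tf.add(up3, l3)
    # Final 8x upsample back to the input resolution.
    return tf.layers.conv2d_transpose(fuse3, num_classes, 16, strides=8, padding='same')
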
Example #4
def run():
    num_classes = 2
    image_shape = (160, 576)
    epochs = 25
    batch_size = 8
    learning_rate = 0.0005
    data_dir = './data'
    runs_dir = './runs'
    tests.test_for_kitti_dataset(data_dir)

    # Download pretrained vgg model
    helper.maybe_download_pretrained_vgg(data_dir)

    # OPTIONAL: Train and Inference on the cityscapes dataset instead of the Kitti dataset.
    # You'll need a GPU with at least 10 teraFLOPS to train on.
    #  https://www.cityscapes-dataset.com/

    # OPTIONAL: Augment Images for better results
    #  https://datascience.stackexchange.com/questions/5224/how-to-prepare-augment-images-for-neural-network

    # OPTIONAL: Apply the trained model to a video

    with tf.Graph().as_default():
        with tf.Session() as sess:
            # Path to vgg model
            vgg_path = os.path.join(data_dir, 'vgg')
            # Create function to get batches
            get_batches_fn = helper.gen_batch_function(
                os.path.join(data_dir, 'data_road/training'), image_shape)

            input_image, keep_prob, layer3_out, layer4_out, layer7_out = load_vgg(
                sess, vgg_path)

#            layer3_out = tf.stop_gradient(layer3_out)
#            layer4_out = tf.stop_gradient(layer4_out)
#            layer7_out = tf.stop_gradient(layer7_out)

            output = layers(layer3_out, layer4_out, layer7_out, num_classes)

            correct_label = tf.placeholder(
                tf.float32, [None, image_shape[0], image_shape[1], num_classes])

            logits, train_op, cross_entropy_loss = optimize(
                output, correct_label, learning_rate, num_classes)

            train_nn(sess, epochs, batch_size, get_batches_fn, train_op,
                     cross_entropy_loss, input_image, correct_label, keep_prob, learning_rate)

            helper.save_inference_samples(
                runs_dir, data_dir, sess, image_shape, logits, keep_prob, input_image)
Example #5
def run():

    print("--------run----------")

    data_dir = './data'
    runs_dir = './runs'

    tests.test_for_kitti_dataset(data_dir)

    # Download pretrained vgg model
    helper.maybe_download_pretrained_vgg(data_dir)

    # OPTIONAL: Train and Inference on the cityscapes dataset instead of the Kitti dataset.
    # You'll need a GPU with at least 10 teraFLOPS to train on.
    #  https://www.cityscapes-dataset.com/

    with tf.Session() as sess:
        # Path to vgg model
        vgg_path = os.path.join(data_dir, 'vgg')
        # Create function to get batches
        get_batches_fn = helper.gen_batch_function(os.path.join(
            data_dir, 'data_road/training'),
                                                   image_shape=IMAGE_SHAPE)

        # OPTIONAL: Augment Images for better results
        #  https://datascience.stackexchange.com/questions/5224/how-to-prepare-augment-images-for-neural-network

        # TODO: Build NN using load_vgg, layers, and optimize function
        input_image, keep_prob, layer3_out, layer4_out, layer7_out = load_vgg(
            sess, vgg_path)

        layer_output = layers(layer3_out, layer4_out, layer7_out,
                              NUMBER_OF_CLASSES)

        # TF placeholders for the labels and learning rate used below
        correct_label = tf.placeholder(tf.int32,
                                       shape=[None, None, None, NUMBER_OF_CLASSES],
                                       name='correct_label')
        learning_rate = tf.placeholder(tf.float32, name='learning_rate')

        # optimize
        logits, train_op, cross_entropy_loss = optimize(
            layer_output, correct_label, learning_rate, NUMBER_OF_CLASSES)

        # Initialize all variables
        sess.run(tf.global_variables_initializer())
        sess.run(tf.local_variables_initializer())

        # TODO: Train NN using the train_nn function
        train_nn(sess, EPOCHS, BATCH_SIZE, get_batches_fn, train_op,
                 cross_entropy_loss, input_image, correct_label, keep_prob,
                 learning_rate)

        # TODO: Save inference data using helper.save_inference_samples
        helper.save_inference_samples(runs_dir, data_dir, sess, IMAGE_SHAPE,
                                      logits, keep_prob, input_image)
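
optimize() is usually pixel-wise cross-entropy over reshaped logits with an Adam step. A sketch under that assumption (Example #6 below returns extra prediction tensors from its own variant):

def optimize(nn_last_layer, correct_label, learning_rate, num_classes):
    # Flatten predictions and labels to (num_pixels, num_classes).
    logits = tf.reshape(nn_last_layer, (-1, num_classes))
    labels = tf.reshape(tf.cast(correct_label, tf.float32), (-1, num_classes))
    cross_entropy_loss = tf.reduce_mean(
        tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=labels))
    train_op = tf.train.AdamOptimizer(learning_rate).minimize(cross_entropy_loss)
    return logits, train_op, cross_entropy_loss
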
Example #6
def run():
    num_classes = 2
    image_shape = (160, 576)
    data_dir = './data'
    runs_dir = './runs'
    tests.test_for_kitti_dataset(data_dir)

    # Download pretrained vgg model
    helper.maybe_download_pretrained_vgg(data_dir)

    # OPTIONAL: Train and Inference on the cityscapes dataset instead of the Kitti dataset.
    # You'll need a GPU with at least 10 teraFLOPS to train on.
    #  https://www.cityscapes-dataset.com/

    with tf.Session() as sess:

        # sess.run(tf.global_variables_initializer())
        # Path to vgg model
        vgg_path = os.path.join(data_dir, 'vgg')
        # Create function to get batches
        get_batches_fn = helper.gen_batch_function(
            os.path.join(data_dir, 'data_road/training'), image_shape)
        image_input, keep, layer3, layer4, layer7 = load_vgg(sess, vgg_path)
        layers_output = layers(layer3, layer4, layer7, num_classes)

        #image_placeholder = tf.placeholder(tf.float32, shape=(None, image_shape[0], image_shape[1], 3))
        label_placeholder = tf.placeholder(tf.bool,
                                           shape=(None, image_shape[0],
                                                  image_shape[1], num_classes))
        #keep_prob = tf.placeholder(tf.float32, name ='keep')
        learning_rate_placeholder = tf.placeholder(tf.float32, name='rate')

        logits, train_op, cross_entropy, pred, probabilities = optimize(
            layers_output, label_placeholder, 0.001, num_classes)

        train_nn(sess, 100, 10, get_batches_fn, train_op, cross_entropy,
                 image_input, label_placeholder, keep, learning_rate_placeholder,
                 pred, probabilities)

        # OPTIONAL: Augment Images for better results
        #  https://datascience.stackexchange.com/questions/5224/how-to-prepare-augment-images-for-neural-network

        # TODO: Build NN using load_vgg, layers, and optimize function

        # TODO: Train NN using the train_nn function

        # TODO: Save inference data using helper.save_inference_samples

        helper.save_inference_samples(runs_dir, data_dir, sess, image_shape,
                                      logits, keep, image_input)
Example #7
def run():
    image_shape = (160, 576)
    data_dir = './data'
    runs_dir = './runs'
    tests.test_for_kitti_dataset(data_dir)

    # Download pretrained vgg model
    helper.maybe_download_pretrained_vgg(data_dir)

    # OPTIONAL: Train and Inference on the cityscapes dataset instead of the Kitti dataset.
    # You'll need a GPU with at least 10 teraFLOPS to train on.
    #  https://www.cityscapes-dataset.com/

    with tf.Session() as sess:
        # Path to vgg model
        vgg_path = os.path.join(data_dir, 'vgg')
        #augmentation_args = {'seed': 1123, 'scale': 0.05, 'rotate':0.03, 'shear': 0.03, 'translate':16}
        augmentation_args = None
        # Create function to get batches
        get_batches_fn = helper.gen_batch_function(
            os.path.join(data_dir, 'data_road/training'),
            image_shape,
            num_classes,
            augmentation_args=augmentation_args)

        # Build NN using load_vgg, layers, and optimize function
        vgg_input, vgg_keep_prob, vgg_layer3_out, vgg_layer4_out, vgg_layer7_out = load_vgg(
            sess, vgg_path)

        graph_out = layers_regularizer(vgg_layer3_out, vgg_layer4_out,
                                       vgg_layer7_out, num_classes,
                                       one_by_one_channels, l2_scale)
        #graph_out = layers_dropout(vgg_layer3_out, vgg_layer4_out, vgg_layer7_out, vgg_keep_prob, num_classes, one_by_one_channels)
        #graph_out = layers_deep(vgg_layer3_out, vgg_layer4_out, vgg_layer7_out, vgg_keep_prob, num_classes, one_by_one_channels)

        labels = tf.placeholder(tf.bool, name='correct_label')
        learning_rate = tf.placeholder(tf.float32, name='learning_rate')

        logits, train_op, cross_entropy_loss = optimize(
            graph_out, labels, learning_rate)

        # Train NN using the train_nn function
        train_nn(sess, epochs, batch_size, get_batches_fn, train_op,
                 cross_entropy_loss, vgg_input, labels, vgg_keep_prob,
                 learning_rate)

        # Save inference data using helper.save_inference_samples
        helper.save_inference_samples(runs_dir, data_dir, sess, image_shape,
                                      logits, vgg_keep_prob, vgg_input,
                                      num_classes)
Example #8
def run():
    num_classes = 2
    image_shape = (160, 576)
    data_dir = './data'
    runs_dir = './runs'
    tests.test_for_kitti_dataset(data_dir)

    epochs = 15
    batch_size = 5

    # Download pretrained vgg model
    helper.maybe_download_pretrained_vgg(data_dir)

    # Config to turn on JIT compilation
    config = tf.ConfigProto()
    config.graph_options.optimizer_options.global_jit_level = tf.OptimizerOptions.ON_1
    config.gpu_options.allocator_type = 'BFC'

    # OPTIONAL: Train and Inference on the cityscapes dataset instead of the Kitti dataset.
    # You'll need a GPU with at least 10 teraFLOPS to train on.
    #  https://www.cityscapes-dataset.com/

    with tf.Session(config=config) as sess:
        # Path to vgg model
        vgg_path = os.path.join(data_dir, 'vgg')
        # Create function to get batches
        get_batches_fn = helper.gen_batch_function(
            os.path.join(data_dir, 'data_road/training'), image_shape)

        # OPTIONAL: Augment Images for better results
        #  https://datascience.stackexchange.com/questions/5224/how-to-prepare-augment-images-for-neural-network

        # Build NN using load_vgg, layers, and optimize function
        input_image, keep_prob, layer3_out, layer4_out, layer7_out = load_vgg(
            sess, vgg_path)
        layer_output = layers(layer3_out, layer4_out, layer7_out, num_classes)

        # Train NN using the train_nn function
        correct_label = tf.placeholder(tf.int32)
        learning_rate = tf.placeholder(tf.float32)
        logits, train_op, cross_entropy_loss = optimize(
            layer_output, correct_label, learning_rate, num_classes)

        train_nn(sess, epochs, batch_size, get_batches_fn, train_op,
                 cross_entropy_loss, input_image, correct_label, keep_prob,
                 learning_rate)

        # Save inference data using helper.save_inference_samples
        helper.save_inference_samples(runs_dir, data_dir, sess, image_shape,
                                      logits, keep_prob, input_image)
Example #9
def run():
    num_classes = 2
    epochs = 15
    batches = 2
    image_shape = (160, 576)
    data_dir = './data'
    runs_dir = './runs'
    tests.test_for_kitti_dataset(data_dir)

    # Download pretrained vgg model
    helper.maybe_download_pretrained_vgg(data_dir)

    tf.reset_default_graph()
    print("Starting training with {} epochs and {} batches...".format(
        epochs, batches))

    with tf.Session() as sess:
        # Path to vgg model
        vgg_path = os.path.join(data_dir, 'vgg')

        # Create function to get batches
        get_batches_fn = helper.gen_batch_function(
            os.path.join(data_dir, 'data_road/training'), image_shape)

        # TODO: Build NN using load_vgg, layers, and optimize function
        input_image, keep_prob, layer3_out, layer4_out, layer7_out = load_vgg(
            sess, vgg_path)
        final_layer = layers(layer3_out, layer4_out, layer7_out, num_classes)
        label = tf.placeholder(tf.int32, shape=[None, None, None, num_classes])
        learning_rate = tf.placeholder(tf.float32)
        logits, train_op, loss = optimize(final_layer, label, learning_rate,
                                          num_classes)

        # TODO: Save the trained model (preparation)
        saver = tf.train.Saver()

        # TODO: Restore a saved model here:
        # saver.restore(sess, './runs/sem_seg_model.ckpt')

        # TODO: Train NN using the train_nn function
        sess.run(tf.global_variables_initializer())
        train_nn(sess, epochs, batches, get_batches_fn, train_op, loss,
                 input_image, label, keep_prob, learning_rate)

        # TODO: Save inference data using helper.save_inference_samples
        helper.save_inference_samples(runs_dir, data_dir, sess, image_shape,
                                      logits, keep_prob, input_image)

        # TODO: save model
        saver.save(sess, './runs/sem_seg_model.ckpt')
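
train_nn() is the remaining shared helper: a plain epoch/batch loop that feeds dropout and the learning rate. A minimal sketch; the keep_prob of 0.5 and the learning rate of 1e-4 are assumptions, and some examples above initialize variables themselves instead of inside train_nn.

def train_nn(sess, epochs, batch_size, get_batches_fn, train_op,
             cross_entropy_loss, input_image, correct_label, keep_prob,
             learning_rate):
    sess.run(tf.global_variables_initializer())
    for epoch in range(epochs):
        for images, labels in get_batches_fn(batch_size):
            # One optimizer step per batch; keep the last loss for logging.
            _, loss = sess.run(
                [train_op, cross_entropy_loss],
                feed_dict={input_image: images,
                           correct_label: labels,
                           keep_prob: 0.5,
                           learning_rate: 1e-4})
        print("Epoch {}/{}: loss = {:.4f}".format(epoch + 1, epochs, loss))
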
Example #10
def run():
    num_classes = 2
    image_shape = (160, 576)
    data_dir = './data'
    runs_dir = './runs'
    tests.test_for_kitti_dataset(data_dir)

    # Download pretrained vgg model
    helper.maybe_download_pretrained_vgg(data_dir)

    print('vgg has been loaded')

    # OPTIONAL: Train and Inference on the cityscapes dataset instead of the Kitti dataset.
    # You'll need a GPU with at least 10 teraFLOPS to train on.
    #  https://www.cityscapes-dataset.com/

    correct_label = tf.placeholder(tf.float32,
                                   [None, *image_shape, num_classes],
                                   name='correct_label')
    learning_rate = tf.placeholder(tf.float32, name='learning_rate')

    with tf.Session() as sess:
        # Path to vgg model
        vgg_path = os.path.join(data_dir, 'vgg')
        # Create function to get batches
        get_batches_fn = helper.gen_batch_function(
            os.path.join(data_dir, 'data_road/training'), image_shape)

        # OPTIONAL: Augment Images for better results
        #  https://datascience.stackexchange.com/questions/5224/how-to-prepare-augment-images-for-neural-network

        # TODO: Build NN using load_vgg, layers, and optimize function
        image_input, keep_prob, layer3_out, layer4_out, layer7_out = load_vgg(
            sess, vgg_path)
        nn_last_layer = layers(layer3_out, layer4_out, layer7_out, num_classes)
        logits, train_op, train_loss = optimize(nn_last_layer, correct_label,
                                                learning_rate, num_classes)

        # tf.summary.FileWriter('./logs2/1',sess.graph)

        # TODO: Train NN using the train_nn function
        epochs = 50
        batch_size = 16
        train_nn(sess, epochs, batch_size, get_batches_fn, train_op,
                 train_loss, image_input, correct_label, keep_prob,
                 learning_rate)

        # TODO: Save inference data using helper.save_inference_samples
        helper.save_inference_samples(runs_dir, data_dir, sess, image_shape,
                                      logits, keep_prob, image_input)
Example #11
def run():
    num_classes = 2
    image_shape = (160, 576)
    data_dir = './data'
    runs_dir = './runs'
    tests.test_for_kitti_dataset(data_dir)

    epochs = 50
    batch_size = 10
    label_image = tf.placeholder(tf.float32, (None, None, None, num_classes))
    learning_rate = tf.placeholder(tf.float32)

    # Download pretrained vgg model
    helper.maybe_download_pretrained_vgg(data_dir)

    # OPTIONAL: Train and Inference on the cityscapes dataset instead of the Kitti dataset.
    # You'll need a GPU with at least 10 teraFLOPS to train on.
    #  https://www.cityscapes-dataset.com/

    with tf.Session() as sess:
        # Path to vgg model
        vgg_path = os.path.join(data_dir, 'vgg')

        # Create function to get batches
        get_batches_fn = helper.gen_batch_function(os.path.join(data_dir, 'data_road/training'), image_shape)

        # OPTIONAL: Augment Images for better results
        #  https://datascience.stackexchange.com/questions/5224/how-to-prepare-augment-images-for-neural-network

        # Build NN using load_vgg, layers, and optimize function
        vgg_input, vgg_keep_prob, vgg_layer3_out, vgg_layer4_out, vgg_layer7_out = load_vgg(sess, vgg_path)
        print(vgg_layer3_out.shape)
        print(vgg_layer4_out.shape)
        print(vgg_layer7_out.shape)
        fcn8s = layers(vgg_layer3_out, vgg_layer4_out, vgg_layer7_out, num_classes)
        logits, optimizer, cross_entropy_loss = optimize(fcn8s, label_image, learning_rate, num_classes)

        # Train NN using the train_nn function
        sess.run(tf.global_variables_initializer())
        train_nn(sess, epochs, batch_size, get_batches_fn, optimizer, cross_entropy_loss,
                 vgg_input, label_image, vgg_keep_prob, learning_rate)

        # Save the variables to disk (the saver must be created after the
        # graph variables exist).
        saver = tf.train.Saver()
        save_path = saver.save(sess, "./fcn8s.ckpt")
        print("Model saved in file: %s" % save_path)

        # Save inference data using helper.save_inference_samples
        helper.save_inference_samples(runs_dir, data_dir, sess, image_shape, logits, vgg_keep_prob, vgg_input)
Example #12
def run():
    num_classes = 2
    image_shape = (160, 576)
    data_dir = './data'
    runs_dir = './runs'
    tests.test_for_kitti_dataset(data_dir)
    EPOCHS = 20
    BATCH_SIZE = 4

    # Download pretrained vgg model
    print("start")
    helper.maybe_download_pretrained_vgg(data_dir)

    # OPTIONAL: Train and Inference on the cityscapes dataset instead of the Kitti dataset.
    # You'll need a GPU with at least 10 teraFLOPS to train on.
    #  https://www.cityscapes-dataset.com/

    with tf.Session() as sess:

        # Path to vgg model
        vgg_path = os.path.join(data_dir, 'vgg')
        # Create function to get batches
        get_batches_fn = helper.gen_batch_function(
            os.path.join(data_dir, 'data_road/training'), image_shape)

        # OPTIONAL: Augment Images for better results
        #  https://datascience.stackexchange.com/questions/5224/how-to-prepare-augment-images-for-neural-network

        # TODO: Build NN using load_vgg, layers, and optimize function
        learning_rate = tf.placeholder(dtype=tf.float32, name="learning_rate")
        correct_label = tf.placeholder(dtype=tf.int32,
                                       shape=[None, None, None, num_classes],
                                       name="correct_label")
        vgg_input_tensor, vgg_keep_prob_tensor, vgg_layer3_out_tensor, \
            vgg_layer4_out_tensor, vgg_layer7_out_tensor = load_vgg(sess, vgg_path)
        nn_last_layer = layers(vgg_layer3_out_tensor, vgg_layer4_out_tensor,
                               vgg_layer7_out_tensor, num_classes)

        logits, train_op, cross_entropy_loss = optimize(
            nn_last_layer, correct_label, learning_rate, num_classes)

        # TODO: Train NN using the train_nn function
        train_nn(sess=sess, epochs=EPOCHS, batch_size=BATCH_SIZE,
                 get_batches_fn=get_batches_fn, train_op=train_op,
                 cross_entropy_loss=cross_entropy_loss, input_image=vgg_input_tensor,
                 correct_label=correct_label, keep_prob=vgg_keep_prob_tensor,
                 learning_rate=learning_rate)
        # TODO: Save inference data using helper.save_inference_samples
        #  helper.save_inference_samples(runs_dir, data_dir, sess, image_shape, logits, keep_prob, input_image)
        helper.save_inference_samples(runs_dir, data_dir, sess, image_shape,
                                      logits, vgg_keep_prob_tensor,
                                      vgg_input_tensor)
Example #13
def run():
    NUM_CLASSES = 2
    image_shape = (160, 576)
    data_dir = './data'
    runs_dir = './runs'
    tests.test_for_kitti_dataset(data_dir)
    EPOCHS = 30
    BATCH_SIZE = 8

    # Download pretrained vgg model
    helper.maybe_download_pretrained_vgg(data_dir)

    # TF Placeholders
    truth_label = tf.placeholder(
        tf.float32,
        shape=[None, image_shape[0], image_shape[1], NUM_CLASSES],
        name="truth_lbls")
    learning_rate = tf.placeholder(tf.float32)

    with tf.Session() as sess:

        # Path to vgg model
        vgg_path = os.path.join(data_dir, 'vgg')
        # Create function to get batches
        get_batches_fn = helper.gen_batch_function(
            os.path.join(data_dir, 'data_road/training'), image_shape)

        # Build NN using load_vgg, layers, and optimize function
        input_image, keep_prob, layer3_out, layer4_out, layer7_out = load_vgg(
            sess, vgg_path)
        nn_last_layer = layers(layer3_out, layer4_out, layer7_out, NUM_CLASSES)
        logits, train_op, cross_entropy_loss = optimize(
            nn_last_layer, truth_label, learning_rate, NUM_CLASSES)

        # Train NN using the train_nn function
        train_nn(sess, EPOCHS, BATCH_SIZE, get_batches_fn, train_op,
                 cross_entropy_loss, input_image, truth_label, keep_prob,
                 learning_rate)

        # Save inference data using helper.save_inference_samples
        helper.save_inference_samples(runs_dir, data_dir, sess, image_shape,
                                      logits, keep_prob, input_image)

        # OPTIONAL: Apply the trained model to a video
        # See advanced lane finding from Term 1
        clip1 = VideoFileClip(data_dir + '/challenge_video.mp4').subclip(1, 20)
        projectClip = clip1.fx(clip_find_lane, sess, logits, keep_prob,
                               input_image, image_shape)
        projectClip.write_videofile(runs_dir + '/challenge_results.mp4',
                                    audio=False)
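
Example #13 applies the trained network to a video through a clip_find_lane effect that is not shown. A hedged sketch of what such an effect could look like, using moviepy's fl_image and OpenCV; the overlay color, blending weights, and road-class index are illustrative assumptions, not the author's implementation.

import cv2
import numpy as np

def clip_find_lane(clip, sess, logits, keep_prob, input_image, image_shape):
    def process_frame(frame):
        h, w = frame.shape[:2]
        # Resize to the network input size; cv2.resize takes (width, height).
        small = cv2.resize(frame, (image_shape[1], image_shape[0]))
        # logits come back flattened to (num_pixels, num_classes).
        out = sess.run(logits, {keep_prob: 1.0, input_image: [small]})
        out = out.reshape(image_shape[0], image_shape[1], -1)
        road_mask = out[:, :, 1] > out[:, :, 0]  # assume class 1 = road
        overlay = np.zeros_like(small)
        overlay[road_mask] = (0, 255, 0)
        blended = cv2.addWeighted(small, 1.0, overlay, 0.5, 0)
        return cv2.resize(blended, (w, h))
    # clip.fx(f, *args) calls f(clip, *args); fl_image maps over frames.
    return clip.fl_image(process_frame)
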
Example #14
def run():
    num_classes = 2
    image_shape = (160, 576)  # KITTI dataset uses 160x576 images
    data_dir = './data'
    runs_dir = './runs'
    tests.test_for_kitti_dataset(data_dir)

    # Download pretrained vgg model
    helper.maybe_download_pretrained_vgg(data_dir)

    batch_size = 8
    epochs = 40

    with tf.Session() as sess:

        # Path to vgg model
        vgg_path = os.path.join(data_dir, 'vgg')
        # Create function to get batches
        get_batches_fn = helper.gen_batch_function(
            os.path.join(data_dir, 'data_road/training'), image_shape)

        # build NN using load_vgg, layers, and optimize function
        correct_label = tf.placeholder(tf.float32,
                                       [None, None, None, num_classes])
        learning_rate = tf.placeholder(tf.float32)

        input_image, keep_prob, vgg_layer3_out, vgg_layer4_out, vgg_layer7_out = load_vgg(
            sess, vgg_path)
        output = layers(vgg_layer3_out, vgg_layer4_out, vgg_layer7_out,
                        num_classes)
        logits, train_op, cross_entropy_loss = optimize(
            output, correct_label, learning_rate, num_classes)

        # train NN using the train_nn function
        train_nn(sess, epochs, batch_size, get_batches_fn, train_op,
                 cross_entropy_loss, input_image, correct_label, keep_prob,
                 learning_rate)

        # save inference data using helper.save_inference_samples
        helper.save_inference_samples(runs_dir, data_dir, sess, image_shape,
                                      logits, keep_prob, input_image)

        # for saving the video with the model applied for the segmentation
        save_inference_video(sess,
                             image_shape,
                             logits,
                             keep_prob,
                             input_image,
                             video_file="videos/test.mp4")
Example #15
def run():
    num_classes = 2
    image_shape = (160, 576)
    data_dir = './data'
    runs_dir = './runs'
    tests.test_for_kitti_dataset(data_dir)

    # Download pretrained vgg model
    helper.maybe_download_pretrained_vgg(data_dir)

    # OPTIONAL: Train and Inference on the cityscapes dataset instead of the Kitti dataset.
    # You'll need a GPU with at least 10 teraFLOPS to train on.
    #  https://www.cityscapes-dataset.com/

    # Defining epochs and batch size
    epochs = 20
    batch_size = 2
    #GPU memory configuration
    config = tf.ConfigProto(allow_soft_placement=True)
    config.gpu_options.allocator_type = 'BFC'
    config.gpu_options.per_process_gpu_memory_fraction = 0.9
    config.gpu_options.allow_growth = True

    with tf.Session(config=config) as sess:
        # Path to vgg model
        vgg_path = os.path.join(data_dir, 'vgg')
        # Create function to get batches
        get_batches_fn = helper.gen_batch_function(
            os.path.join(data_dir, 'data_road/training'), image_shape)
        # OPTIONAL: Augment Images for better results
        #  https://datascience.stackexchange.com/questions/5224/how-to-prepare-augment-images-for-neural-network

        # TODO: Build NN using load_vgg, layers, and optimize function
        correct_label = tf.placeholder(tf.float32,
                                       shape=[None, None, None, num_classes])
        learning_rate = tf.placeholder(tf.float32)
        # Define NN structure
        l1, keep, l3, l4, l7 = load_vgg(sess, vgg_path)
        out = layers(l3, l4, l7, num_classes)
        # Optimizing
        logits, train_op, cross_entropy_loss = optimize(
            out, correct_label, learning_rate, num_classes)
        # TODO: Train NN using the train_nn function
        train_nn(sess, epochs, batch_size, get_batches_fn, train_op,
                 cross_entropy_loss, l1, correct_label, keep, learning_rate)
        # TODO: Save inference data using helper.save_inference_samples
        #  helper.save_inference_samples(runs_dir, data_dir, sess, image_shape, logits, keep_prob, input_image)
        helper.save_inference_samples(runs_dir, data_dir, sess, image_shape,
                                      logits, keep, l1)
Example #16
def run():
    num_classes = 2
    epochs = 6
    learning_rate = .001
    batch_size = 1
    image_shape = (160, 576)
    correct_labels = tf.placeholder(
        tf.int32, [1, image_shape[0], image_shape[1], num_classes])
    data_dir = './data'
    runs_dir = './runs'
    tests.test_for_kitti_dataset(data_dir)

    # Download pretrained vgg model
    helper.maybe_download_pretrained_vgg(data_dir)

    # OPTIONAL: Train and Inference on the cityscapes dataset instead of the Kitti dataset.
    # You'll need a GPU with at least 10 teraFLOPS to train on.
    #  https://www.cityscapes-dataset.com/

    with tf.Session() as sess:

        # Saver function

        # Path to vgg model
        vgg_path = os.path.join(data_dir, 'vgg')
        # Create function to get batches
        get_batches_fn = helper.gen_batch_function(
            os.path.join(data_dir, 'data_road/training'), image_shape)

        # OPTIONAL: Augment Images for better results
        #  https://datascience.stackexchange.com/questions/5224/how-to-prepare-augment-images-for-neural-network

        # TODO: Build NN using load_vgg, layers, and optimize function
        # call functions created above
        input_image, keep_prob, layer_3, layer_4, layer_7 = load_vgg(
            sess, vgg_path)
        final_layer_out = layers(layer_3, layer_4, layer_7, num_classes)
        logits, train_op, cross_entropy = optimize(final_layer_out,
                                                   correct_labels,
                                                   learning_rate, num_classes)
        saver = tf.train.Saver()
        train_nn(sess, epochs, batch_size, get_batches_fn, train_op,
                 cross_entropy, input_image, correct_labels, keep_prob,
                 learning_rate)

        saver.save(sess, './Inferer')

        helper.save_inference_samples(runs_dir, data_dir, sess, image_shape,
                                      logits, keep_prob, input_image)
Example #17
def run():
    global args

    num_classes = 2
    image_shape = (160, 576)
    data_dir = './data'
    runs_dir = './runs'
    tests.test_for_kitti_dataset(data_dir)

    # Download pretrained vgg model
    helper.maybe_download_pretrained_vgg(data_dir)

    with tf.Session() as sess:
        # Path to vgg model
        vgg_path = os.path.join(data_dir, 'vgg')
        # Create function to get batches
        get_batches_fn = helper.gen_batch_function(
            os.path.join(data_dir, 'data_road/training'), image_shape)

        learning_rate = tf.placeholder(tf.float32)
        correct_label = tf.placeholder(tf.int32,
                                       [None, None, None, num_classes])

        input_image, keep_prob, layer3_out, layer4_out, layer7_out = load_vgg(
            sess, vgg_path)
        layer_output = layers(layer3_out, layer4_out, layer7_out, num_classes)
        logits, optimizer, cross_entropy_loss = optimize(
            layer_output, correct_label, learning_rate, num_classes)

        saver = tf.train.Saver()
        sess.run(tf.global_variables_initializer())

        if args.load:
            checkpoint = tf.train.get_checkpoint_state("saved_nn")
            if checkpoint and checkpoint.model_checkpoint_path:
                saver.restore(sess, checkpoint.model_checkpoint_path)
                print("Resuming training:", checkpoint.model_checkpoint_path)

        if args.train:
            train_nn(sess, args.epochs, args.batch, get_batches_fn, optimizer,
                     cross_entropy_loss, input_image, correct_label, keep_prob,
                     learning_rate)

        saver.save(sess, 'saved_nn/model_xxx')

        if args.save:
            helper.save_inference_samples(runs_dir, data_dir, sess,
                                          image_shape, logits, keep_prob,
                                          input_image)
Example #18
def run():
    num_classes = 2
    image_shape = (160, 576)
    data_dir = './data'
    runs_dir = './runs'
    tests.test_for_kitti_dataset(data_dir)

    # Download pretrained vgg model
    helper.maybe_download_pretrained_vgg(data_dir)

    with tf.Session() as sess:
        # Path to vgg model
        vgg_path = os.path.join(data_dir, 'vgg')
        # Create function to get batches
        get_batches_fn = helper.gen_batch_function(
            os.path.join(data_dir, 'data_road/training'), image_shape)

        # Build NN using load_vgg, layers, and optimize function
        epochs = 30
        #        epochs = 1
        batch_size = 8

        # TF placeholders
        correct_label = tf.placeholder(tf.int32,
                                       [None, None, None, num_classes],
                                       name='correct_label')
        learning_rate = tf.placeholder(tf.float32, name='learning_rate')

        input_image, keep_prob, vgg_layer3_out, vgg_layer4_out, vgg_layer7_out = load_vgg(
            sess, vgg_path)

        nn_last_layer = layers(vgg_layer3_out, vgg_layer4_out, vgg_layer7_out,
                               num_classes)

        logits, train_op, cross_entropy_loss = optimize(
            nn_last_layer, correct_label, learning_rate, num_classes)

        # Train NN using the train_nn function
        train_nn(sess, epochs, batch_size, get_batches_fn, train_op,
                 cross_entropy_loss, input_image, correct_label, keep_prob,
                 learning_rate)

        saver = tf.train.Saver()
        save_path = saver.save(sess, model_path)
        print("Save Model to file: %s" % save_path)

        # predict the testing data and save the augmented images
        helper.save_inference_samples(runs_dir, data_dir, sess, image_shape,
                                      logits, keep_prob, input_image)
Example #19
def run():
    num_classes = 2
    num_augmentations = 10
    image_shape = (160, 576)
    data_dir = './data'
    runs_dir = './runs'
    tests.test_for_kitti_dataset(data_dir)

    # Download pretrained vgg model
    helper.maybe_download_pretrained_vgg(data_dir)

    # OPTIONAL: Train and Inference on the cityscapes dataset instead of the Kitti dataset.
    # You'll need a GPU with at least 10 teraFLOPS to train on.
    #  https://www.cityscapes-dataset.com/
    epochs = 25
    batch_size = 8

    # Generate some augmented data
    helper.gen_augmented_data(
        os.path.join(data_dir, 'data_road/training'), image_shape,
        os.path.join(data_dir, 'data_road/augmentations'), num_augmentations)

    with tf.Session() as sess:

        # Path to vgg model
        vgg_path = os.path.join(data_dir, 'vgg')
        # Create function to get batches
        get_batches_fn = helper.gen_batch_function(
            os.path.join(data_dir, 'data_road/augmentations'), image_shape)

        correct_label = tf.placeholder(tf.int32)
        learning_rate = tf.placeholder(tf.float32)

        # Build NN using load_vgg, layers, and optimize function
        image_input, keep_prob, layer3_out, layer4_out, layer7_out = load_vgg(
            sess, vgg_path)
        last_layer = layers(layer3_out, layer4_out, layer7_out, num_classes)
        logits, train_op, cross_entropy_loss = optimize(
            last_layer, correct_label, learning_rate, num_classes)

        #Train NN using the train_nn function
        sess.run(tf.global_variables_initializer())
        train_nn(sess, epochs, batch_size, get_batches_fn, train_op,
                 cross_entropy_loss, image_input, correct_label, keep_prob,
                 learning_rate)

        # TODO: Save inference data using helper.save_inference_samples
        helper.save_inference_samples(runs_dir, data_dir, sess, image_shape,
                                      logits, keep_prob, image_input)
Example #20
def run():
    num_classes = 2
    num_channels = 3
    image_shape = (160, 576)
    data_dir = './data'
    runs_dir = './runs'
    tests.test_for_kitti_dataset(data_dir)

    # Additional variables
    batch_size = 8
    epochs     = 32
    learning_rate = 1e-3

    correct_label = tf.placeholder(tf.float32, shape=(None, image_shape[0], image_shape[1], num_classes))

    # Download pretrained vgg model
    helper.maybe_download_pretrained_vgg(data_dir)

    # OPTIONAL: Train and Inference on the cityscapes dataset instead of the Kitti dataset.
    # You'll need a GPU with at least 10 teraFLOPS to train on.
    #  https://www.cityscapes-dataset.com/

    with tf.Session() as sess:
        # Path to vgg model
        vgg_path = os.path.join(data_dir, 'vgg')

        # Create function to get batches
        get_batches_fn = helper.gen_batch_function(os.path.join(data_dir, 'data_road/training'), image_shape)

        # OPTIONAL: Augment Images for better results
        #  https://datascience.stackexchange.com/questions/5224/how-to-prepare-augment-images-for-neural-network

        # Get Pre-trained model
        image_input, keep_prob, vgg_layer3_out, vgg_layer4_out, vgg_layer7_out = load_vgg(sess, vgg_path)
        # FCN output
        output = layers(vgg_layer3_out, vgg_layer4_out, vgg_layer7_out, num_classes)
        # Optimizer
        logits, train_op, cross_entropy_loss = optimize(output, correct_label, learning_rate, num_classes)

        # Training
        train_nn(sess, epochs, batch_size, get_batches_fn, train_op, cross_entropy_loss, image_input, correct_label, keep_prob, learning_rate)

        saver = tf.train.Saver()
        saver.save(sess, './final_model/fcn8.ckpt')
        saver.export_meta_graph('./final_model/final.meta')
        tf.train.write_graph(sess.graph_def, "./final_model/", "final.pb", False)

        #  helper.save_inference_samples(runs_dir, data_dir, sess, image_shape, logits, keep_prob, input_image)
        helper.save_inference_samples(runs_dir, data_dir, sess, image_shape, logits, keep_prob, image_input)
Example #21
def run():
    num_classes = 2
    image_shape = (160, 576)
    data_dir = './data'
    runs_dir = './runs'
    tests.test_for_kitti_dataset(data_dir)

    # Download pretrained vgg model
    helper.maybe_download_pretrained_vgg(data_dir)

    # OPTIONAL: Train and Inference on the cityscapes dataset instead of the Kitti dataset.
    # You'll need a GPU with at least 10 teraFLOPS to train on.
    #  https://www.cityscapes-dataset.com/

    #HYPERPARAMETERS
    epochs = 10
    batch_size = 16

    with tf.Session() as sess:
        # Path to vgg model
        vgg_path = os.path.join(data_dir, 'vgg')
        # Create function to get batches
        get_batches_fn = helper.gen_batch_function(
            os.path.join(data_dir, 'data_road/training'), image_shape)

        #get_batches_test = helper.gen_test_output(os.path.join(data_dir, 'data_road/testing'), image_shape)

        # OPTIONAL: Augment Images for better results
        #  https://datascience.stackexchange.com/questions/5224/how-to-prepare-augment-images-for-neural-network
        learning_rate = tf.placeholder(tf.float32, name="alpha")

        correct_label = tf.placeholder(tf.float32,
                                       (None, None, None, num_classes),
                                       name="y")

        # TODO: Build NN using load_vgg, layers, and optimize function
        input_image, keep_prob, layer3, layer4, layer7 = load_vgg(
            sess, vgg_path)
        nn_last_layer = layers(layer3, layer4, layer7, num_classes)
        logits, train_op, cross_entropy_loss = optimize(
            nn_last_layer, correct_label, learning_rate, num_classes)
        train_nn(sess, epochs, batch_size, get_batches_fn, train_op,
                 cross_entropy_loss, input_image, correct_label, keep_prob,
                 learning_rate)

        #  helper.save_inference_samples(runs_dir, data_dir, sess, image_shape, logits, keep_prob, input_image)

        helper.save_inference_samples(runs_dir, data_dir, sess, image_shape,
                                      logits, keep_prob, input_image)
Example #22
def run():
    num_classes = 2
    image_shape = (160, 576)
    data_dir = './data'
    runs_dir = './runs'
    tests.test_for_kitti_dataset(data_dir)

    # Download pretrained vgg model
    helper.maybe_download_pretrained_vgg(data_dir)

    # OPTIONAL: Train and Inference on the cityscapes dataset instead of the Kitti dataset.
    # You'll need a GPU with at least 10 teraFLOPS to train on.
    #  https://www.cityscapes-dataset.com/
    correct_label = tf.placeholder(tf.int32, (None, None, None, num_classes))
    learning_rate = tf.placeholder(tf.float32)

    with tf.Session() as sess:
        # Path to vgg model
        vgg_path = os.path.join(data_dir, 'vgg')
        # Create function to get batches
        get_batches_fn = helper.gen_batch_function(
            os.path.join(data_dir, 'data_road/training'), image_shape)

        # OPTIONAL: Augment Images for better results
        #  https://datascience.stackexchange.com/questions/5224/how-to-prepare-augment-images-for-neural-network

        # TODO: Build NN using load_vgg, layers, and optimize function
        input_image, keep_prob, layer3_out, layer4_out, layer7_out = load_vgg(
            sess, vgg_path)
        nn_last_layer = layers(layer3_out, layer4_out, layer7_out, num_classes)
        logits, train_op, total_loss = optimize(nn_last_layer, correct_label,
                                                learning_rate, num_classes)

        # TODO: Train NN using the train_nn function

        epochs = 30
        batch_size = 16
        print('3', tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES))
        train_nn(sess, epochs, batch_size, get_batches_fn, train_op,
                 total_loss, input_image, correct_label, keep_prob,
                 learning_rate)

        # TODO: Save inference data using helper.save_inference_samples

        #  helper.save_inference_samples(runs_dir, data_dir, sess, image_shape, logits, keep_prob, input_image)
        helper.save_inference_samples(runs_dir, data_dir, sess, image_shape,
                                      logits, keep_prob, input_image)
        # OPTIONAL: Apply the trained model to a video
        image2video()
Example #23
def run():
    num_classes = 2
    image_shape = (160, 576)
    data_dir = './data'
    runs_dir = './runs'

    # Hyper-parameters
    epochs = 20
    batches = 1
    learning_rate = 0.0001

    tests.test_for_kitti_dataset(data_dir)

    # Download pretrained vgg model
    helper.maybe_download_pretrained_vgg(data_dir)

    # OPTIONAL: Train and Inference on the cityscapes dataset instead of the Kitti dataset.
    # You'll need a GPU with at least 10 teraFLOPS to train on.
    #  https://www.cityscapes-dataset.com/

    with tf.Session() as sess:
        # Path to vgg model
        vgg_path = os.path.join(data_dir, 'vgg')
        # Create function to get batches
        get_batches_fn = helper.gen_batch_function(
            os.path.join(data_dir, 'data_road/training'), image_shape)

        # OPTIONAL: Augment Images for better results
        #  https://datascience.stackexchange.com/questions/5224/how-to-prepare-augment-images-for-neural-network

        # TODO: Build NN using load_vgg, layers, and optimize function
        correct_label = tf.placeholder(
            tf.float32, [None, image_shape[0], image_shape[1], num_classes])
        image_input, keep_prob, layer3_out, layer4_out, layer7_out = load_vgg(
            sess, vgg_path)
        nn_last_layer = layers(layer3_out, layer4_out, layer7_out, num_classes)
        logits, train_op, cross_entropy_loss = optimize(
            nn_last_layer, correct_label, tf.constant(learning_rate),
            num_classes)

        # TODO: Train NN using the train_nn function
        train_nn(sess, epochs, batches, get_batches_fn, train_op,
                 cross_entropy_loss, image_input, correct_label, keep_prob,
                 learning_rate, runs_dir, data_dir, image_shape, logits)

        # TODO: Save inference data using helper.save_inference_samples
        #  helper.save_inference_samples(runs_dir, data_dir, sess, image_shape, logits, keep_prob, input_image)
        save_inference_samples1(runs_dir, data_dir, sess, image_shape, logits,
                                keep_prob, image_input, 'END_EPOCH')
Example #24
def run(epochs=48, batch_size=17):
    num_classes = 2
    image_shape = (160, 576)  # KITTI dataset uses 160x576 images
    data_dir = './data'
    runs_dir = './runs'
    tests.test_for_kitti_dataset(data_dir)

    # Download pretrained vgg model
    helper.maybe_download_pretrained_vgg(data_dir)

    # OPTIONAL: Train and Inference on the cityscapes dataset instead of the Kitti dataset.
    # You'll need a GPU with at least 10 teraFLOPS to train on.
    #  https://www.cityscapes-dataset.com/

    with tf.Session() as sess:
        # Path to vgg model
        vgg_path = os.path.join(data_dir, 'vgg')
        # Create function to get batches
        get_batches_fn = helper.gen_batch_function(
            os.path.join(data_dir, 'data_road/training'), image_shape)

        # OPTIONAL: Augment Images for better results
        #  https://datascience.stackexchange.com/questions/5224/how-to-prepare-augment-images-for-neural-network

        # Build NN using load_vgg, layers, and optimize function
        # Placeholders
        correct_label = tf.placeholder(tf.int32,
                                       [None, None, None, num_classes])
        learning_rate = tf.placeholder(tf.float32)

        # Load the VGG16
        input_image, keep_prob, pool3, pool4, conv7 = load_vgg(sess, vgg_path)
        # Add new layers
        layer_output = layers(pool3, pool4, conv7, num_classes)
        # Create optimizer
        logits, train_op, cross_entropy_loss = optimize(
            layer_output, correct_label, learning_rate, num_classes)

        # Train NN using the train_nn function
        # TODO: Add model saving
        # saver = tf.train.Saver()

        train_nn(sess, epochs, batch_size, get_batches_fn, train_op,
                 cross_entropy_loss, input_image, correct_label, keep_prob,
                 learning_rate)

        # Save inference data using helper.save_inference_samples
        helper.save_inference_samples(runs_dir, data_dir, sess, image_shape,
                                      logits, keep_prob, input_image)
Example #25
def run(_):
    num_classes = 2
    image_shape = (160, 576)
    data_dir = './data'
    runs_dir = './runs'
    tests.test_for_kitti_dataset(data_dir)

    # Download pretrained vgg model
    helper.maybe_download_pretrained_vgg(data_dir)

    # OPTIONAL: Train and Inference on the cityscapes dataset instead of the Kitti dataset.
    # You'll need a GPU with at least 10 teraFLOPS to train on.
    #  https://www.cityscapes-dataset.com/

    with tf.Session() as sess:
        # Path to vgg model
        vgg_path = os.path.join(data_dir, 'vgg')
        # Create function to get batches
        get_batches_fn = helper.gen_batch_function(os.path.join(data_dir, 'data_road/training'), image_shape)

        # Build NN using load_vgg, layers, and optimize functions
        input_image, keep_prob, vgg_layer3_out, vgg_layer4_out, vgg_layer7_out = load_vgg(sess, vgg_path)
        nn_last_layer = layers(vgg_layer3_out, vgg_layer4_out, vgg_layer7_out, num_classes, FLAGS.L2_SCALE, FLAGS.STDDEV)

        # Define exp dacay learning rate
        global_step = tf.train.get_or_create_global_step()
        lr = learning_rate(global_step, FLAGS.INIT_LEARNING_RATE, FLAGS.LR_DECAY_FACTOR, FLAGS.EPOCHS_PER_DECAY, FLAGS.BATCH_SIZE)

        # Build the TF loss, metrics, and optimizer operations
        correct_label = tf.placeholder(tf.float32, [None, None, None, num_classes])
        logits, train_op, cross_entropy_loss, accuracy, mean_iou, metrics_op = optimize(nn_last_layer, correct_label, lr, num_classes, global_step)

        # Train NN using the train_nn function
        train_nn(sess, FLAGS.EPOCHS, FLAGS.BATCH_SIZE, FLAGS.KEEP_PROB, get_batches_fn, train_op, cross_entropy_loss, accuracy, mean_iou, metrics_op, input_image, correct_label, keep_prob, lr)

        # Save the all the graph variables to disk
        saver = tf.train.Saver()
        if not os.path.isdir("./model"):
            os.makedirs("./model")
        save_path = saver.save(sess, "./model/model.ckpt")
        print("Model saved in path: %s \n" % save_path)

        # Save inference data using helper.save_inference_samples
        helper.save_inference_samples(runs_dir, data_dir, sess, image_shape, logits, keep_prob, input_image)

        # Apply the trained model to a video
        video_dir = './videos/'
        for file_name in os.listdir(video_dir):
            helper.inference_on_video(video_dir + file_name)
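
Example #25's learning_rate() helper builds a decaying schedule from the flags. A sketch, assuming tf.train.exponential_decay and the 289-image KITTI data_road training set:

def learning_rate(global_step, init_lr, decay_factor, epochs_per_decay, batch_size):
    # Decay once every epochs_per_decay passes over the training set.
    num_examples = 289  # KITTI data_road training images (assumption)
    decay_steps = int(num_examples / batch_size * epochs_per_decay)
    return tf.train.exponential_decay(init_lr, global_step, decay_steps,
                                      decay_factor, staircase=True)
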
Example #26
def run():
    num_classes = 2
    image_shape = (160, 576)
    epochs = 30
    batch_size = 8

    correct_label = tf.placeholder(tf.float32, [None, image_shape[0], image_shape[1], num_classes])
    learning_rate = tf.placeholder(tf.float32)

    data_dir = './data'
    runs_dir = './runs'
    tests.test_for_kitti_dataset(data_dir)

    # Download pretrained vgg model
    helper.maybe_download_pretrained_vgg(data_dir)

    # OPTIONAL: Train and Inference on the cityscapes dataset instead of the Kitti dataset.
    # You'll need a GPU with at least 10 teraFLOPS to train on.
    #  https://www.cityscapes-dataset.com/

    config = tf.ConfigProto(log_device_placement=False)
    # config.gpu_options.allow_growth = True
    # config.gpu_options.per_process_gpu_memory_fraction = 0.4

    with tf.Session(config=config) as sess:
        # Path to vgg model
        vgg_path = os.path.join(data_dir, 'vgg')
        # Create function to get batches
        get_batches_fn = helper.gen_batch_function(os.path.join(data_dir, 'data_road/training'), image_shape)

        # OPTIONAL: Augment Images for better results
        #  https://datascience.stackexchange.com/questions/5224/how-to-prepare-augment-images-for-neural-network

        # TODO: Build NN using load_vgg, layers, and optimize function
        input_image, keep_prob, vgg_layer3_out, vgg_layer4_out, vgg_layer7_out = load_vgg(sess, vgg_path)
        nn_last_layer = layers(vgg_layer3_out, vgg_layer4_out, vgg_layer7_out, num_classes)
        logits, train_op, cross_entropy_loss = optimize(nn_last_layer, correct_label, learning_rate, num_classes)

        # TODO: Train NN using the train_nn function
        # Initialize all variables
        sess.run(tf.global_variables_initializer())
        sess.run(tf.local_variables_initializer())
        saver = tf.train.Saver()

        train_nn(sess, epochs, batch_size, get_batches_fn, train_op, cross_entropy_loss, input_image,
                 correct_label, keep_prob, learning_rate, saver=saver)

        # TODO: Save inference data using helper.save_inference_samples
        helper.save_inference_samples(runs_dir, data_dir, sess, image_shape, logits, keep_prob, input_image)
Example #27
def run():
    epochs = 50  # 10
    batch_size = 5  # 1
    num_classes = 2
    image_shape = (160, 576)
    data_dir = './data'
    runs_dir = './runs'
    tests.test_for_kitti_dataset(data_dir)

    # Download pretrained vgg model
    helper.maybe_download_pretrained_vgg(data_dir)

    # OPTIONAL: Train and Inference on the cityscapes dataset instead of the Kitti dataset.
    # You'll need a GPU with at least 10 teraFLOPS to train on.
    #  https://www.cityscapes-dataset.com/

    with tf.Session() as sess:

        # Path to vgg model
        vgg_path = os.path.join(data_dir, 'vgg')
        # Create function to get batches
        get_batches_fn = helper.gen_batch_function(os.path.join(data_dir, 'data_road/training'), image_shape)

        # OPTIONAL: Augment Images for better results
        #  https://datascience.stackexchange.com/questions/5224/how-to-prepare-augment-images-for-neural-network

        # TODO: Build NN using load_vgg, layers, and optimize function
        correct_label = tf.placeholder(tf.int32, [None, None, None, num_classes], name='label')
        learning_rate = tf.placeholder(tf.float32, name='learning_rate')

        input_image, keep_prob, layer3_out, layer4_out, layer7_out = load_vgg(sess, vgg_path)
        nn_last_layer = layers(layer3_out, layer4_out, layer7_out, num_classes)
        logits, train_op, cross_entropy = optimize(nn_last_layer, correct_label, learning_rate, num_classes)

        # Tensorboard
        #merged = tf.summary.merge_all()
        #train_writer = tf.summary.FileWriter('log/train', sess.graph)

        # TODO: Train NN using the train_nn function
        train_nn(sess, epochs, batch_size, get_batches_fn, train_op, cross_entropy,
                 input_image, correct_label, keep_prob, learning_rate)

        # TODO: Save inference data using helper.save_inference_samples
        saver = tf.train.Saver()
        saver.save(sess, 'checkpoints/model.ckpt')
        saver.export_meta_graph('checkpoints/model.meta')
        tf.train.write_graph(sess.graph_def, './checkpoints/', 'model.pb', False)

        helper.save_inference_samples(runs_dir, data_dir, sess, image_shape, logits, keep_prob, input_image)
Example #28
def run():
    num_classes = 2
    image_shape = (160, 576)  # KITTI dataset uses 160x576 images
    epochs = 40
    batch_size = 5
    data_dir = './data'
    runs_dir = './runs'
    tests.test_for_kitti_dataset(data_dir)

    # Download pretrained vgg model
    helper.maybe_download_pretrained_vgg(data_dir)

    # OPTIONAL: Train and Inference on the cityscapes dataset instead of the Kitti dataset.
    # You'll need a GPU with at least 10 teraFLOPS to train on.
    #  https://www.cityscapes-dataset.com/

    with tf.Session() as sess:
        # Path to vgg model
        vgg_path = os.path.join(data_dir, 'vgg')
        # Create function to get batches
        get_batches_fn = helper.gen_batch_function(
            os.path.join(data_dir, 'data_road/training'), image_shape)

        # OPTIONAL: Augment Images for better results

        # Build NN using load_vgg, layers, and optimize function

        learning_rate = 0.001
        correct_label = tf.placeholder(tf.int32,
                                       [None, None, None, num_classes],
                                       name='correct_label')

        input_image, keep_prob, vgg_layer3_out, vgg_layer4_out, vgg_layer7_out = load_vgg(
            sess, vgg_path)
        nn_last_layer = layers(vgg_layer3_out, vgg_layer4_out, vgg_layer7_out,
                               num_classes)

        logits, train_op, cross_entropy_loss = optimize(
            nn_last_layer, correct_label, learning_rate, num_classes)

        # Train NN using the train_nn function

        train_nn(sess, epochs, batch_size, get_batches_fn, train_op,
                 cross_entropy_loss, input_image, correct_label, keep_prob,
                 learning_rate)

        # Save inference data using helper.save_inference_samples
        helper.save_inference_samples(runs_dir, data_dir, sess, image_shape,
                                      logits, keep_prob, input_image)
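Each example above delegates decoding to layers(), which turns the three VGG encoder outputs back into a full-resolution class map with skip connections, FCN-8 style. A minimal sketch of such a decoder (kernel sizes and the L2 regularizer are assumptions, not any author's exact choices):

def layers(vgg_layer3_out, vgg_layer4_out, vgg_layer7_out, num_classes):
    reg = tf.contrib.layers.l2_regularizer(1e-3)
    # 1x1 convolutions reduce each encoder output to num_classes channels
    conv7 = tf.layers.conv2d(vgg_layer7_out, num_classes, 1, padding='same',
                             kernel_regularizer=reg)
    conv4 = tf.layers.conv2d(vgg_layer4_out, num_classes, 1, padding='same',
                             kernel_regularizer=reg)
    conv3 = tf.layers.conv2d(vgg_layer3_out, num_classes, 1, padding='same',
                             kernel_regularizer=reg)
    # Upsample 2x and fuse with the layer-4 skip connection
    up1 = tf.layers.conv2d_transpose(conv7, num_classes, 4, strides=2,
                                     padding='same', kernel_regularizer=reg)
    skip1 = tf.add(up1, conv4)
    # Upsample 2x again and fuse with the layer-3 skip connection
    up2 = tf.layers.conv2d_transpose(skip1, num_classes, 4, strides=2,
                                     padding='same', kernel_regularizer=reg)
    skip2 = tf.add(up2, conv3)
    # Final 8x upsample back to the input resolution
    return tf.layers.conv2d_transpose(skip2, num_classes, 16, strides=8,
                                      padding='same', kernel_regularizer=reg)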
Example #29
def run():
    tests.test_for_kitti_dataset(DATA_DIR)

    # Download pretrained vgg model
    helper.maybe_download_pretrained_vgg(DATA_DIR)

    # OPTIONAL: Train and Inference on the cityscapes dataset instead of the Kitti dataset.
    # You'll need a GPU with at least 10 teraFLOPS to train on.
    #  https://www.cityscapes-dataset.com/

    printStatistics()

    with tf.Session() as sess:
        # Path to vgg model
        vgg_path = os.path.join(DATA_DIR, 'vgg')
        # Create function to get batches
        get_batches_fn = helper.gen_batch_function(os.path.join(DATA_DIR, 'data_road/training'), IMAGE_SHAPE)

        # OPTIONAL: Augment Images for better results
        #  https://datascience.stackexchange.com/questions/5224/how-to-prepare-augment-images-for-neural-network

        # Build NN using load_vgg, layers, and optimize function
        print("Load VGG model...")
        input_image, keep_prob, layer_3, layer_4, layer_7 = load_vgg(sess, vgg_path)
        layer_output = layers(layer_3, layer_4, layer_7, NUM_CLASSES)

        label = tf.placeholder(tf.int32, shape=[None, None, None, NUM_CLASSES])
        learning_rate = tf.placeholder(tf.float32)

        iou_obj = None

        if IOU_ENABLED:
            logits, train_op, cross_entropy_loss, iou_obj = optimize(layer_output, label, learning_rate, NUM_CLASSES, iou_enabled=IOU_ENABLED)
        else:
            logits, train_op, cross_entropy_loss = optimize(layer_output, label, learning_rate, NUM_CLASSES)

        # Train NN using the train_nn function
        print("Start training...")
        train_nn(sess, EPOCHS, BATCH_SIZE, get_batches_fn, train_op, cross_entropy_loss, input_image, label, keep_prob,
                 learning_rate, iou_obj)

        # Save the trained model
        print("Save trained model...")
        saver = tf.train.Saver()
        saver.save(sess, './runs/semantic_segmentation_model.ckpt')

        # Save inference data using helper.save_inference_samples
        print("Save inference samples...")
        helper.save_inference_samples(RUNS_DIR, DATA_DIR, sess, IMAGE_SHAPE, logits, keep_prob, input_image)
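The iou_obj threaded through the example above is not defined on this page. One plausible construction, assuming optimize() builds it from tf.metrics.mean_iou (build_iou is a hypothetical helper, not this author's code):

def build_iou(logits, correct_label, num_classes):
    # Per-pixel predicted and ground-truth class indices
    predictions = tf.argmax(tf.reshape(logits, (-1, num_classes)), axis=-1)
    labels = tf.argmax(tf.reshape(correct_label, (-1, num_classes)), axis=-1)
    # mean_iou returns the running metric and the op that updates it
    iou, iou_op = tf.metrics.mean_iou(labels, predictions, num_classes)
    return iou, iou_op  # train_nn would run iou_op per batch, then read iou

Note that tf.metrics.* create local variables, so the session must run tf.local_variables_initializer() before the first update.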
def run():
    print("Run")
    num_classes = 2
    image_shape = (160, 576)
    data_dir = './data'
    runs_dir = './runs'

    tests.test_for_kitti_dataset(data_dir)

    # Download pretrained vgg model
    helper.maybe_download_pretrained_vgg(data_dir)

    # OPTIONAL: Train and Inference on the cityscapes dataset instead of the Kitti dataset.
    # You'll need a GPU with at least 10 teraFLOPS to train on.
    #  https://www.cityscapes-dataset.com/
    # Note: Not done
    
    kBatchSize = 5
    kEpochs = 10

    with tf.Session() as sess:
        # Path to vgg model
        vgg_path = os.path.join(data_dir, 'vgg')
        # Create function to get batches
        get_batches_fn = helper.gen_batch_function(os.path.join(data_dir, 'data_road/training'), image_shape)

        # OPTIONAL: Augment Images for better results
        #  https://datascience.stackexchange.com/questions/5224/how-to-prepare-augment-images-for-neural-network
        # Note: Not implemented.

        correct_label = tf.placeholder(tf.int32, [None, None, None, num_classes], name='correct_label')
        learning_rate = tf.placeholder(tf.float32, name='learning_rate')

        # Build NN using load_vgg, layers, and optimize function
        input_image, keep_prob, layer3_out, layer4_out, layer7_out = load_vgg(sess, vgg_path)
        layer_output = layers(layer3_out, layer4_out, layer7_out, num_classes)
        logits, train_op, cross_entropy_loss = optimize(layer_output, correct_label, learning_rate, num_classes)
        
        # Train NN using the train_nn function
        train_nn(sess, kEpochs, kBatchSize, get_batches_fn, train_op, cross_entropy_loss, input_image, correct_label, keep_prob, learning_rate)

        # Save inference data using helper.save_inference_samples
        helper.save_inference_samples(runs_dir, data_dir, sess, image_shape, logits, keep_prob, input_image)
        
        # Save the variables to disk.
        print("Saving model...")
        saver = tf.train.Saver()
        save_path = saver.save(sess, "./model/semantic_segmentation_model.ckpt")
        print("Model saved in path: %s" % save_path)
def run():

    tests.test_for_kitti_dataset(DATA_DIR)

    # Download pretrained vgg model
    helper.maybe_download_pretrained_vgg(DATA_DIR)

    # OPTIONAL: Train and Inference on the cityscapes dataset instead of the Kitti dataset.
    # You'll need a GPU with at least 10 teraFLOPS to train on.
    #  https://www.cityscapes-dataset.com/

    with tf.Session() as sess:
        # Path to vgg model
        vgg_path = os.path.join(DATA_DIR, 'vgg')
        # Create function to get batches
        get_batches_fn = helper.gen_batch_function(
            os.path.join(DATA_DIR, 'data_road/training'), IMAGE_SHAPE)

        # OPTIONAL: Augment Images for better results
        #  https://datascience.stackexchange.com/questions/5224/how-to-prepare-augment-images-for-neural-network

        # Build NN using load_vgg, layers, and optimize function

        ##### Encoder #####
        image_input, keep_prob, layer3_out, layer4_out, layer7_out = load_vgg(
            sess, vgg_path)

        ##### Decoder #####
        output_layer = layers(layer3_out, layer4_out, layer7_out, NUM_CLASSES)

        # tf placeholders
        correct_label = tf.placeholder(tf.int32,
                                       (None, None, None, NUM_CLASSES),
                                       name='correct_label')
        learning_rate = tf.placeholder(tf.float32, name='learning_rate')

        logits, train_op, cross_entropy_loss = optimize(
            output_layer, correct_label, learning_rate, NUM_CLASSES)
        logits = tf.identity(logits, name='logits')

        # Train NN using the train_nn function
        train_nn(sess, EPOCHS, BATCH_SIZE, get_batches_fn, train_op,
                 cross_entropy_loss, image_input, correct_label, keep_prob,
                 learning_rate)

        # Save inference data using helper.save_inference_samples
        helper.save_inference_samples(RUNS_DIR, DATA_DIR, sess, IMAGE_SHAPE,
                                      logits, keep_prob, image_input)
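The tf.identity(logits, name='logits') call above exists so the tensor can be fetched by name after the graph is saved and reloaded. A short usage sketch, assuming a restored session sess and a preprocessed input image (the input-tensor names come from the pretrained VGG graph and are assumptions here):

graph = tf.get_default_graph()
logits = graph.get_tensor_by_name('logits:0')
image_input = graph.get_tensor_by_name('image_input:0')  # assumed name
keep_prob = graph.get_tensor_by_name('keep_prob:0')      # assumed name
# Per-pixel class probabilities for a single image (batch of one)
probs = sess.run(tf.nn.softmax(logits),
                 feed_dict={image_input: [image], keep_prob: 1.0})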
def run():
    num_classes = 2
    image_shape = (160, 576)
    data_dir = './data'
    runs_dir = './runs'
    tests.test_for_kitti_dataset(data_dir)

    # Download pretrained vgg model
    helper.maybe_download_pretrained_vgg(data_dir)

    # OPTIONAL: Train and Inference on the cityscapes dataset instead of the Kitti dataset.
    # You'll need a GPU with at least 10 teraFLOPS to train on.
    #  https://www.cityscapes-dataset.com/

    correct_label_tensor = tf.placeholder(tf.uint8, None)
    learn_rate_tensor = tf.placeholder(tf.float32, None)

    with tf.Session() as sess:
        # Path to vgg model
        vgg_path = os.path.join(data_dir, 'vgg')

        # Create function to get batches
        get_batches_fn = helper.gen_batch_function(
            os.path.join(data_dir, 'data_road/training'), image_shape)

        # OPTIONAL: Augment Images for better results
        #  https://datascience.stackexchange.com/questions/5224/how-to-prepare-augment-images-for-neural-network

        input_tensor, keep_prob_tensor, out3_tensor, out4_tensor, out7_tensor = load_vgg(
            sess, vgg_path)
        final_output = layers(out3_tensor, out4_tensor, out7_tensor,
                              num_classes)
        logits, train_op, cross_entropy_loss = optimize(
            final_output, correct_label_tensor, learn_rate_tensor, num_classes)

        train_nn(sess,
                 epochs=K_EPOCHS,
                 batch_size=K_BATCH_SIZE,
                 get_batches_fn=get_batches_fn,
                 train_op=train_op,
                 cross_entropy_loss=cross_entropy_loss,
                 input_image=input_tensor,
                 correct_label=correct_label_tensor,
                 keep_prob=keep_prob_tensor,
                 learning_rate=learn_rate_tensor)

        helper.save_inference_samples(runs_dir, data_dir, sess, image_shape,
                                      logits, keep_prob_tensor, input_tensor)
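The "Augment Images for better results" note is repeated in every example on this page but implemented in none of them. A minimal NumPy sketch of the kind of augmentation that could sit inside gen_batch_function (the flip probability and brightness range are assumptions):

import numpy as np

def augment(image, gt_image):
    # Horizontal flip with 50% probability; the label mask must flip too
    if np.random.rand() < 0.5:
        image = np.fliplr(image)
        gt_image = np.fliplr(gt_image)
    # Random brightness shift, clipped back to the valid pixel range
    shift = np.random.randint(-30, 31)
    image = np.clip(image.astype(np.int32) + shift, 0, 255).astype(np.uint8)
    return image, gt_image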
def run():
    num_classes = 2
    image_shape = (160, 576)
    data_dir = './data'
    runs_dir = './runs'
    tests.test_for_kitti_dataset(data_dir)

    # Download pretrained vgg model
    helper.maybe_download_pretrained_vgg(data_dir)

    # OPTIONAL: Train and Inference on the cityscapes dataset instead of the Kitti dataset.
    # You'll need a GPU with at least 10 teraFLOPS to train on.
    #  https://www.cityscapes-dataset.com/

    with tf.Session() as sess:
        # Path to vgg model
        vgg_path = os.path.join(data_dir, 'vgg')
        # Create function to get batches
        get_batches_fn = helper.gen_batch_function(os.path.join(data_dir, 'data_road/training'), image_shape)

        # OPTIONAL: Augment Images for better results
        #  https://datascience.stackexchange.com/questions/5224/how-to-prepare-augment-images-for-neural-network

        # Build NN using load_vgg, layers, and optimize function
        epochs        = 50
        batch_size    = 8

        correct_label = tf.placeholder(tf.int32, (None, None, None, num_classes))
        learning_rate = tf.placeholder(tf.float32)

        input_image, keep_prob, layer3_out, layer4_out, layer7_out = load_vgg(sess, vgg_path)
        layer_output = layers(layer3_out, layer4_out, layer7_out, num_classes)
        logits, train_op, cross_entropy_loss = optimize(layer_output, correct_label, learning_rate, num_classes)

        # Train NN using the train_nn function
        train_nn(sess,
                 epochs,
                 batch_size,
                 get_batches_fn,
                 train_op,
                 cross_entropy_loss,
                 input_image,
                 correct_label,
                 keep_prob,
                 learning_rate)

        # Save inference data using helper.save_inference_samples
        helper.save_inference_samples(runs_dir, data_dir, sess, image_shape, logits, keep_prob, input_image)
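train_nn, shared by every example, is essentially a feed-dict loop over the batch generator. A minimal sketch matching the common signature (the keep_prob and learning-rate values are assumptions, and feeding learning_rate only applies when it is a placeholder):

def train_nn(sess, epochs, batch_size, get_batches_fn, train_op,
             cross_entropy_loss, input_image, correct_label, keep_prob,
             learning_rate):
    sess.run(tf.global_variables_initializer())
    for epoch in range(epochs):
        for image, label in get_batches_fn(batch_size):
            _, loss = sess.run([train_op, cross_entropy_loss],
                               feed_dict={input_image: image,
                                          correct_label: label,
                                          keep_prob: 0.5,
                                          learning_rate: 1e-4})
        print("Epoch {}/{}: loss = {:.4f}".format(epoch + 1, epochs, loss))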
Example #34
def perform_tests():
    tests.test_for_kitti_dataset(data_dir)
    tests.test_load_vgg(load_vgg, tf)
    tests.test_layers(layers)
    tests.test_optimize(optimize)
    tests.test_train_nn(train_nn)
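perform_tests() also exercises load_vgg, which no example on this page shows. In the starter project the pretrained VGG ships as a TensorFlow SavedModel, so a typical implementation loads it by tag and looks the tensors up by name (the tag and tensor names below follow the starter code and should be treated as assumptions):

def load_vgg(sess, vgg_path):
    # Load the SavedModel into the current session's graph
    tf.saved_model.loader.load(sess, ['vgg16'], vgg_path)
    graph = tf.get_default_graph()
    input_image = graph.get_tensor_by_name('image_input:0')
    keep_prob = graph.get_tensor_by_name('keep_prob:0')
    layer3_out = graph.get_tensor_by_name('layer3_out:0')
    layer4_out = graph.get_tensor_by_name('layer4_out:0')
    layer7_out = graph.get_tensor_by_name('layer7_out:0')
    return input_image, keep_prob, layer3_out, layer4_out, layer7_out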