Example #1
import os

import scipy.misc
import tensorflow as tf

import helper


def save_inference_samples(runs_dir, data_dir, sess, image_shape, logits,
                           keep_prob, input_image, epoch):
    """
    Save model weights and generate inference samples for the current epoch.
    :param runs_dir: directory where model weights and samples will be saved
    :param data_dir: directory where the Kitti dataset is stored
    :param sess: TF Session
    :param image_shape: shape of the input image for prediction
    :param logits: TF Tensor holding the FCN prediction
    :param keep_prob: TF Placeholder for dropout keep probability
    :param input_image: TF Placeholder for input images
    :param epoch: epoch number, or a "final" label for the last run
    """
    # Make folder for current epoch
    output_dir = os.path.join(runs_dir, str(epoch))
    assert not os.path.exists(output_dir)
    os.makedirs(output_dir)

    # Run NN on test images and save them to HD
    print('Epoch {} finished. Saving test images to: {}'.format(
        epoch, output_dir))
    image_outputs = helper.gen_test_output(
        sess, logits, keep_prob, input_image,
        os.path.join(data_dir, 'data_road/testing'), image_shape)
    for name, image in image_outputs:
        scipy.misc.imsave(os.path.join(output_dir, name), image)

    # Save the model
    saver = tf.train.Saver()
    filefcn_path = os.path.join(output_dir, 'model_{}.ckpt'.format(epoch))
    saver.save(sess, filefcn_path)
    print('Model saved to: {}'.format(filefcn_path))
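A small usage sketch, not taken from any of the sources above, showing how save_inference_samples from Example #1 could be called during training; the 8-epoch interval, the train_one_epoch callable, and the 'final' label are assumptions.

def save_samples_periodically(sess, runs_dir, data_dir, image_shape, logits,
                              keep_prob, input_image, epochs, train_one_epoch):
    # Save intermediate samples every 8 epochs and a final set after training.
    for epoch in range(1, epochs + 1):
        train_one_epoch(sess, epoch)  # assumed per-epoch training callable
        if epoch % 8 == 0:
            save_inference_samples(runs_dir, data_dir, sess, image_shape,
                                   logits, keep_prob, input_image, epoch)
    save_inference_samples(runs_dir, data_dir, sess, image_shape,
                           logits, keep_prob, input_image, 'final')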
Example #2
def training_visulize(self):
    """Run inference on the visualization images and return the results as an array."""
    reshape_logits = tf.reshape(self.logits, (-1, self.n_classes))
    viz_images = []
    img_output = helper.gen_test_output(self.sess, reshape_logits, self.dropout,
                                        self.images_batch, self.viz_dir,
                                        self.input_shape)
    for _, output in img_output:
        viz_images.append(output)
    return np.array(viz_images)
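A hedged sketch (not in the original) of how the array returned by training_visulize could be written to disk for inspection between epochs; the output directory layout and file-name pattern are assumptions.

import os

import scipy.misc


def save_viz_batch(viz_images, out_dir, epoch):
    # Persist each visualization image as epoch<NN>_<index>.png.
    if not os.path.exists(out_dir):
        os.makedirs(out_dir)
    for i, img in enumerate(viz_images):
        scipy.misc.imsave(
            os.path.join(out_dir, 'epoch{:02d}_{:02d}.png'.format(epoch, i)), img)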
def save_inference_samples1(runs_dir, data_dir, sess, image_shape, logits,
                            keep_prob, input_image, epoch):
    # Make folder for current run
    output_dir = os.path.join(runs_dir, str(time.time()))
    if os.path.exists(output_dir):
        shutil.rmtree(output_dir)
    os.makedirs(output_dir)

    # Run NN on test images and save them to HD
    print('Training Finished. Saving test images to: {}'.format(output_dir))
    image_outputs = helper.gen_test_output(
        sess, logits, keep_prob, input_image,
        os.path.join(data_dir, 'data_road/testing'), image_shape)
    for name, image in image_outputs:
        scipy.misc.imsave(os.path.join(output_dir, name), image)

    # keep checkpoints at different steps while training a model. https://www.tensorflow.org/api_docs/python/tf/train/Saver
    saver = tf.train.Saver()
    saver.save(sess, os.path.join(output_dir, 'SS_fcn_{}.ckpt'.format(epoch)))
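A possible counterpart (an assumption, not part of the source) to the saver.save call above: restoring the checkpoint written by save_inference_samples1 into a session before running inference. The file name mirrors the 'SS_fcn_{}.ckpt' pattern used above.

import os

import tensorflow as tf


def restore_fcn_checkpoint(sess, output_dir, epoch):
    # Re-create the Saver for the current graph and load the stored variables.
    saver = tf.train.Saver()
    saver.restore(sess, os.path.join(output_dir, 'SS_fcn_{}.ckpt'.format(epoch)))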
Example #4
def segment_images(runs_dir, data_dir, sess, image_shape, logits, keep_prob,
                   input_image, num_classes):
    # Make folder for current run
    output_dir = os.path.join(runs_dir, str(time.time()))
    if os.path.exists(output_dir):
        shutil.rmtree(output_dir)
    os.makedirs(output_dir)

    # Run NN on test images and save them to HD
    print('Training Finished. Saving test images to: {}'.format(output_dir))
    image_outputs = gen_test_output(
        sess, logits, keep_prob, input_image,
        os.path.join(data_dir, 'data/testing'), image_shape, num_classes)
    for name, image in image_outputs:
        scipy.misc.imsave(os.path.join(output_dir, name), image)
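A minimal sketch of what a gen_test_output helper might look like, inferred from how it is called in the examples on this page (the actual helper module is not shown here, so the body below is an assumption): it runs the network on each test image, thresholds the road-class softmax, and paints the predicted road pixels green.

import os
from glob import glob

import numpy as np
import scipy.misc
import tensorflow as tf


def gen_test_output_sketch(sess, logits, keep_prob, image_pl, data_folder, image_shape):
    # Run the trained network on every test image in <data_folder>/image_2.
    for image_file in glob(os.path.join(data_folder, 'image_2', '*.png')):
        image = scipy.misc.imresize(scipy.misc.imread(image_file), image_shape)

        # Softmax over the two classes, keep the probability of the "road" class.
        im_softmax = sess.run(
            [tf.nn.softmax(logits)],
            {keep_prob: 1.0, image_pl: [image]})
        im_softmax = im_softmax[0][:, 1].reshape(image_shape[0], image_shape[1])
        segmentation = (im_softmax > 0.5).reshape(image_shape[0], image_shape[1], 1)

        # Overlay a translucent green mask on the original image.
        mask = np.dot(segmentation, np.array([[0, 255, 0, 127]]))
        mask = scipy.misc.toimage(mask, mode="RGBA")
        street_im = scipy.misc.toimage(image)
        street_im.paste(mask, box=None, mask=mask)

        yield os.path.basename(image_file), np.array(street_im)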
def run(load_existing=False):
    num_classes = 2
    image_shape = (160, 576)
    data_dir = './data'
    runs_dir = './runs'
    tests.test_for_kitti_dataset(data_dir)

    # Download pretrained vgg model
    helper.maybe_download_pretrained_vgg(data_dir)

    # OPTIONAL: Train and run inference on the Cityscapes dataset instead of the Kitti dataset.
    # You'll need a GPU with at least 10 teraFLOPS to train on.
    #  https://www.cityscapes-dataset.com/

    with tf.Session() as sess:
        # Path to vgg model
        vgg_path = os.path.join(data_dir, 'vgg')
        # Create function to get batches
        get_batches_fn = helper.gen_batch_function(data_dir, image_shape)

        image_input, keep_prob, vgg_layer3_out, vgg_layer4_out, vgg_layer7_out = load_vgg(
            sess, vgg_path)
        output_layer = layers(vgg_layer3_out, vgg_layer4_out, vgg_layer7_out,
                              num_classes)

        epochs = 32
        batch_size = 16

        labels = tf.placeholder(tf.float32,
                                shape=((None, ) + image_shape +
                                       (num_classes, )),
                                name='labels_placeholder')
        learning_rate = tf.placeholder(tf.float32)

        logits, train_op, cross_entropy_loss = optimize(
            output_layer, labels, learning_rate, num_classes)

        correct_pred = tf.equal(tf.argmax(output_layer, 3),
                                tf.argmax(labels, 3))
        accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32),
                                  name='accuracy')

        sess.run(tf.global_variables_initializer())

        saver = tf.train.Saver()

        if load_existing:
            saver.restore(sess, "./model/model.ckpt")

            print("Model loaded. Generating images...")
            image_outputs = helper.gen_test_output(
                sess, logits, keep_prob, image_input,
                os.path.join(data_dir, 'video_frames'), image_shape)
            print("Images generated!")
            for name, image in image_outputs:
                scipy.misc.imsave(os.path.join('./segmented_video', name),
                                  image)

            helper.save_inference_samples(runs_dir, data_dir, sess,
                                          image_shape, logits, keep_prob,
                                          image_input)

        else:
            train_nn(sess, epochs, batch_size, get_batches_fn, train_op,
                     cross_entropy_loss, image_input, labels, keep_prob,
                     learning_rate, accuracy, saver)

            helper.save_inference_samples(runs_dir, data_dir, sess,
                                          image_shape, logits, keep_prob,
                                          image_input)
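A minimal entry point (assumed, not shown in the source) for Example #4: train from scratch by default, or pass load_existing=True to restore ./model/model.ckpt and only generate segmented images.

if __name__ == '__main__':
    run(load_existing=False)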