import os
import random

from tensorflow.python.platform import gfile


def create_validation_file(dataset_path):
    # dataset_path = "./filtered-filtered-dataset-twoLane-Normalized/"
    validation_percent = 0.1
    validation_directory = "Validation"

    #create the validation directory if it does not exist yet
    #note: if you want to redo the split, move the pictures back first - this
    #function does not do that automatically
    if not gfile.Exists(dataset_path + validation_directory):
        # gfile.DeleteRecursively(dataset_path + validation_directory)
        gfile.MakeDirs(dataset_path + validation_directory)

    #get all the images inside the directory
    files = os.listdir(dataset_path)
    image_files = filterImages(files)
    image_files = addDatasetPath(dataset_path, image_files)

    #split it into validation files
    validation_amount = int(validation_percent * len(image_files))

    #randomly pop the chosen number of image files for the validation split
    validation_files = [image_files.pop(random.randrange(len(image_files))) for _ in range(validation_amount)]

    #move the validation files to a new folder
    for name in validation_files:
        #move images
        name_split = name.split("/")
        new_dest = dataset_path + validation_directory + "/" + name_split[-1]
        os.rename(name, new_dest)
        print("moved: ", name, "to", new_dest)

        #move text files
        extension_split = os.path.splitext(name_split[-1])
        text_file_src_name = dataset_path + extension_split[0] + ".txt"
        new_txt_dest = dataset_path + validation_directory + "/" + extension_split[0] + ".txt"
        os.rename(text_file_src_name, new_txt_dest)
        print("moved: ", text_file_src_name, "to", new_txt_dest)
        print("---------------------")
# def filterImages(array):
#     newArr = []
#     for name in array:
#         listName = os.path.splitext(name)
#         extension = listName[-1]
#         if extension == ".jpg":
#             newArr.append(name)
#     return newArr
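
# addDatasetPath and extractLabels come from dataHelperFunctions and are not
# shown in this excerpt. A minimal sketch of the behaviour the surrounding
# code relies on (prepend the dataset directory to each bare file name, and
# read the .txt label that sits next to each image) is given below; the exact
# signatures and return shapes are assumptions, not the repo's implementation.
#
# def addDatasetPath(dataset_path, file_names):
#     return [dataset_path + name for name in file_names]
#
# def extractLabels(image_files):
#     labels = []
#     for image_name in image_files:
#         with open(os.path.splitext(image_name)[0] + ".txt", "r") as f:
#             labels.append(f.read().split())
#     return labels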

###Begin main function here###
# set up the directory that the TensorBoard summaries will be written to
prepare_file_system(summaries_dir)
#load the data set: a list of image file names
image_files = os.listdir(dataset_path)
image_files = filterImages(image_files)

random.shuffle(image_files)
#make the dataset the full path
image_files = addDatasetPath(dataset_path, image_files)
#extract the ground-truth labels for the training images
train_groundTruth = extractLabels(image_files)

#load validation fileset
validation_files = os.listdir(validation_path)
validation_files = filterImages(validation_files)
validation_files = addDatasetPath(validation_path, validation_files)
random.shuffle(validation_files)
validation_groundTruth = extractLabels(validation_files)

assert len(train_groundTruth) == len(image_files)

'''Reads through all the images in a folder one by one to find a corrupted image.'''

import os
import tensorflow as tf
from dataHelperFunctions import filterImages

dataset_path = "./Full dataset - normalized/"
all_images = os.listdir(dataset_path)
all_images = filterImages(all_images)

name = tf.placeholder(tf.string)
image_string = tf.read_file(name)
image_decoded = tf.image.decode_jpeg(image_string)

with tf.Session() as sess:
    for imageName in all_images:
        print(imageName)
        read_image = sess.run(image_decoded,
                              feed_dict={name: dataset_path + imageName})
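
# The loop above stops with an unhandled exception at the first corrupted
# file. A variant that reports the offending image and keeps scanning is
# sketched below; it reuses the placeholders defined above and assumes the
# decode failure surfaces as tf.errors.InvalidArgumentError.
with tf.Session() as sess:
    for imageName in all_images:
        try:
            sess.run(image_decoded, feed_dict={name: dataset_path + imageName})
        except tf.errors.InvalidArgumentError as e:
            print("corrupted image found:", imageName, "-", e)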

logits, endpoints = mobileNet_v3.mobilenet(images, output_regression_count)

# Restore using exponential moving average since it produces (1.5-2%) higher
# accuracy
# ema = tf.train.ExponentialMovingAverage(0.999)
# vars = ema.variables_to_restore()

# load the normalization params if the data is normalized
if normalized_dataset:
    with open(normalized_file_name, "r") as text_file:
        normalization_params = text_file.read().split()
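
# unnormalizeFromParams comes from the repo's helper module and is not shown
# here. Judging by the normalization script further down, which computes a
# mean and a standard deviation per label index, and by the call below that
# ignores the return value, the assumed behaviour is an in-place inverse
# transform, sketched by the hypothetical helper below (the interleaved
# mean/stdev layout of normalization_params is also an assumption):
#
# def unnormalize_sketch(values, params):
#     for i in range(len(values)):
#         mean = float(params[2 * i])
#         stdev = float(params[2 * i + 1])
#         values[i] = values[i] * stdev + mean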

saver = tf.train.Saver()
file_names = os.listdir(dataset_path + validation_path)
file_names = filterImages(file_names)
file_names = addDatasetPath(dataset_path + validation_path, file_names)
# shuffle(file_names)

labels = extractLabels(file_names)

with tf.Session() as sess:
    saver.restore(sess, checkpoint)

    for i in range(len(file_names)):
        x = logits.eval(feed_dict={file_input: file_names[i]})
        print(x)
        x = x[0]
        unnormalizeFromParams(x, normalization_params)
        image = cv2.imread(file_names[i])
        image = drawOneLane(image, x[0], x[1], x[2], "blue")
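        # (illustrative addition, not part of the original script) the drawn
        # frame could be displayed or saved here, e.g.:
        # cv2.imshow("prediction", image)
        # cv2.waitKey(0)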

# Example #5
import os
import statistics
import shutil

from dataHelperFunctions import filterImages, addDatasetPath, extractLabels

def normalize(data, mean, stdev):
    # z-score a single label value and return it as a string
    return str((data - mean) / stdev)
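
# Quick sanity check of normalize with illustrative values:
# (5.0 - 3.0) / 2.0 = 1.0, returned as the string "1.0".
assert normalize(5.0, 3.0, 2.0) == "1.0"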

dataset_path = "D:/cuLane/culane_preprocessing/converted_dataset_percentage_augmented/" #don't forget the slash at the end!

new_dataset_path = "D:/LaneDetectionV2/d_aug_two_lanes_percentage_dataset/"

print("reading data...")
files = os.listdir(dataset_path)
image_files = filterImages(files)
image_files = addDatasetPath(dataset_path, image_files) #merging the full path with the image file names in order to copy them properly later
labels = extractLabels(image_files)

print("read all data...")

#calculate the mean of each
meansOfAllData = []
stdevOfAllData = []
for i in range(len(labels[0])): #assumes every label has the same number of values
    print("calculating mean and stddev at index: ", i)
    #gather the value at this label index across the whole dataset
    allDataAcrossSingleIndex = [float(label[i]) for label in labels]
    mean = statistics.mean(allDataAcrossSingleIndex)
    std = statistics.stdev(allDataAcrossSingleIndex)
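    # (sketch) the statistics computed here would then be collected into
    # meansOfAllData / stdevOfAllData and fed to normalize() when the labels
    # are written out to new_dataset_path, roughly:
    #   meansOfAllData.append(mean)
    #   stdevOfAllData.append(std)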