def create_validation_file(dataset_path):
    # dataset_path = "./filtered-filtered-dataset-twoLane-Normalized/"
    validation_percent = 0.1
    validation_directory = "Validation"

    #create the validation directory if it doesn't exist yet
    #NOTE: re-running this will NOT move previously split pictures back; do that manually first
    if not gfile.Exists(dataset_path + validation_directory):
        gfile.MakeDirs(dataset_path + validation_directory)

    #get all the images inside the directory
    files = os.listdir(dataset_path)
    image_files = filterImages(files)
    image_files = addDatasetPath(dataset_path, image_files)

    #split it into validation files
    validation_amount = int(validation_percent * len(image_files))

    #randomly pop validation_amount image files to form the validation split
    validation_files = [image_files.pop(random.randrange(len(image_files))) for _ in range(validation_amount)]
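    #whatever remains in image_files after the pops stays in the dataset directory and serves as the training set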

    #move the validation files to a new folder
    for name in validation_files:
        #move images
        name_split = name.split("/")
        new_dest = dataset_path + validation_directory + "/" + name_split[-1]
        os.rename(name, new_dest)
        print("moved: ", name, "to", new_dest)

        #move text files
        extension_split = os.path.splitext(name_split[-1])
        text_file_src_name = dataset_path + extension_split[0] + ".txt"
        new_txt_dest = dataset_path + validation_directory + "/" + extension_split[0] + ".txt"
        os.rename(text_file_src_name, new_txt_dest)
        print("moved: ", text_file_src_name, "to", new_txt_dest)
        print("---------------------")

###Begin main function here###
# prepare (reset) the summaries directory
prepare_file_system(summaries_dir)
#load the dataset: a list of image file names
image_files = os.listdir(dataset_path)
image_files = filterImages(image_files)

random.shuffle(image_files)
#make the dataset the full path
image_files = addDatasetPath(dataset_path, image_files)
#extract the ground-truth labels for the training images
train_groundTruth = extractLabels(image_files)

#load validation fileset
validation_files = os.listdir(validation_path)
validation_files = filterImages(validation_files)
validation_files = addDatasetPath(validation_path, validation_files)
random.shuffle(validation_files)
validation_groundTruth = extractLabels(validation_files)

assert len(train_groundTruth) == len(image_files)
assert len(validation_groundTruth) == len(validation_files)

filenames_placeholder = tf.placeholder(tf.string)
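# The placeholder is presumably fed with the image path strings when the input
# pipeline is built elsewhere, e.g. (an assumption, not shown in this snippet):
#   dataset = tf.data.Dataset.from_tensor_slices(filenames_placeholder)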
# filenames = tf.constant(["/var/data/image1.jpg", "/var/data/image2.jpg"])
# Restore using exponential moving average since it produces (1.5-2%) higher
# accuracy
# ema = tf.train.ExponentialMovingAverage(0.999)
# vars = ema.variables_to_restore()

# load the normalization params if the dataset labels are normalized
if normalized_dataset:
    with open(normalized_file_name, "r") as text_file:
        normalization_params = text_file.read().split()
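    # normalization_params is assumed to be a flat list of strings holding the
    # per-coefficient means and stdevs produced by the dataset normalization
    # script, in whatever order unnormalizeFromParams expects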

saver = tf.train.Saver()
file_names = os.listdir(dataset_path + validation_path)
file_names = filterImages(file_names)
file_names = addDatasetPath(dataset_path + validation_path, file_names)
# shuffle(file_names)

labels = extractLabels(file_names)

with tf.Session() as sess:
    saver.restore(sess, checkpoint)

    for i in range(len(file_names)):
        x = logits.eval(feed_dict={file_input: file_names[i]})
        print(x)
        x = x[0]
        # only undo label normalization when the dataset was normalized,
        # otherwise normalization_params is never defined
        if normalized_dataset:
            unnormalizeFromParams(x, normalization_params)
        image = cv2.imread(file_names[i])
        image = drawOneLane(image, x[0], x[1], x[2], "blue")
        image = drawOneLane(image, x[3], x[4], x[5], "yellow")
import os
import statistics
import shutil

from dataHelperFunctions import filterImages, addDatasetPath, extractLabels

def normalize(data, mean, stdev):
    return str((data - mean) / stdev)
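
# For reference, a minimal sketch of the inverse transform. The inference code
# elsewhere calls a helper named unnormalizeFromParams for this; its exact
# signature isn't shown here, so this standalone version is only an assumption.
def unnormalize(data, mean, stdev):
    # normalize() maps x to (x - mean) / stdev, so the inverse is x * stdev + mean
    return data * stdev + mean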

dataset_path = "D:/cuLane/culane_preprocessing/converted_dataset_percentage_augmented/" #don't forget the slash at the end!

new_dataset_path = "D:/LaneDetectionV2/d_aug_two_lanes_percentage_dataset/"

print("reading data...")
files = os.listdir(dataset_path)
image_files = filterImages(files)
image_files = addDatasetPath(dataset_path, image_files) #merging the full path with the image file names in order to copy them properly later
labels = extractLabels(image_files)

print("read all data...")

#calculate the mean of each
meansOfAllData = []
stdevOfAllData = []
for i in range(len(labels[0])): #assuming every label vector has the same length
    print("calculating mean and stddev at index: ", i)
    allDataAcrossSingleIndex = []
    for j in range(len(labels)):
        allDataAcrossSingleIndex.append(float(labels[j][i]))
    mean = statistics.mean(allDataAcrossSingleIndex)
    std = statistics.stdev(allDataAcrossSingleIndex)
    meansOfAllData.append(mean)
    stdevOfAllData.append(std)
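
# A minimal sketch of how these stats would typically be applied with the
# normalize() helper above; the real continuation of this script is cut off,
# so the lines below are only an illustration, not the original code:
# normalized_labels = [
#     [normalize(float(v), meansOfAllData[k], stdevOfAllData[k]) for k, v in enumerate(lbl)]
#     for lbl in labels
# ]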
import os
from statistics import mean, stdev
from dataHelperFunctions import filterImages, addDatasetPath, extractLabels
import create_validation_file


def normalize(data, mean, stdev):
    return str((data - mean) / stdev)


# dataset_path = "./filtered-filtered-dataset-twoLane-Normalized - validation incorrect/"
dataset_path = "./augment_all_v2/"

#get all the images inside the directory
files = os.listdir(dataset_path)
image_files = filterImages(files)
image_files = addDatasetPath(dataset_path, image_files)
labels = extractLabels(image_files)

#I apologize to anyone who has to maintain this code, but it wasn't meant to be extendable, just a quick one-off
#if you want to use it for a different dataset and with more coefficients, consider refactoring

#this is for normalization later
#divide by the max
aCoeffArray = []
bCoeffArray = []
cCoeffArray = []

aCoeffArrayLane2 = []
bCoeffArrayLane2 = []
cCoeffArrayLane2 = []
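
#these lists presumably collect the a, b, c polynomial coefficients of each lane
#across the whole dataset so per-coefficient statistics can be computed for
#normalization; the loop that fills them is not shown in this snippet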