# ema = tf.train.ExponentialMovingAverage(0.999)
# vars = ema.variables_to_restore()
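# A sketch of the commented-out idea above: if evaluation should use the EMA
# shadow weights, the Saver can be built from the shadow-variable map instead
# of the plain tf.train.Saver() below.
# ema = tf.train.ExponentialMovingAverage(0.999)
# saver = tf.train.Saver(ema.variables_to_restore())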

# load the normalization params if the data is normalized
if normalized_dataset:
    with open(normalized_file_name, "r") as text_file:
        normalization_params = text_file.read().split()
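# The params file is assumed to hold whitespace-separated statistics, read
# back above as a flat list; unnormalizeFromParams presumably inverts a
# z-score, i.e. roughly value * stdev + mean for each label component.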

saver = tf.train.Saver()
file_names = os.listdir(dataset_path + validation_path)
file_names = filterImages(file_names)
file_names = addDatasetPath(dataset_path + validation_path, file_names)
# shuffle(file_names)

labels = extractLabels(file_names)

with tf.Session() as sess:
    saver.restore(sess, checkpoint)

    for i in range(len(file_names)):
        x = logits.eval(feed_dict={file_input: file_names[i]})
        print(x)
        x = x[0]
        x = unnormalizeFromParams(x, normalization_params)
        image = cv2.imread(file_names[i])
        image = drawOneLane(image, x[0], x[1], x[2], "blue")
        image = drawOneLane(image, x[3], x[4], x[5], "yellow")

        label = unnormalizeFromParams(labels[i], normalization_params)
        image = drawOneLane(image, label[0], label[1], label[2], "green")
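
        # the drawn frame is never displayed or saved in this excerpt; showing
        # it here is an assumed intent, not part of the original code
        cv2.imshow("prediction", image)
        cv2.waitKey(0)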

### Begin main function here ###
prepare_file_system(summaries_dir)

# load the dataset: a list of image file names
image_files = os.listdir(dataset_path)
image_files = filterImages(image_files)

random.shuffle(image_files)
# prepend the dataset path so each entry is a full file path
image_files = addDatasetPath(dataset_path, image_files)
# extract the training ground-truth labels from the file set
train_groundTruth = extractLabels(image_files)

#load validation fileset
validation_files = os.listdir(validation_path)
validation_files = filterImages(validation_files)
validation_files = addDatasetPath(validation_path, validation_files)
random.shuffle(validation_files)
validation_groundTruth = extractLabels(validation_files)

assert len(train_groundTruth) == len(image_files)
assert len(validation_groundTruth) == len(validation_files)

# create the input tensors
filenames_placeholder = tf.placeholder(tf.string)
# filenames = tf.constant(["/var/data/image1.jpg", "/var/data/image2.jpg"])
# `labels[i]` is the label for the image in `filenames[i]`.
labels_inputPlaceholder = tf.placeholder(tf.float32)
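# A minimal sketch of the tf.data pipeline these placeholders are meant to
# feed; the _parse_function body and the 224x224 target size are assumptions,
# not from the original source.
def _parse_function(filename, label):
    image_string = tf.read_file(filename)
    image = tf.image.decode_jpeg(image_string, channels=3)
    image = tf.image.resize_images(image, [224, 224])
    return image, label

dataset = tf.data.Dataset.from_tensor_slices(
    (filenames_placeholder, labels_inputPlaceholder))
dataset = dataset.map(_parse_function)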
### Example #3 ###
import os
import shutil
import statistics

from dataHelperFunctions import filterImages, addDatasetPath, extractLabels

def normalize(data, mean, stdev):
    return str((data - mean) / stdev)
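# Example: normalize(12.0, 10.0, 2.0) returns "1.0"; the value is
# stringified, presumably so it can be written straight back into a label file.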

dataset_path = "D:/cuLane/culane_preprocessing/converted_dataset_percentage_augmented/" #don't forget the slash at the end!

new_dataset_path = "D:/LaneDetectionV2/d_aug_two_lanes_percentage_dataset/"

print("reading data...")
files = os.listdir(dataset_path)
image_files = filterImages(files)
image_files = addDatasetPath(dataset_path, image_files)  # prepend the full dataset path so the files can be copied correctly later
labels = extractLabels(image_files)

print("read all data...")

# calculate the mean and standard deviation of each label component
meansOfAllData = []
stdevOfAllData = []
for i in range(len(labels[0])):  # assumes all label vectors have the same length
    print("calculating mean and stddev at index: ", i)
    allDataAcrossSingleIndex = []
    for j in range(len(labels)):
        allDataAcrossSingleIndex.append(float(labels[j][i]))
    mean = statistics.mean(allDataAcrossSingleIndex)
    std = statistics.stdev(allDataAcrossSingleIndex)
    meansOfAllData.append(mean)
    stdevOfAllData.append(std)
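
# A possible continuation (the file name "normalization_params.txt" is an
# assumption, not from the original): persist the per-index statistics in the
# whitespace-separated layout the evaluation script above reads back with
# read().split().
if not os.path.isdir(new_dataset_path):
    os.makedirs(new_dataset_path)
with open(new_dataset_path + "normalization_params.txt", "w") as params_file:
    for mean, std in zip(meansOfAllData, stdevOfAllData):
        params_file.write("{} {} ".format(mean, std))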