width_shift_range=0.1,
                         height_shift_range=0.1,
                         shear_range=0.2,
                         zoom_range=0.2,
                         horizontal_flip=True,
                         fill_mode="nearest")
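# (note) `aug` yields randomly shifted, sheared, zoomed, and horizontally flipped
# copies of the training images on the fly; fill_mode="nearest" fills any empty
# border pixels with the closest valid pixel value.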

# grab the list of images that we'll be describing, then extract
# the class label names from the image paths
print("[INFO]: Loading images....")
imagePaths = list(paths.list_images(args["dataset"]))
classNames = [pt.split(os.path.sep)[-2] for pt in imagePaths]
classNames = [str(x) for x in np.unique(classNames)]

# Initialize the image preprocessors
aap = AspectAwarePreprocessor(img_size, img_size)
itap = ImageToArrayPreprocessor()

# Load the dataset, convert the pixels to floats, and zero-center them by subtracting the mean
sdl = SimpleDatasetLoader(preprocessors=[aap, itap])
(data, labels) = sdl.load(imagePaths, verbose=500)
#data = data.astype("float") / 255.0
data = data.astype("float")
mean = np.mean(data, axis=0)
data -= mean

# Split the data into training data (75%) and testing data (25%)
(trainX, testX, trainY, testY) = train_test_split(data,
                                                  labels,
                                                  test_size=0.25,
                                                  random_state=42)
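# (sketch, not from the source) the same training `mean` computed above has to be
# subtracted from any new image before it is passed to the trained network, e.g.
# for a hypothetical query image loaded as `image`:
#     image = image.astype("float") - mean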
Example #2
flower_className = ['Daffodil', 'Snowdrop', 'Lily_Valley', 'Bluebell',
                    'Crocus', 'Iris', 'Tigerlily', 'Tulip',
                    'Fritillary', 'Sunflower', 'Daisy', "Colts'_Foot",
                    'Dandelion', 'Cowslip', 'Buttercup', 'Windflower', 'Pansy']

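# (assumption, not shown in this snippet) `path` points at the unpacked Flowers-17
# folder and `pl` is its file listing, e.g. pl = os.listdir(path); both are defined
# earlier in the source script. The images are named image_0001.jpg ... image_1360.jpg
# with 80 consecutive images per class, so `index // 80` below maps each file to the
# matching entry of flower_className.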
for p in pl:
    if '.jpg' in p:
        index = int(p.split("_")[-1].replace(".jpg", "")) - 1
        classname = index // 80
        classname = flower_className[classname]
        os.makedirs(path + '/' + classname, exist_ok=True)
        shutil.move(path + '/' + p, path + '/' + classname + '/' + p)

print("[INFO]")
imagePaths = list(paths.list_images(r'C:\Users\Michael\Desktop\Data Science\My directory set-up for Computer-Vision\Deep learning for computer vision - Practitioneers bundle\datasets\Flowers17'))
aap = AspectAwarePreprocessor(64,64)
sdl = SimpleDatasetLoader(preprocessors=[aap])
(data, labels) = sdl.load(imagePaths, verbose=500)

#preprocessing the data
data = data.astype("float")/255.0
lb = LabelBinarizer()
labels = lb.fit_transform(labels)
trainX, testX, trainY, testY = train_test_split(data, labels, random_state=42, test_size=0.25)

# build the network and apply data augmentation
opt = SGD(lr=0.05, momentum=0.9, nesterov=True)
aug = ImageDataGenerator(rotation_range=30, width_shift_range=0.1, zoom_range=0.2,
                         height_shift_range=0.1, shear_range=0.2, horizontal_flip=True,
                         fill_mode="nearest")
model = MiniVGGNet.build(width=64, height=64, depth=3, classes=len(flower_className))
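# A minimal continuation sketch (not part of the source snippet): compile the
# network with the SGD optimizer above and train it on batches produced by the
# augmentation generator. The epoch count and batch size are illustrative.
model.compile(loss="categorical_crossentropy", optimizer=opt, metrics=["accuracy"])
H = model.fit_generator(aug.flow(trainX, trainY, batch_size=32),
                        validation_data=(testX, testY),
                        steps_per_epoch=len(trainX) // 32,
                        epochs=100, verbose=1)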
Example #3
# construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-d", "--dataset", required=False, default=  "./train/images", help="path to the input dataset")
ap.add_argument("-o", "--output", required=False, default= "./train/gameofdeeplearning.png", help="path to save the plot")
ap.add_argument("-m", "--model", required=False, default=  "./train/gameofdeeplearning.hdf5", help="path to save the model")
args = vars(ap.parse_args())


#grab the list of images that we'll be describing
print("[INFO] laoding images...")
imagePaths = list(paths.list_images(args["dataset"]))



# initialize the image preprocessors
aap = AspectAwarePreprocessor(145, 145)
iap = ImageToArrayPreprocessor()
#sp = SimplePreprocessor(200, 145)
#iap = ImageToArrayPreprocessor()

# load the dataset from disk, then scale the raw pixel intensities to the range [0, 1]
sdl = SimpleDatasetLoader(preprocessors=[aap, iap])
(data, labels) = sdl.load(imagePaths, verbose=500)
data = data.astype("float") / 255.0

# partition the data into training and testing splits using 70% of
# the data for training and the remaining 30% for testing
(trainX, testX, trainY, testY) = train_test_split(data, labels, test_size=0.30, random_state=42)

# convert the labels to one-hot vectors (fit one binarizer and reuse it for the test set)
lb = LabelBinarizer()
trainY = lb.fit_transform(trainY)
testY = lb.transform(testY)
Example #4
from keras.models import Model
from sklearn.preprocessing import LabelBinarizer, LabelEncoder
from sklearn.model_selection import train_test_split
from imutils import paths
import numpy as np
import os
import cv2

## prepare the image data generator for data augmentation
#aug = ImageDataGenerator(rotation_range=30, width_shift_range = 0.1, height_shift_range = 0.1,
#                         shear_range = 0.2, zoom_range = 0.2, horizontal_flip = True, fill_mode = "nearest")

# build the list of image paths and extract the class names
imagePaths = list(paths.list_images(r"C:\Users\H P ENVY\Desktop\Data Science\My directory set-up for Computer-Vision\datasets\Flowers17"))
classNames = [pt.split(os.path.sep)[-2] for pt in imagePaths]
classNames = [str(x) for x in np.unique(classNames)]
#baseModel  = VGG16()
aap = AspectAwarePreprocessor(224,224)
isp = ImageToArrayPreprocessor()

# load the dataset from disk, then scale the raw pixel intensities to the range [0, 1]
sdl = SimpleDatasetLoader(preprocessors=[aap, isp])
(data, labels) = sdl.load(imagePaths, verbose=1)
data = data.astype("float") / 255.0

# split the dataset into training (75%) and testing (25%) splits
(trainX, testX, trainY, testY) = train_test_split(data, labels, test_size=0.25, random_state=42)

# binarize the labels, fitting once on the training labels and reusing the
# binarizer on the test labels so the class ordering matches
lb = LabelBinarizer()
trainY = lb.fit_transform(trainY)
testY = lb.transform(testY)

# introduce the baseModel, build the headModel, and set up the class objects
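# A hedged sketch (not from the source) of what the comment above describes:
# load VGG16 without its fully connected head, attach a small new head for the
# flower classes, and freeze the convolutional base so only the head is trained.
# The layer sizes are illustrative choices.
from keras.applications import VGG16
from keras.layers import Input, Flatten, Dense, Dropout

baseModel = VGG16(weights="imagenet", include_top=False,
                  input_tensor=Input(shape=(224, 224, 3)))
headModel = Flatten(name="flatten")(baseModel.output)
headModel = Dense(256, activation="relu")(headModel)
headModel = Dropout(0.5)(headModel)
headModel = Dense(len(classNames), activation="softmax")(headModel)
model = Model(inputs=baseModel.input, outputs=headModel)

# freeze every layer in the base model so its weights are not updated
for layer in baseModel.layers:
    layer.trainable = False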
Example #5
le = LabelEncoder()
trainLabels = le.fit_transform(trainLabels)
split = train_test_split(trainPaths,
                         trainLabels,
                         test_size=config.NUM_TEST_IMAGES,
                         stratify=trainLabels,
                         random_state=42)
(trainPaths, testPaths, trainLabels, testLabels) = split
split = train_test_split(trainPaths,
                         trainLabels,
                         test_size=config.NUM_VAL_IMAGES,
                         stratify=trainLabels,
                         random_state=42)
(trainPaths, valPaths, trainLabels, valLabels) = split
(R, G, B) = ([], [], [])
aap = AspectAwarePreprocessor(256, 256)

dataset = [("train", trainPaths, trainLabels, config.TRAIN_HDF5),
           ("test", testPaths, testLabels, config.TEST_HDF5),
           ("val", valPaths, valLabels, config.VAL_HDF5)]

for (dType, paths, labels, outputPath) in dataset:
    writer = HDF5DatasetWriter((len(paths), 256, 256, 3),
                               outputPath,
                               bufSize=20)
    for (path, label) in zip(paths, labels):
        image = cv2.imread(path)
        image = aap.preprocess(image)

        if dType == "train":
            (b, g, r) = cv2.mean(image)[:3]
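            # (sketch, not part of the source snippet) accumulate the per-channel
            # means of the training images; averaging R, G, and B after the loop
            # gives the dataset mean used later for mean subtraction
            R.append(r)
            G.append(g)
            B.append(b)

        # (sketch, assuming the usual HDF5DatasetWriter add()/close() interface)
        # store the preprocessed image and its label, then close this split
        writer.add([image], [label])
    writer.close()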
Example #6
# construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-d", "--dataset", required=False, default=  "./train/test", help="path to the input dataset")
ap.add_argument("-o", "--output", required=False, default= "./train/gameofdeeplearning.png", help="path to save the plot")
ap.add_argument("-m", "--model", required=False, default=  "./train/gameofdeeplearning.hdf5", help="path to save the model")
args = vars(ap.parse_args())


#grab the list of images that we'll be describing
print("[INFO] laoding images...")
imagePaths = list(paths.list_images(args["dataset"]))



# initialize the image preprocessors
aap = AspectAwarePreprocessor(210, 210)
iap = ImageToArrayPreprocessor()
#sp = SimplePreprocessor(200, 145)
#iap = ImageToArrayPreprocessor()

# load the dataset from disk, then scale the raw pixel intensities to the range [0, 1]
sdl = SimpleDatasetLoader(preprocessors=[aap, iap])
(data, labels) = sdl.load(imagePaths, verbose=500)
data = data.astype("float") / 255.0

# partition the data into training and testing splits using 70% of
# the data for training and the remaining 30% for testing
(trainX, testX, trainY, testY) = train_test_split(data, labels, test_size=0.30, random_state=42)

## convert the labels from integers to vectors
#trainY = LabelBinarizer().fit_transform(trainY)
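# (sketch, not from the source) the --model and --output arguments defined above
# suggest the script would end by serializing the trained network and saving the
# training plot, e.g.:
#     model.save(args["model"])
#     plt.savefig(args["output"])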