# imports for the snippet below; the pyimagesearch module paths and the
# project-level `config` module follow the usual pyimagesearch project
# layout and may differ in your own project
from pyimagesearch.preprocessing.simplepreprocessor import SimplePreprocessor
from pyimagesearch.preprocessing.patchpreprocessor import PatchPreprocessor
from pyimagesearch.preprocessing.meanpreprocessor import MeanPreprocessor
from pyimagesearch.preprocessing.imagetoarraypreprocessor import ImageToArrayPreprocessor
from pyimagesearch.datasets.simpledatasetloader import SimpleDatasetLoader
from pyimagesearch.io.hdf5datasetgenerator import HDF5DatasetGenerator
from sklearn.model_selection import train_test_split
from keras.preprocessing.image import ImageDataGenerator
from imutils import paths
import matplotlib.pyplot as plt
import numpy as np
import argparse
import json
import config  # project configuration defining DATASET_MEAN, TRAIN_HDF5, VAL_HDF5

# construct the argument parse and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-d", "--dataset", required=True, help="path to input dataset")
args = vars(ap.parse_args())

# grab the list of images that we'll be describing
print("[INFO] loading images...")
imagePaths = list(paths.list_images(args["dataset"]))

# initialize the image preprocessors
sp = SimplePreprocessor(32, 32)
iap = ImageToArrayPreprocessor()

# load the dataset from disk then scale the raw pixel intensities
# to the range [0, 1]
sdl = SimpleDatasetLoader(preprocessors=[sp, iap])
(data, labels) = sdl.load(imagePaths, verbose=500)
data = data.astype("float") / 255.0

# partition the data into training and testing splits using 75% of
# the data for training and the remaining 25% for testing
(trainX, testX, trainY, testY) = train_test_split(data,
                                                  labels,
                                                  test_size=0.25,
                                                  random_state=42)
bSize = 8  # 128

# construct the training image generator for data augmentation
aug = ImageDataGenerator(rotation_range=20,
                         zoom_range=0.15,
                         width_shift_range=0.2,
                         height_shift_range=0.2,
                         shear_range=0.15,
                         horizontal_flip=True,
                         fill_mode="nearest")

# load the RGB means for the training set
means = json.loads(open(config.DATASET_MEAN).read())

# initialize the image preprocessors
sp = SimplePreprocessor(227, 227)
pp = PatchPreprocessor(227, 227)
mp = MeanPreprocessor(means["R"], means["G"], means["B"])
iap = ImageToArrayPreprocessor()

# initialize the training and validation dataset generators
trainGen = HDF5DatasetGenerator(config.TRAIN_HDF5,
                                bSize,
                                aug=aug,
                                preprocessors=[pp, mp, iap],
                                classes=2)
valGen = HDF5DatasetGenerator(config.VAL_HDF5,
                              bSize,
                              preprocessors=[sp, mp, iap],
                              classes=2)
Example #3
import sys
import json

sys.path.append("../")  # make the local pyimagesearch / dog_vs_cat packages importable
from pyimagesearch.utils.ranked import rank5_accuracy
from dog_vs_cat.configs import dog_vs_cat_configs as configs
from pyimagesearch.preprocessing.croppreprocessor import CropPreprocessor
from pyimagesearch.preprocessing.meanpreprocessor import MeanPreprocessor
from pyimagesearch.preprocessing.simplepreprocessor import SimplePreprocessor
from pyimagesearch.preprocessing.imagetoarraypreprocessor import ImageToArrayPreprocessor

# load RGB mean values
means = json.loads(open(configs.DATASET_MEAN).read())

# load preprocessors
ip = ImageToArrayPreprocessor()
cp = CropPreprocessor(width=227, height=227)
sp = SimplePreprocessor(width=227, height=227)
mp = MeanPreprocessor(rMean=means["R"], gMean=means["G"], bMean=means["B"])


# custom preprocessor class which utilizes custom preprocessors
class CustomPreprocessing:
    def __init__(self, preprocessors=None):
        # avoid a shared mutable default argument
        self.preprocessors = preprocessors if preprocessors is not None else []

    def preprocess(self, image):
        """ applies preprocessors on image """
        for preprocessor in self.preprocessors:
            image = preprocessor.preprocess(image)

        return image
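A short usage sketch of the class above: the file name is a placeholder, and the crop preprocessor (cp) is left out of the chain because it typically returns multiple crops per image rather than a single array.

import cv2

# chain resize -> mean subtraction -> channel-ordering conversion
custom = CustomPreprocessing(preprocessors=[sp, mp, ip])
image = cv2.imread("example.jpg")   # placeholder path
image = custom.preprocess(image)
print(image.shape)                  # e.g. (227, 227, 3)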
Example #4
# imports for the snippet below; module paths follow the usual pyimagesearch
# layout and the `config` module is project-specific
from pyimagesearch.preprocessing.simplepreprocessor import SimplePreprocessor
from pyimagesearch.preprocessing.meanpreprocessor import MeanPreprocessor
from pyimagesearch.preprocessing.imagetoarraypreprocessor import ImageToArrayPreprocessor
from pyimagesearch.io.hdf5datasetgenerator import HDF5DatasetGenerator
from keras.preprocessing.image import ImageDataGenerator
import argparse
import json
import config  # defines DATASET_MEAN, TRAIN_HDF5, VAL_HDF5, NUM_CLASSES

# the original script's argument definitions are truncated in this excerpt;
# the --checkpoints argument is an assumption based on the checkpoint
# comment at the end of the snippet
ap = argparse.ArgumentParser()
ap.add_argument("-c", "--checkpoints", required=True,
                help="path to output checkpoint directory")
args = vars(ap.parse_args())

# construct the training image generator for data augmentation
aug = ImageDataGenerator(rotation_range=18,
                         zoom_range=0.15,
                         width_shift_range=0.2,
                         height_shift_range=0.2,
                         shear_range=0.15,
                         horizontal_flip=True,
                         fill_mode="nearest")

# load the RGB means for the training set
means = json.loads(open(config.DATASET_MEAN).read())

# initialize the image preprocessors
sp = SimplePreprocessor(64, 64)
mp = MeanPreprocessor(means["R"], means["G"], means["B"])
iap = ImageToArrayPreprocessor()

# initialize the training and validation dataset generators
trainGen = HDF5DatasetGenerator(config.TRAIN_HDF5,
                                64,
                                aug=aug,
                                preprocessors=[sp, mp, iap],
                                classes=config.NUM_CLASSES)
valGen = HDF5DatasetGenerator(config.VAL_HDF5,
                              64,
                              preprocessors=[sp, mp, iap],
                              classes=config.NUM_CLASSES)

# if there is no specific model checkpoint supplied, then initialize
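The excerpt ends at the checkpoint comment. As a rough sketch of how these generators are usually consumed, assuming `model` is an already-compiled Keras model and that HDF5DatasetGenerator exposes generator(), numImages, and close() as in the common pyimagesearch implementation:

# hedged sketch -- `model` and the epoch count are placeholders
model.fit_generator(
    trainGen.generator(),
    steps_per_epoch=trainGen.numImages // 64,
    validation_data=valGen.generator(),
    validation_steps=valGen.numImages // 64,
    epochs=10,
    verbose=1)

# release the underlying HDF5 file handles
trainGen.close()
valGen.close()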
Example #5
    print("Driver: {}/{}".format(in_ds.GetDriver().ShortName,
                                 in_ds.GetDriver().LongName))
    print("Size is {} x {} x {}".format(in_ds.RasterXSize, in_ds.RasterYSize,
                                        in_ds.RasterCount))
    print("Projection is {}".format(in_ds.GetProjection()))
    geotransform = in_ds.GetGeoTransform()
    if geotransform:
        print("Origin = ({}, {})".format(geotransform[0], geotransform[3]))
        print("Pixel Size = ({}, {})".format(geotransform[1], geotransform[5]))

    #plt.figure(1)
    #plt.imshow(in_ds.GetRasterBand(1).ReadAsArray(), cmap="nipy_spectral")
    return (in_ds, geotransform)
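

# GDAL's geotransform follows the standard affine convention; the helper
# below is a hypothetical addition (not in the original script) showing how
# a pixel index (col, row) maps to map coordinates.
def pixel_to_map(geotransform, col, row):
    x_geo = geotransform[0] + col * geotransform[1] + row * geotransform[2]
    y_geo = geotransform[3] + col * geotransform[4] + row * geotransform[5]
    return (x_geo, y_geo)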


sp = SimplePreprocessor(config.IMAGE_SIZE_COLS, config.IMAGE_SIZE_ROWS)

# extract image paths and coordinates from stereo_pose_est.data
renav, o_lat, o_lon, ftype = rutil.read_renav(config.NAV_PATH)

# get bathymetry (gdal raster) and geotransform
bathy_ds, geotransform = extract_geotransform(config.BATHY_PATH)
# transformation from lat/lon to UTM for the bathymetry
utm_proj = pyproj.Proj(config.BATHY_UTM_PROJ_PARAMS)

# split into training, validation and testing
# load image, extract bathy patch
# save to HDF5

# sample from the training set to build the testing split
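
# Hedged sketch of the lat/lon -> UTM -> pixel-index step outlined in the
# comments above. Assumes a north-up raster (no rotation terms in the
# geotransform); the coordinates are placeholders, and the original script
# presumably extracts a larger patch than the single cell read here.
lat, lon = -33.0, 151.0                   # placeholder navigation fix
easting, northing = utm_proj(lon, lat)    # pyproj.Proj instances take (lon, lat)
col = int((easting - geotransform[0]) / geotransform[1])
row = int((northing - geotransform[3]) / geotransform[5])
cell = bathy_ds.GetRasterBand(1).ReadAsArray(col, row, 1, 1)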