def processing_image(image):
	# initialize the image preprocessors: resize to 64x64 while preserving
	# the aspect ratio, then convert the image to a Keras-compatible array
	aap = AspectAwarePreprocessor(64, 64)
	iap = ImageToArrayPreprocessor()

	# apply the preprocessors directly to the single in-memory image
	# (SimpleDatasetLoader operates on lists of image *paths*, so it is not
	# used here)
	X_test = iap.preprocess(aap.preprocess(image))

	# scale the raw pixel intensities to the range [0, 1]
	X_test = X_test.astype("float") / 255.0

	return X_test
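A quick usage sketch for this helper (the image filename and the `model` variable in the commented line are placeholders, not part of the original code):

import cv2
import numpy as np

# hypothetical usage: read one image from disk, preprocess it, and add a
# batch dimension so it can be fed to a trained Keras model
image = cv2.imread("example.jpg")
X = processing_image(image)
X = np.expand_dims(X, axis=0)
# preds = model.predict(X)  # 'model' would be a previously trained network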
# import the necessary packages (the pyimagesearch import paths below assume
# the project's package layout and may need adjusting)
from pyimagesearch.preprocessing import AspectAwarePreprocessor
from pyimagesearch.preprocessing import ImageToArrayPreprocessor
from pyimagesearch.datasets import SimpleDatasetLoader
from sklearn.model_selection import train_test_split
from imutils import paths
import numpy as np
import argparse
import os

# construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-d", "--dataset", required=True,
	help="path to input dataset")
args = vars(ap.parse_args())

# grab the list of images that we'll be describing, then extract
# the class label names from the image paths
print("[INFO] loading images...")
imagePaths = list(paths.list_images(args["dataset"]))
classNames = [pt.split(os.path.sep)[-2] for pt in imagePaths]
classNames = [str(x) for x in np.unique(classNames)]

# initialize the image preprocessors
aap = AspectAwarePreprocessor(64, 64)
iap = ImageToArrayPreprocessor()

# load the dataset from disk, then scale the raw pixel intensities
# to the range [0, 1]
sdl = SimpleDatasetLoader(preprocessors=[aap, iap])
(data, labels) = sdl.load(imagePaths, verbose=500)
data = data.astype("float") / 255.0

# partition the data into training and testing splits using 75% of
# the data for training and the remaining 25% for testing
(trainX, testX, trainY, testY) = train_test_split(data, labels,
	test_size=0.25, random_state=42)
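The class-name labels returned by SimpleDatasetLoader still need to be one-hot encoded before a network can be trained on them. A minimal sketch of that step, mirroring the LabelBinarizer usage in the fine-tuning snippet further below (the import and print statement are additions for illustration, not part of the original script):

from sklearn.preprocessing import LabelBinarizer

# convert the labels from class-name strings to one-hot vectors, fitting
# the binarizer on the training labels and reusing it for the test labels
lb = LabelBinarizer()
trainY = lb.fit_transform(trainY)
testY = lb.transform(testY)

print("[INFO] {} training and {} testing images".format(len(trainX), len(testX)))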
# perform another split on the training data, this time carving out the
# validation set (stratified so that class proportions are preserved)
(trainPaths, valPaths, trainLabels, valLabels) = train_test_split(trainPaths,
	trainLabels, test_size=config.NUM_VAL_IMAGES, stratify=trainLabels,
	random_state=42)

# construct a list pairing the training, validation, and testing image paths
# along with their corresponding labels and output HDF5 files
datasets = [
	("train", trainPaths, trainLabels, config.TRAIN_HDF5),
	("val", valPaths, valLabels, config.VAL_HDF5),
	("test", testPaths, testLabels, config.TEST_HDF5)]

# initialize the image preprocessor and the lists of RGB channel averages
aaPreprocessor = AspectAwarePreprocessor(256, 256)
(R, G, B) = ([], [], [])

# loop over the dataset tuples
for (datasetType, paths, labels, outputPath) in datasets:
	# create the HDF5 writer for this split
	print("[INFO] building {}...".format(outputPath))
	writer = HDF5DatasetWriter((len(paths), 256, 256, 3), outputPath)

	# initialize the progress bar
	widgets = ["Building Dataset: ", progressbar.Percentage(), " ",
		progressbar.Bar(), " ", progressbar.ETA()]
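	# the remainder of the loop body is not shown above; what follows is a
	# minimal sketch of how it typically proceeds, assuming cv2, json, and
	# numpy (as np) are imported at the top of the script, the labels are
	# already numerically encoded, and config.DATASET_MEAN names the JSON
	# file that will store the channel means
	pbar = progressbar.ProgressBar(maxval=len(paths), widgets=widgets).start()

	# loop over the image paths and labels in this split
	for (i, (path, label)) in enumerate(zip(paths, labels)):
		# load the image and resize it to 256x256, preserving the aspect ratio
		image = cv2.imread(path)
		image = aaPreprocessor.preprocess(image)

		# for the training split, accumulate the per-channel means so the
		# dataset mean can be subtracted at training time
		if datasetType == "train":
			(b, g, r) = cv2.mean(image)[:3]
			R.append(r)
			G.append(g)
			B.append(b)

		# add the image and its label to the HDF5 dataset, then update the bar
		writer.add([image], [label])
		pbar.update(i)

	# close the progress bar and the HDF5 writer for this split
	pbar.finish()
	writer.close()

# once every split has been written, serialize the channel means to disk
print("[INFO] serializing means...")
D = {"R": float(np.mean(R)), "G": float(np.mean(G)), "B": float(np.mean(B))}
with open(config.DATASET_MEAN, "w") as f:
	f.write(json.dumps(D))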
# path where the fine-tuned model will be serialized
model_path = "../saved_models/{}_fine_tune.model".format(db_name)

# construct the image generator for data augmentation
aug = ImageDataGenerator(rotation_range=30, width_shift_range=0.1,
	height_shift_range=0.1, shear_range=0.2, zoom_range=0.2,
	horizontal_flip=True, fill_mode="nearest")

# grab the list of images that we'll be describing, then extract
# the class label names from the image paths
print("[INFO] loading images...")
imagePaths = list(paths.list_images(db_path))
classNames = [pt.split(os.path.sep)[-2] for pt in imagePaths]
classNames = [str(x) for x in np.unique(classNames)]

# initialize the image preprocessors
aap = AspectAwarePreprocessor(224, 224)
iap = ImageToArrayPreprocessor()

# load the dataset from disk, then scale the raw pixel intensities to
# the range [0, 1]
sdl = SimpleDatasetLoader(preprocessors=[aap, iap])
(data, labels) = sdl.load(imagePaths, verbose=500)
data = data.astype("float") / 255.0

# partition the data into training and testing splits using 75% of
# the data for training and the remaining 25% for testing
(trainX, testX, trainY, testY) = train_test_split(data, labels,
	test_size=0.25, random_state=42)

# convert the labels from class-name strings to one-hot vectors
lb = LabelBinarizer()
trainY = lb.fit_transform(trainY)
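From here, the test labels get the same encoding and the augmentation generator defined above feeds the network during training. The sketch below assumes a recent tf.keras (where model.fit accepts a generator) and that `model` is an already-compiled network, for example a pre-trained base with a freshly initialized head; the batch size of 32 and epoch count of 25 are placeholder hyperparameters:

# encode the test labels with the binarizer fit on the training labels
testY = lb.transform(testY)

# fine-tune the network on augmented batches ('model' is assumed to be an
# already-compiled network set up elsewhere in the script)
H = model.fit(
	aug.flow(trainX, trainY, batch_size=32),
	validation_data=(testX, testY),
	steps_per_epoch=len(trainX) // 32,
	epochs=25, verbose=1)

# serialize the fine-tuned model to the path defined above
model.save(model_path)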