Example #1
from project.safe_belt import config
from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import train_test_split
from preprocessing.aspectawarepreprocessor import AspectAwarePreprocessor
from preprocessing.simplepreprocessor import SimplePreprocessor
from tools.io_.hdf5datasetwriter import HDF5DatasetWriter
from tools import paths
import numpy as np
import progressbar
import json
import cv2
import os

# grab the paths to the images
trainPaths = list(paths.list_images(config.IMAGE_PATH))
trainLabels = [p.split(os.path.sep)[-2] for p in trainPaths]

le = LabelEncoder()
trainLabels = le.fit_transform(trainLabels)

# perform stratified sampling from the training set to build the
# testing split from the training data
split = train_test_split(trainPaths,
                         trainLabels,
                         test_size=config.RATIO_TEST_IMAGES,
                         stratify=trainLabels,
                         random_state=42)
trainPaths, testPaths, trainLabels, testLabels = split

# perform another stratified sampling, this time to build the
# validation data
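The first example is cut off at this point; a minimal sketch of how the second stratified split could look, assuming the config module exposes a validation ratio (the RATIO_VAL_IMAGES name below is hypothetical, mirroring RATIO_TEST_IMAGES above):

# hypothetical continuation: carve a validation split out of the
# remaining training data (RATIO_VAL_IMAGES is an assumed constant)
split = train_test_split(trainPaths,
                         trainLabels,
                         test_size=config.RATIO_VAL_IMAGES,
                         stratify=trainLabels,
                         random_state=42)
trainPaths, valPaths, trainLabels, valLabels = split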
Example #2
from field_test.deepgooglenet.config import tiny_imagenet_config as config
from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import train_test_split
from cnn.io_.hdf5datasetwriter import HDF5DatasetWriter
from tools import paths
import numpy as np
import progressbar
import json
import cv2
import os

# grab the paths to the training images, then extract the training
# class labels and encode them
trainPaths = list(paths.list_images(config.TRAIN_IMAGES))
trainLabels = [p.split(os.path.sep)[-3] for p in trainPaths]
le = LabelEncoder()
trainLabels = le.fit_transform(trainLabels)

# perform stratified sampling from the training set to construct a
# testing set
split = train_test_split(trainPaths,
                         trainLabels,
                         test_size=config.NUM_TEST_IMAGES,
                         stratify=trainLabels,
                         random_state=42)
trainPaths, testPaths, trainLabels, testLabels = split

# load the validation filename => class from file and then use these
# mappings to build the validation paths and label lists
M = open(config.VAL_MAPPINGS).read().strip().split('\n')
M = [r.split('\t')[:2] for r in M]
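The second example also breaks off mid-step; a hedged continuation showing how such filename => class mappings are typically turned into validation paths and encoded labels (config.VAL_IMAGES is an assumed constant naming the validation image directory):

# assumed continuation: build the validation paths and encode the
# validation labels with the LabelEncoder fitted on the training labels
valPaths = [os.path.sep.join([config.VAL_IMAGES, m[0]]) for m in M]
valLabels = le.transform([m[1] for m in M])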
Example #3
                help='batch size of images to be passed through network')
ap.add_argument('-s',
                '--buffer-size',
                type=int,
                default=1024,
                help='size of feature extraction buffer')
args = vars(ap.parse_args())

# store the batch size in a convenience variable
bs = args['batch_size']

# grab the list of images that we’ll be describing then randomly
# shuffle them to allow for easy training and testing splits via
# array slicing during training time
print('[INFO] loading images...')
imagePaths = list(paths.list_images(args['dataset']))
random.shuffle(imagePaths)

# extract the class labels from the image paths then encode the
# labels
labels = [p.split(os.path.sep)[-2] for p in imagePaths]
le = LabelEncoder()
labels = le.fit_transform(labels)

# load the InceptionResNetV2 network (without the fully-connected head)
print('[INFO] loading network...')
model = InceptionResNetV2(weights='imagenet', include_top=False)

# initialize the HDF5 dataset writer, then store the class label
# names in the dataset
# the final convolutional block of InceptionResNetV2 produces 8 x 8 x 1536 activations,
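The third example stops in the middle of the writer comment. Below is a hedged sketch of the batch-wise feature-extraction loop that usually follows; the HDF5DatasetWriter constructor arguments, the keras image helpers (load_img, img_to_array, preprocess_input), and args['output'] are assumptions, not taken from this repository:

# assumed continuation: extract features in batches and write them to HDF5
# (writer signature and feature dimensions assume 299x299 inputs)
dataset = HDF5DatasetWriter((len(imagePaths), 1536 * 8 * 8),
                            args['output'],
                            dataKey='features',
                            bufSize=args['buffer_size'])
dataset.storeClassLabels(le.classes_)

for i in range(0, len(imagePaths), bs):
    batchPaths = imagePaths[i:i + bs]
    batchLabels = labels[i:i + bs]
    batchImages = []

    for path in batchPaths:
        # load and preprocess the image for InceptionResNetV2
        image = load_img(path, target_size=(299, 299))
        image = img_to_array(image)
        image = np.expand_dims(image, axis=0)
        image = preprocess_input(image)
        batchImages.append(image)

    # pass the batch through the network and flatten the activations
    batchImages = np.vstack(batchImages)
    features = model.predict(batchImages, batch_size=bs)
    features = features.reshape((features.shape[0], -1))
    dataset.add(features, batchLabels)

dataset.close()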
Example #4
import os

import cv2

from field_test.smart_test.find_tripod.config import tripod_config as config
from cnn.preprocessing.aspectawarepreprocessor import AspectAwarePreprocessor
from tools import paths

raw_images_paths = paths.list_images(config.RAW_IMAGE_PATH)

# initialize the aspect-aware preprocessor once, outside the loop
aap = AspectAwarePreprocessor(1024, 1024, inter=cv2.INTER_LANCZOS4, gray=False)

for path in raw_images_paths:
    # load the image from disk and resize/crop it while preserving the
    # aspect ratio
    image = cv2.imread(path)
    image = aap.preprocess(image)

    # build the output directory from the class sub-directory in the path
    out_dir = config.IMAGE_PATH + '/' + path.split(os.path.sep)[-2] + '/'
    if not os.path.exists(out_dir):
        os.makedirs(out_dir)

    cv2.imwrite(out_dir + path.split(os.path.sep)[-1], image, [cv2.IMWRITE_JPEG_QUALITY, 100])
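For readers without access to the repository's AspectAwarePreprocessor, here is a minimal standalone sketch of the usual aspect-aware resize-and-crop idea; it is an illustration of the technique, not the class used above:

import cv2

def aspect_aware_resize(image, width, height, inter=cv2.INTER_AREA):
    # resize along the shorter dimension, then center-crop to the
    # requested width x height (a common aspect-aware strategy)
    (h, w) = image.shape[:2]
    if w < h:
        scale = width / float(w)
        image = cv2.resize(image, (width, int(h * scale)), interpolation=inter)
    else:
        scale = height / float(h)
        image = cv2.resize(image, (int(w * scale), height), interpolation=inter)

    (h, w) = image.shape[:2]
    dW = (w - width) // 2
    dH = (h - height) // 2
    image = image[dH:dH + height, dW:dW + width]

    # a final resize guards against rounding off-by-one differences
    return cv2.resize(image, (width, height), interpolation=inter)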
Example #5
args = vars(ap.parse_args())

# construct the image generator for data augmentation
aug = ImageDataGenerator(rotation_range=30,
                         width_shift_range=0.1,
                         height_shift_range=0.1,
                         shear_range=0.2,
                         zoom_range=0.2,
                         horizontal_flip=True,
                         vertical_flip=True,
                         fill_mode="nearest")

# grab the list of images that we’ll be describing, then extract
# the class label names from the image paths
print("[INFO] loading images...")
imagePaths = list(list_images(args["dataset"]))

# initialize the image preprocessors
iap = ImageToArrayPreprocessor()

# load the dataset from disk then scale the raw pixel intensities to
# the range [0, 1]
sdl = SimpleDatasetLoader(preprocessor=[iap])
data, labels = sdl.load(imagePaths, verbose=500)
data = data / 255
classNames = [str(x) for x in np.unique(labels)]

# convert the labels from integers to vectors
labels = LabelBinarizer().fit_transform(labels)

# account for skew in the labeled data
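The listing is truncated at this point; a hedged sketch of the class-weighting step the comment suggests, computed from the one-hot label matrix produced above (a common way to handle class skew, not necessarily this repository's exact code):

# weight each class inversely to its frequency so that rare classes
# contribute more to the loss during training
classTotals = labels.sum(axis=0)
classWeight = dict(enumerate(classTotals.max() / classTotals))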