Example #1

# Import the necessary packages (module paths assumed to mirror the
# utilities package used in the later examples)
from utilities.preprocessing import SimplePreprocessor
from utilities.datasets import SimpleDatasetLoader
from sklearn.preprocessing import LabelEncoder
from imutils import paths
import argparse
# Construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument('-d', '--dataset', required=True,
                help='Path to input dataset')
ap.add_argument('-n', '--neighbors', required=False, type=int, default=1,
                help='# of nearest neighbors for classification')
ap.add_argument('-j', '--jobs', required=False, type=int, default=-1,
                help='# of jobs for k-NN distance (-1 uses all available cores)')
args = vars(ap.parse_args())
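Aside: vars() converts the parsed Namespace into a plain dictionary. For a quick check without the command line, argparse also accepts an explicit argument list (the dataset path below is a hypothetical example):

# parse an explicit argument list instead of sys.argv (handy for testing)
demo_args = vars(ap.parse_args(['--dataset', 'datasets/animals']))
print(demo_args)  # {'dataset': 'datasets/animals', 'neighbors': 1, 'jobs': -1}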

# Get list of image paths
image_paths = list(paths.list_images(args['dataset']))

# Initialize the preprocessor and dataset loader, then load the data and labels
print('[INFO]: Loading images....')
sp = SimplePreprocessor(32, 32)
sdl = SimpleDatasetLoader(preprocessors=[sp])
(data, labels) = sdl.load(image_paths, verbose=500)
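SimplePreprocessor and SimpleDatasetLoader are this project's own helpers. A minimal sketch of what SimplePreprocessor is assumed to do (resize every image to a fixed width and height, ignoring aspect ratio):

import cv2

class SimplePreprocessor:
    def __init__(self, width, height, inter=cv2.INTER_AREA):
        # store the target width, height, and interpolation method
        self.width = width
        self.height = height
        self.inter = inter

    def preprocess(self, image):
        # resize the image to a fixed size, ignoring the aspect ratio
        return cv2.resize(image, (self.width, self.height),
                          interpolation=self.inter)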

# Reshape from (3000, 32, 32, 3) to (3000, 32*32*3=3072)
data = data.reshape((data.shape[0], 3072))
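For intuition, the flattening turns each 32x32x3 image into a single row of 3,072 raw pixel values:

# a toy check of the flattening on two dummy images
import numpy as np
toy = np.zeros((2, 32, 32, 3))
print(toy.reshape((toy.shape[0], 3072)).shape)  # (2, 3072)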


# Print information about the memory consumption of the feature matrix
print('[INFO]: Features Matrix: {:.1f}MB'.format(
    data.nbytes / (1024 * 1024.0)))

# Encode labels as integers
le = LabelEncoder()
labels = le.fit_transform(labels)
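A quick illustration of what LabelEncoder does: class-name strings become integers, with the classes ordered alphabetically during fit:

# hypothetical labels, purely to show the encoding
demo = LabelEncoder()
print(demo.fit_transform(['dog', 'cat', 'panda', 'dog']))  # [1 0 2 1]
print(list(demo.classes_))  # ['cat', 'dog', 'panda']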
ap.add_argument("-m", "--model", type=str,
                help="path to *specific* model checkpoint to load")
ap.add_argument("-s", "--start-epoch", type=int, default=0,
                help="epoch to restart training at")
args = vars(ap.parse_args())

# construct the training image generator for data augmentation
aug = ImageDataGenerator(rotation_range=20, width_shift_range=0.2,
	height_shift_range=0.2, shear_range=0.15, zoom_range=0.15,
	horizontal_flip=True, fill_mode="nearest")

# load the RGB means for the training set
means = json.loads(open(config.DATASET_MEAN).read())

# initialize the image preprocessors
sp = SimplePreprocessor(64, 64)
mp = MeanPreprocessor(means["R"], means["G"], means["B"])
iap = ImageToArrayPreprocessor()
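MeanPreprocessor is another project helper; a minimal sketch of the assumed behavior (subtract the per-channel training-set means loaded above):

import cv2

class MeanPreprocessor:
	def __init__(self, rMean, gMean, bMean):
		# store the per-channel means computed over the training set
		self.rMean = rMean
		self.gMean = gMean
		self.bMean = bMean

	def preprocess(self, image):
		# split into channels (OpenCV orders them B, G, R), subtract
		# the matching mean, then merge the channels back together
		(B, G, R) = cv2.split(image.astype("float32"))
		R -= self.rMean
		G -= self.gMean
		B -= self.bMean
		return cv2.merge([B, G, R])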

# initialize the training and validation dataset generators
bs = 64
trainGen = HDF5DatasetGenerator(config.TRAIN_HDF5, batchSize=bs, aug=aug,
	preprocessors=[sp, mp, iap], classes=config.NUM_CLASSES)
valGen = HDF5DatasetGenerator(config.VAL_HDF5, batchSize=bs,
	preprocessors=[sp, mp, iap], classes=config.NUM_CLASSES)

# if there is no specific model checkpoint supplied, then initialize
# the network and compile the model
if args["model"] is None:
	print("[INFO] compiling model...")
	opt = Adam(lr=1e-3)
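With the model compiled or restored, training would typically resume through the two generators. A minimal sketch, assuming the project's HDF5DatasetGenerator exposes generator(), numImages, and close() as used in the later examples (the epoch count is an example value):

# train the network, resuming at --start-epoch when a checkpoint was loaded
model.fit_generator(
	trainGen.generator(),
	steps_per_epoch=trainGen.numImages // bs,
	validation_data=valGen.generator(),
	validation_steps=valGen.numImages // bs,
	epochs=75,
	initial_epoch=args["start_epoch"],
	verbose=1)

# close the HDF5 datasets
trainGen.close()
valGen.close()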
Example #3

# Import the necessary packages
from keras.applications import ResNet50, InceptionV3, Xception
from keras.applications import imagenet_utils
from keras.applications.inception_v3 import preprocess_input
from utilities.preprocessing import SimplePreprocessor
from utilities.datasets import SimpleDatasetLoader
from imutils import paths

# map human-readable names to the pretrained Keras model classes
MODELS = {
    "resnet50": ResNet50,
    "inception": InceptionV3,
    "xception": Xception
}

model_name = input("Please input name of pretrained ImageNet model to use: ")
if model_name not in MODELS.keys():
    raise AssertionError("The model name should be one of: {}".format(
        ", ".join(MODELS.keys())))

input_shape = (224, 224)
preprocess = imagenet_utils.preprocess_input
if model_name in ("xception", "inception"):
    input_shape = (299, 299)
    preprocess = preprocess_input
(height, width) = input_shape

Network = MODELS[model_name]
model = Network(weights="imagenet")

# load the image dataset from disk
imagePaths = list(
    paths.list_images(
        r"C:\Users\Michael\Desktop\Data Science\DL4CVStarterBundle-master\DL4CVStarterBundle-master\datasets\animals"
    ))
sp = SimplePreprocessor(height, width)
sdl = SimpleDatasetLoader(preprocessors=[sp])
(dataset, _) = sdl.load(imagePaths)
images = preprocess(dataset.astype("float"))
preds = model.predict(images)
P = imagenet_utils.decode_predictions(preds)
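decode_predictions returns a list of (imagenetID, label, probability) tuples for each image; a short sketch of printing the top-5 labels for the first image:

# loop over the top-5 ImageNet predictions for the first image
for (i, (imagenetID, label, prob)) in enumerate(P[0]):
    print("{}. {}: {:.2f}%".format(i + 1, label, prob * 100))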

# visualize the network architecture (plot_model requires pydot/graphviz;
# ann_viz comes from the ann_visualizer package)
from keras.utils import plot_model
from ann_visualizer.visualize import ann_viz

model = Xception()
plot_model(model, to_file="Xception.png", show_shapes=True)
ann_viz(model, title="some neural network model", filename="picture.gv")
Example #4

# Import the necessary packages
from utilities.preprocessing import ImageToArrayPreprocessor
from utilities.preprocessing import SimplePreprocessor
from utilities.preprocessing import MeanPreprocessor
from utilities.preprocessing import CropPreprocessor
from utilities.io import HDF5DatasetGenerator
from utilities.utils.ranked import rank5_accuracy
from keras.models import load_model
import numpy as np
import progressbar
import json
import config  # project configuration module (exact path assumed)

# load the RGB means for the training set
means = json.loads(open(config.DATASET_MEAN).read())

# initialize the image preprocessors
sp = SimplePreprocessor(227, 227)
mp = MeanPreprocessor(means["R"], means["G"], means["B"])
cp = CropPreprocessor(227, 227)
iap = ImageToArrayPreprocessor()

# load the pretrained network
print("[INFO] loading model...")
model = load_model(config.MODEL_PATH)

# initialize the testing dataset generators, then make predictions on
# the testing data
print("[INFO] predicting on test data (no crops)...")
bs = 8
testGen = HDF5DatasetGenerator(config.TEST_HDF5,
                               batchSize=bs,
                               preprocessors=[sp, mp, iap],
                               classes=config.NUM_CLASSES)

# predict in batches, then score the results (numImages, db, and close()
# are assumed attributes of the project's HDF5DatasetGenerator)
predictions = model.predict_generator(testGen.generator(),
                                      steps=testGen.numImages // bs,
                                      max_queue_size=bs * 2)

# compute the rank-1 accuracy on the un-cropped test data
(rank1, _) = rank5_accuracy(predictions, testGen.db["labels"])
print("[INFO] rank-1: {:.2f}%".format(rank1 * 100))
testGen.close()
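The CropPreprocessor, numpy, and progressbar imports above point at the usual follow-up: 10-crop over-sampling at test time. A minimal sketch, assuming the generator accepts a passes argument for a single pass over the data (the progress bar is omitted for brevity):

# re-initialize the generator without the SimplePreprocessor so that
# CropPreprocessor can extract 10 crops from each full-size image
print("[INFO] predicting on test data (with crops)...")
testGen = HDF5DatasetGenerator(config.TEST_HDF5, batchSize=bs,
                               preprocessors=[mp], classes=config.NUM_CLASSES)
predictions = []

# loop over a single pass of the test data
for (images, labels) in testGen.generator(passes=1):
    for image in images:
        # extract the 10 crops, convert each to a Keras-ordered array,
        # and average the model's predictions over the crops
        crops = cp.preprocess(image)
        crops = np.array([iap.preprocess(c) for c in crops],
                         dtype="float32")
        pred = model.predict(crops)
        predictions.append(pred.mean(axis=0))

# compute the rank-1 accuracy over the averaged crop predictions
(rank1, _) = rank5_accuracy(predictions, testGen.db["labels"])
print("[INFO] rank-1: {:.2f}%".format(rank1 * 100))
testGen.close()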
Example #5

# Import the necessary packages (module paths assumed to mirror the
# utilities package used above)
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelBinarizer
from utilities.preprocessing import SimplePreprocessor
from utilities.preprocessing import ImageToArrayPreprocessor
from utilities.datasets import SimpleDatasetLoader
from imutils import paths
import argparse
import os

# Construct the argument parser and parse the arguments (flag names are
# assumed from the args[...] keys used below)
ap = argparse.ArgumentParser()
ap.add_argument("-d", "--dataset", required=True,
                help="path to input dataset")
ap.add_argument("-i", "--image-size", required=False, type=int, default=32,
                help="spatial dimension to which each image is resized")
args = vars(ap.parse_args())

# Training hyper-parameters for the learning rate schedule (example values)
INIT_LR = 1e-2
MAX_EPOCHS = 40

def poly_decay(epoch):
    # Initialize the base learning rate, maximum number of epochs,
    # and power of the polynomial
    baseLR = INIT_LR
    maxEpochs = MAX_EPOCHS
    power = 1.0

    # Compute the new learning rate based on polynomial decay
    alpha = baseLR * (1 - (epoch / float(maxEpochs))) ** power

    # Return the learning rate
    return alpha
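For reference, poly_decay would typically be handed to Keras through its LearningRateScheduler callback; a minimal sketch (the training call itself sits elsewhere in this script):

# schedule the learning rate with poly_decay during training
from keras.callbacks import LearningRateScheduler
callbacks = [LearningRateScheduler(poly_decay)]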

# Show information on the process ID
print("[INFO]: Process ID: {}".format(os.getpid()))

# Grab the list of images
print("[INFO]: Loading images....")
image_paths = list(paths.list_images(args["dataset"]))

# Initialize the image preprocessors
img_size = args["image_size"]
sp = SimplePreprocessor(img_size, img_size)
itap = ImageToArrayPreprocessor()

# Load the dataset and scale the raw pixel intensities to the range [0, 1]
sdl = SimpleDatasetLoader(preprocessors=[sp, itap])
(data, labels) = sdl.load(image_paths, verbose=500)
data = data.astype("float") / 255.0

# Split the data into training data (75%) and testing data (25%)
(trainX, testX, trainY, testY) = train_test_split(data, labels, test_size=0.25, random_state=42)

# Convert the labels from integers to vectors
lb = LabelBinarizer()
trainY = lb.fit_transform(trainY)
testY = lb.transform(testY)
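A quick illustration of the conversion: LabelBinarizer turns each class-name string into a one-hot vector, with the class order learned during fit (note that the test labels reuse transform, not fit_transform, so both splits share one encoding):

# hypothetical labels, purely to show the one-hot encoding
demo = LabelBinarizer()
print(demo.fit_transform(["cat", "dog", "panda", "cat"]))
# [[1 0 0]
#  [0 1 0]
#  [0 0 1]
#  [1 0 0]]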