"airplane", "automobile", "bird", "cat", "deer", "dog", "frog", "horse",
    "ship", "truck"
]
# construct the image generator for data augmentation
aug = ImageDataGenerator(rotation_range=10,
                         width_shift_range=0.1,
                         height_shift_range=0.1,
                         horizontal_flip=True,
                         fill_mode="nearest")

# loop over the number of models to train
for i in np.arange(0, args["num_models"]):
    print("[INFO] training model {}/{}".format(i + 1, args["num_models"]))
    opt = SGD(lr=0.01, decay=0.01 / 40, momentum=0.9, nesterov=True)
    model = MiniVGGNet.build(width=32, height=32, depth=3, classes=10)
    model.compile(loss="categorical_crossentropy",
                  optimizer=opt,
                  metrics=["accuracy"])
    # train!
    H = model.fit_generator(aug.flow(trainX, trainY, batch_size=64),
                            validation_data=(testX, testY),
                            epochs=40,
                            steps_per_epoch=len(trainX) // 64,
                            verbose=1)

    # save!
    p = [args["model"], "model_{}.model".format(i)]
    model.save(os.path.sep.join(p))

    # evaluate the network
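    # (The original example ends here, before the evaluation code itself.)
    # A minimal sketch of how it might continue, assuming testY is one-hot
    # encoded (as categorical_crossentropy implies), classification_report is
    # imported from sklearn.metrics, and the class-name list above is bound
    # to a variable named labelNames (its actual name is cut off in this excerpt):
    predictions = model.predict(testX, batch_size=64)
    print(classification_report(testY.argmax(axis=1),
                                predictions.argmax(axis=1),
                                target_names=labelNames))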
Example #2
sp = SimplePreprocessor(64, 64)
iap = ImageToArrayPreprocessor()
sdl = SimpleDatasetLoader(preprocessors=[sp, iap])
(data, labels) = sdl.load(imagePaths, verbose=500)
data = data.astype("float") / 255.0

(X_train, X_test, y_train, y_test) = train_test_split(data,
                                                      labels,
                                                      test_size=0.25)

lb = LabelBinarizer()
y_train = lb.fit_transform(y_train)
y_test = lb.transform(y_test)

print("[INFO] building and training model...")
model = MiniVGGNet.build(64, 64, 3, len(classNames))
opt = SGD(lr=0.05)
model.compile(loss="categorical_crossentropy",
              optimizer=opt,
              metrics=["accuracy"])
H = model.fit(X_train,
              y_train,
              validation_data=(X_test, y_test),
              epochs=100,
              batch_size=32,
              verbose=1)

model.save('minivggnet_without_data_augmentation.hdf5')

print("[INFO] evaluating network...")
preds = model.predict(X_test, batch_size=32)
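# The report on these predictions is not shown in this snippet. A minimal
# sketch, assuming classification_report is imported from sklearn.metrics and
# reusing the LabelBinarizer `lb` fitted above:
print(classification_report(y_test.argmax(axis=1),
                            preds.argmax(axis=1),
                            target_names=[str(c) for c in lb.classes_]))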
classNames = [str(x) for x in np.unique(classNames)]

aap = AspectAwarePreprocessor(64, 64)
iap = ImageToArrayPreprocessor()
sdl = SimpleDatasetLoader(preprocessors=[aap, iap])
(data, labels) = sdl.load(imagePaths, verbose=500)
data = data.astype("float")/255.0

(trainX, testX, trainY, testY) = train_test_split(data, labels, test_size=0.25,  random_state=42)

# fit the label binarizer on the training labels only, then apply it to both splits
lb = LabelBinarizer()
trainY = lb.fit_transform(trainY)
testY = lb.transform(testY)

print("[INFO] compiling model...")
opt = SGD(lr=0.005)
model = MiniVGGNet.build(width=64, height=64, depth=3, classes=len(classNames))
model.compile(loss="categorical_crossentropy", optimizer=opt, metrics=["accuracy"])

print("[INFO] training network...")
H = model.fit(trainX,  trainY, validation_data=(testX, testY), batch_size=32, epochs=100, verbose=1)

print("[INFO] evaluating network...")
predictions = model.predict(testX, batch_size=32)
print(classification_report(testY.argmax(axis=1), predictions.argmax(axis=1), target_names=classNames))

plt.style.use("ggplot")
plt.figure()
plt.plot(np.arange(0, 100), H.history["loss"], label="train_loss")
plt.plot(np.arange(0, 100), H.history["val_loss"], label="val_loss")
plt.plot(np.arange(0, 100), H.history["acc"], label="train_acc")
# one-hot encode the training and testing labels
trainY = np_utils.to_categorical(trainY, 10)
testY = np_utils.to_categorical(testY, 10)

# initialize the label names
labelNames = [
    "top", "trouser", "pullover", "dress", "coat", "sandal", "shirt",
    "sneaker", "bag", "ankle boot"
]

# initialize the optimizer and model
print("[INFO] compiling model...")
opt = SGD(lr=INIT_LR, momentum=0.9, decay=INIT_LR / NUM_EPOCHS)
model = MiniVGGNet.build(width=IMG_WIDTH,
                         height=IMG_HEIGHT,
                         depth=IMG_DEPTH,
                         classes=10)
model.compile(loss="categorical_crossentropy",
              optimizer=opt,
              metrics=["accuracy"])

# train the network
print("[INFO] training model...")
H = model.fit(trainX,
              trainY,
              validation_data=(testX, testY),
              batch_size=BS,
              epochs=NUM_EPOCHS)

# make predictions on the test set
preds = model.predict(testX)
# initialize the training and validation dataset generators
trainGen = HDF5DatasetGenerator(config.TRAIN_HDF5,
                                config.BATCH_SIZE,
                                classes=config.NUM_CLASSES)
valGen = HDF5DatasetGenerator(config.VAL_HDF5,
                              config.BATCH_SIZE,
                              classes=config.NUM_CLASSES)

# if there is no specific model checkpoint supplied, then initialize
# the network (MiniVGGNet) and compile the model
if args["model"] is None:
    print("[INFO] compiling model...")
    #opt = SGD(lr=config.LEARNING_RATE,nesterov=True,decay=config.DECAY)
    #opt = Adam(lr=config.LEARNING_RATE)
    opt = Adam(lr=0.01)
    model = MiniVGGNet.build(config.RESIZE, config.RESIZE, config.NUM_CHANNELS,
                             config.NUM_CLASSES)
    model.compile(loss="categorical_crossentropy",
                  optimizer=opt,
                  metrics=["accuracy"])

# otherwise, load the checkpoint from disk
else:
    print("[INFO] loading {}...".format(args["model"]))
    model = load_model(args["model"])

    # update the learning rate
    print("[INFO] old learning rate: {}".format(K.get_value(
        model.optimizer.lr)))
    K.set_value(model.optimizer.lr, 0.01)
    print("[INFO] new learning rate: {}".format(K.get_value(
        model.optimizer.lr)))
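
# The snippet stops before the actual training call. A minimal sketch of how
# training could proceed with the generators above, assuming the
# HDF5DatasetGenerator exposes a generator() method, a numImages attribute,
# and close() (as in the pyimagesearch-style implementation this appears to
# follow); the epoch count is a placeholder:
model.fit_generator(
    trainGen.generator(),
    steps_per_epoch=trainGen.numImages // config.BATCH_SIZE,
    validation_data=valGen.generator(),
    validation_steps=valGen.numImages // config.BATCH_SIZE,
    epochs=10,
    verbose=1)

# close the HDF5 dataset file handles
trainGen.close()
valGen.close()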
Example #6
trainY = lb.fit_transform(trainY)
testY = lb.transform(testY)

labelNames = [
    "airplane", "automobile", "bird", "cat", "deer", "dog", "frog", "horse",
    "ship", "truck"
]

print("[INFO] Compling the model...")

print(np.shape(trainY))
input_h = trainX.shape[1]
input_w = trainX.shape[2]
input_d = trainX.shape[3]
input_classes = trainY.shape[1]
model = MiniVGGNet.build(width=input_w, height=input_h, depth=input_d,
                         classes=input_classes)
opt = SGD(0.05)
model.compile(optimizer=opt,
              loss="categorical_crossentropy",
              metrics=["accuracy"])

print("[INFO] Training the network...")
epochs = 40
H = model.fit(trainX,
              trainY,
              batch_size=64,
              epochs=epochs,
              verbose=1,
              shuffle=True)

print("[INFO] Evaluating Network...")
Example #7
File: inspect_model.py | Project: zlyin/Orca
parser.add_argument("-i", "--include_top", type=int, default=1, \
        help="1/-1 indicates whether to include the head of neural network or not")
parser.add_argument("-m", "--model", type=str, default="vgg16", \
        help="which model model to inspect")
args = vars(parser.parse_args())

networkBanks = {
        "vgg16" : VGG16(weights="imagenet", include_top=args["include_top"] > 0),
        "vgg19" : VGG19(weights="imagenet", include_top=args["include_top"] > 0),
        "resnet50" : ResNet50(weights="imagenet", \
                include_top=args["include_top"] > 0),
        "inceptionv3" : InceptionV3(weights="imagenet", \
                include_top=args["include_top"] > 0),
        "shallownet" : ShallowNet.build(height=28, width=28, depth=3, classes=10),
        "lenet" : LeNet.build(height=28, width=28, depth=3, classes=10),
        "minivgg" : MiniVGGNet.build(height=28, width=28, depth=3, classes=10),
        #"kfer_lenet" : KFER_LeNet.build(height=48, width=48, depth=3, classes=7),
        }

## loading network
print("[INFO] loading network =", args["model"])
model = networkBanks[args["model"]]

# inspect layers
for i, layer in enumerate(model.layers):
    print("[INFO] {}\t{}".format(i, layer.__class__.__name__))




Example #8
# scale data to the range of [0, 1]
trainX = trainX.astype("float32") / 255.0
testX = testX.astype("float32") / 255.0
 
# one-hot encode the training and testing labels
trainY = np_utils.to_categorical(trainY, 10)
testY = np_utils.to_categorical(testY, 10)
 
# initialize the label names
labelNames = ["top", "trouser", "pullover", "dress", "coat",
	"sandal", "shirt", "sneaker", "bag", "ankle boot"]

# initialize the optimizer and model
print("[INFO] compiling model...")
opt = SGD(lr=INIT_LR, momentum=0.9, decay=INIT_LR / NUM_EPOCHS)
model = MiniVGGNet.build(width=28, height=28, depth=1, classes=10)
model.compile(loss="categorical_crossentropy", optimizer=opt,
	metrics=["accuracy"])
 
# train the network
print("[INFO] training model...")
H = model.fit(trainX, trainY,
	validation_data=(testX, testY),
	batch_size=BS, epochs=NUM_EPOCHS)

# make predictions on the test set
preds = model.predict(testX)
 
# show a nicely formatted classification report
print("[INFO] evaluating network...")
print(classification_report(testY.argmax(axis=1), preds.argmax(axis=1),
	target_names=labelNames))