Example #1
    def _start_training(self):
        # binarize the labels
        lb = LabelBinarizer()
        labels = lb.fit_transform(self.labels)

        # 80% for training and 20% for testing
        (trainX, testX, trainY, testY) = train_test_split(self.data,
                                                          labels,
                                                          test_size=0.2,
                                                          random_state=42)

        # construct the image generator for data augmentation
        datagen = ImageDataGenerator(rotation_range=25,
                                     width_shift_range=0.1,
                                     height_shift_range=0.1,
                                     shear_range=0.2,
                                     zoom_range=0.2,
                                     horizontal_flip=True,
                                     fill_mode="nearest")

        # initialize the model
        logger.info("compiling model...")
        model = SmallerVGGNet.build(width=IMAGE_DIMENSIONS[1],
                                    height=IMAGE_DIMENSIONS[0],
                                    depth=IMAGE_DIMENSIONS[2],
                                    classes=len(lb.classes_))
        opt = Adam(lr=self.init_lr, decay=self.init_lr / self.epochs)
        model.compile(loss="categorical_crossentropy",
                      optimizer=opt,
                      metrics=["accuracy"])

        # train the network
        logger.info("training network...")
        return lb, model, model.fit(datagen.flow(trainX,
                                                 trainY,
                                                 batch_size=self.bs),
                                    validation_data=(testX, testY),
                                    steps_per_epoch=len(trainX) // self.bs,
                                    epochs=self.epochs,
                                    verbose=1)
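
# NOTE: a minimal usage sketch (not part of the original example); `trainer`
# stands in for whatever object owns _start_training, and the output paths
# below are hypothetical.
import pickle

lb, model, history = trainer._start_training()
model.save("smallervggnet.h5")                   # persist the trained network
with open("label_binarizer.pickle", "wb") as f:  # persist the label binarizer for inference
    f.write(pickle.dumps(lb))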
Example #2
                                                  test_size=0.2,
                                                  random_state=42)

# construct the image generator for data augmentation
aug = ImageDataGenerator(rotation_range=25,
                         width_shift_range=0.1,
                         height_shift_range=0.1,
                         shear_range=0.2,
                         zoom_range=0.2,
                         horizontal_flip=True,
                         fill_mode="nearest")

# initialize the model
print("[INFO] compiling model...")
model = SmallerVGGNet.build(width=IMAGE_DIMS[1],
                            height=IMAGE_DIMS[0],
                            depth=IMAGE_DIMS[2],
                            classes=len(lb.classes_))
opt = Adam(lr=INIT_LR, decay=INIT_LR / EPOCHS)
model.compile(loss="categorical_crossentropy",
              optimizer=opt,
              metrics=["accuracy"])

# train the network
print("[INFO] training network...")
H = model.fit_generator(aug.flow(trainX, trainY, batch_size=BS),
                        validation_data=(testX, testY),
                        steps_per_epoch=len(trainX) // BS,
                        epochs=EPOCHS,
                        verbose=1)

# save the model to disk
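# NOTE: the original snippet is truncated here; a minimal sketch of a typical
# save step follows, with hypothetical output paths "model.h5" and "lb.pickle".
import pickle

model.save("model.h5")              # serialize the trained Keras model
with open("lb.pickle", "wb") as f:  # serialize the fitted LabelBinarizer for later inference
    f.write(pickle.dumps(lb))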
Example #3
trainY = to_categorical(trainY, num_classes=2, dtype='float32')
testY = to_categorical(testY, num_classes=2, dtype='float32')

# construct image generator for data augmentation
aug = ImageDataGenerator(rotation_range=30,
                         width_shift_range=0.1,
                         height_shift_range=0.1,
                         shear_range=0.2,
                         zoom_range=0.2,
                         horizontal_flip=True,
                         fill_mode='nearest')
"""# **4. First Model Initializing - VGG Architecture**"""

# initialize our VGG-like Convolutional Neural Network
model = SmallerVGGNet.build(width=IMG_WIDTH,
                            height=IMG_HEIGHT,
                            depth=3,
                            classes=len(lb.classes_))

# initialize our initial learning rate, number of epochs to train for, and batch size
INIT_LR = 0.01
EPOCHS = 75
BATCH_SIZE = 32

# initialize the model and optimizer
print('[INFORMATION] Loading Neural Network Model...')
opt = SGD(lr=INIT_LR, decay=INIT_LR / EPOCHS)
model.compile(loss='categorical_crossentropy',
              optimizer=opt,
              metrics=['accuracy'])
print('[INFORMATION] Neural Network Model successfully loaded!\n')
model.summary()
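
# NOTE: no training call appears in this part of the snippet; a minimal sketch
# of how the compiled model and the `aug` generator above would typically be
# trained, assuming the trainX/trainY/testX/testY arrays produced earlier in
# the script and a tf.keras version whose model.fit accepts a generator:
H = model.fit(aug.flow(trainX, trainY, batch_size=BATCH_SIZE),
              validation_data=(testX, testY),
              steps_per_epoch=len(trainX) // BATCH_SIZE,
              epochs=EPOCHS,
              verbose=1)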
Example #4
# construct the image generator for data augmentation
aug = ImageDataGenerator(rotation_range=25,
                         width_shift_range=0.1,
                         height_shift_range=0.1,
                         shear_range=0.2,
                         zoom_range=0.2,
                         horizontal_flip=True,
                         fill_mode="nearest")

# initialize the model using a sigmoid activation as the final layer
# in the network so we can perform multi-label classification
print("[INFO] compiling model...")
model = SmallerVGGNet.build(width=IMAGE_DIMS[1],
                            height=IMAGE_DIMS[0],
                            depth=IMAGE_DIMS[2],
                            classes=len(mlb.classes_),
                            finalAct="sigmoid")

# initialize the optimizer
opt = Adam(lr=INIT_LR, decay=INIT_LR / EPOCHS)

# compile the model using binary cross-entropy
model.compile(loss="binary_crossentropy", optimizer=opt, metrics=["accuracy"])

# train the network
print("[INFO] training network...")
H = model.fit_generator(aug.flow(trainX, trainY, batch_size=BS),
                        validation_data=(testX, testY),
                        steps_per_epoch=len(trainX) // BS,
                        epochs=EPOCHS,
                        verbose=1)
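
# NOTE: a minimal inference sketch (not part of the original example) for the
# multi-label sigmoid model trained above; `image` is assumed to be a single
# preprocessed array whose shape matches IMAGE_DIMS.
import numpy as np

proba = model.predict(np.expand_dims(image, axis=0))[0]
# with a sigmoid output, every class whose probability clears 0.5 is predicted
predicted_labels = [mlb.classes_[i] for i, p in enumerate(proba) if p > 0.5]
print("[INFO] predicted labels: {}".format(predicted_labels))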
Example #5
                                                  test_size=0.2,
                                                  random_state=42)
trainY = to_categorical(trainY, num_classes=2)
testY = to_categorical(testY, num_classes=2)

# augment the dataset
aug = ImageDataGenerator(rotation_range=25,
                         width_shift_range=0.1,
                         height_shift_range=0.1,
                         shear_range=0.2,
                         zoom_range=0.2,
                         horizontal_flip=True,
                         fill_mode="nearest")

# build model
model = SmallerVGGNet.build(width=96, height=96, depth=3, classes=2)

# compile the model
opt = Adam(lr=lr, decay=lr / epochs)
model.compile(loss="binary_crossentropy", optimizer=opt, metrics=["accuracy"])

checkpoint = ModelCheckpoint(filepath='cnn.model',
                             verbose=1,
                             save_best_only=True)

earlystop = EarlyStopping(
    monitor='val_loss',  # quantity monitored for improvement
    min_delta=0,  # minimum absolute change required to count as an improvement
    patience=15,  # number of epochs to wait without improvement before stopping
    verbose=1,
    restore_best_weights=True)  # restore the best weights once training stops
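
# NOTE: the callbacks above are defined but never wired up in this snippet; a
# minimal sketch of how they would typically be passed to training, assuming a
# hypothetical batch-size variable `bs` alongside the `lr`/`epochs` used above
# and a tf.keras version whose model.fit accepts a generator:
H = model.fit(aug.flow(trainX, trainY, batch_size=bs),
              validation_data=(testX, testY),
              steps_per_epoch=len(trainX) // bs,
              epochs=epochs,
              callbacks=[checkpoint, earlystop],
              verbose=1)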
Example #6
# construct the image generator for data augmentation; we don't think we want rotated images, as that would introduce problems
aug = ImageDataGenerator(rotation_range=0,
                         width_shift_range=0.1,
                         height_shift_range=0.1,
                         shear_range=0.05,
                         zoom_range=0.0,
                         horizontal_flip=False,
                         fill_mode="nearest")

num_classes = 6
# initialize the model
print("[INFO] compiling model...w={}, h={}, d={}".format(
    IMAGE_DIMS[1], IMAGE_DIMS[0], IMAGE_DIMS[2]))
model = SmallerVGGNet.build(width=IMAGE_DIMS[1],
                            height=IMAGE_DIMS[0],
                            depth=IMAGE_DIMS[2],
                            classes=num_classes)
#opt = Adam(lr=INIT_LR, decay=INIT_LR / EPOCHS)
#model.compile(loss="categorical_crossentropy", optimizer=opt, metrics=["accuracy"])
model.compile(loss="mse", optimizer='adam', metrics=["accuracy"])

#estimator = KerasRegressor(build_fn=baseline_model, nb_epoch=100, batch_size=5, verbose=2)  # KerasRegressor for regression problem

# train the network
print("[INFO] training network...")
H = model.fit_generator(aug.flow(trainX, trainY, batch_size=BS),
                        validation_data=(testX, testY),
                        steps_per_epoch=len(trainX) // BS,
                        epochs=EPOCHS,
                        verbose=0,
                        shuffle=False)  # was verbose=1
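
# NOTE: a minimal sketch (not in the original snippet) of checking the run
# recorded above against the held-out split:
test_loss, test_acc = model.evaluate(testX, testY, verbose=0)
print("[INFO] test loss={:.4f}, test accuracy={:.4f}".format(test_loss, test_acc))
print("[INFO] best val_loss={:.4f}".format(min(H.history["val_loss"])))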
Example #7
def train(arc):
    print("Loading images...")
    imagePaths = sorted(list(paths.list_images("train")))
    ground_truth = open("new_ground_truth.txt")
    random.seed(42)
    random.shuffle(imagePaths)

    data = []
    labels = []
    data_test = []
    data_labels = []

    for imagePath in imagePaths:
        image = cv2.imread(imagePath)
        image = cv2.resize(image, (IMAGE_DIMS[1], IMAGE_DIMS[0]))
        image = img_to_array(image)
        data.append(image)
        label = imagePath.split(os.path.sep)[-2].split("_")[0].replace('\n', '')
        labels.append(label)

    data = np.array(data, dtype="float") / 255.0
    labels = np.array(labels)
    lb = LabelBinarizer()
    labels = lb.fit_transform(labels)

    test_images = os.listdir("test")

    for line in ground_truth.readlines():
        image_path = line.split("\t")[0]
        if image_path in test_images:
            image = cv2.imread("test/" + image_path)
            image = cv2.resize(image, (IMAGE_DIMS[1], IMAGE_DIMS[0]))
            image = img_to_array(image)
            data_test.append(image)
            label = line.split("\t")[1].replace("\n", "")
            data_labels.append(label)

    data_test = np.array(data_test, dtype="float") / 255.0
    data_labels = np.array(data_labels)
    lb2 = LabelBinarizer()
    data_labels = lb2.fit_transform(data_labels)

    trainX = data
    trainY = labels
    testX = data_test
    testY = data_labels
    #(trainX, testX, trainY, testY) = train_test_split(data, labels, test_size=0.2, random_state=42)
    aug = ImageDataGenerator(rotation_range=25,
                             width_shift_range=0.1,
                             height_shift_range=0.1,
                             shear_range=0.2,
                             zoom_range=0.2,
                             horizontal_flip=True,
                             fill_mode="nearest")

    if arc == "vgg":
        model = SmallerVGGNet.build(width=IMAGE_DIMS[1],
                                    height=IMAGE_DIMS[0],
                                    depth=IMAGE_DIMS[2],
                                    classes=len(lb.classes_))
    else:
        model = Alexnet.build(width=IMAGE_DIMS[1],
                              height=IMAGE_DIMS[0],
                              depth=IMAGE_DIMS[2],
                              classes=len(lb.classes_))

    opt = Adam(lr=INIT_LR, decay=INIT_LR / EPOCHS)

    plot_model(model, to_file='model.png')
    model.compile(loss="categorical_crossentropy",
                  optimizer=opt,
                  metrics=["accuracy"])

    print("Training the network...")

    H = model.fit_generator(aug.flow(trainX, trainY, batch_size=BS),
                            validation_data=(testX, testY),
                            steps_per_epoch=len(trainX) // BS,
                            epochs=EPOCHS,
                            verbose=1)

    if arc == "vgg":
        model.save("modVgg.model")
    else:
        model.save("modAlex.model")

    f = open("lb.pickle", "wb")
    f.write(pickle.dumps(lb))
    f.close()

    plt.style.use("ggplot")
    plt.figure()
    N = EPOCHS
    plt.plot(np.arange(0, N), H.history["loss"], label="train_loss")
    plt.plot(np.arange(0, N), H.history["val_loss"], label="val_loss")
    plt.plot(np.arange(0, N), H.history["acc"], label="train_acc")
    plt.plot(np.arange(0, N), H.history["val_acc"], label="val_acc")
    plt.title("Training Loss and Accuracy")
    plt.xlabel("Epoch #")
    plt.ylabel("Loss/Accuracy")
    plt.legend(loc="upper left")
    plt.savefig("plot1.png")
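
# NOTE: a minimal inference sketch (not part of the original example) for the
# artifacts saved by train("vgg"); "sample.jpg" is a hypothetical image path,
# and cv2/np/pickle/img_to_array/IMAGE_DIMS are assumed to be imported above.
from keras.models import load_model

inf_model = load_model("modVgg.model")
inf_lb = pickle.loads(open("lb.pickle", "rb").read())
img = cv2.resize(cv2.imread("sample.jpg"), (IMAGE_DIMS[1], IMAGE_DIMS[0]))
img = img_to_array(img).astype("float") / 255.0
proba = inf_model.predict(np.expand_dims(img, axis=0))[0]
print("predicted label: {}".format(inf_lb.classes_[np.argmax(proba)]))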
(trainX, testX, trainY, testY) = train_test_split(data_train,
                                                  labels_train,
                                                  test_size=0.1,
                                                  random_state=42)

from keras.optimizers import SGD
# load the model
#inputss = Input(shape=(96,96,3),name = 'image_input')
#model = NASNetLarge()
print("[INFO] compiling model...")
# set the input image dimensions and build the model
IMAGE_DIMS = (96, 96, 3)
model = SmallerVGGNet.build(width=IMAGE_DIMS[1],
                            height=IMAGE_DIMS[0],
                            depth=IMAGE_DIMS[2],
                            classes=3,
                            finalAct="softmax")

# initialize the optimizer
# opt = Adam(lr=INIT_LR, decay=INIT_LR / EPOCHS)
opt = SGD(lr=0.01, momentum=0.9, decay=0.1)

model.compile(loss="categorical_crossentropy",
              optimizer=opt,
              metrics=["accuracy"])
#model.save("Original_Data.h5")
# train the network
print("[INFO] training network...")
#print(model.summary())
H = model.fit(trainX,