Code Example #1
### Imports shared by the examples in this section. hf_handler is the
### project-specific helper module (its data/plotting functions are not shown);
### confusion_matrix is assumed to come from scikit-learn.
import numpy as np
import tensorflow as tf
from tensorflow.keras.models import Sequential, load_model
from tensorflow.keras.layers import Conv2D, MaxPool2D, Flatten, Dense
from tensorflow.keras.optimizers import Adam
from sklearn.metrics import confusion_matrix

import hf_handler


def main_vgg16():
    print("Hello World")
    vgg16_model = tf.keras.applications.vgg16.VGG16()

    vgg16_model.summary()

    print(type(vgg16_model))

    ### Copy every VGG16 layer except the final 1000-way ImageNet classifier
    ### into a new Sequential model.
    model_new = Sequential()
    for layer in vgg16_model.layers[:-1]:
        model_new.add(layer)

    ### Freeze the copied layers so that only the new output layer is trained.
    for layer in model_new.layers:
        layer.trainable = False

    ### Add a fresh 10-way softmax output layer for the digit classes.
    model_new.add(Dense(units=10, activation='softmax'))

    model_new.summary()

    model_new.compile(optimizer=Adam(learning_rate=0.0001), loss='categorical_crossentropy', metrics=['accuracy'])

    train_batches, valid_batches, test_batches = hf_handler.process_data()
    model_new.fit(x=train_batches,
            steps_per_epoch=len(train_batches),
            validation_data=valid_batches,
            validation_steps=len(valid_batches),
            epochs=3,
            verbose=2)

    import os.path
    if not os.path.isfile('models/digit_vgg16_models_03.h5'):
        model_new.save('models/digit_vgg16_models_03.h5')

    test_imgs, test_labels = next(test_batches)
    hf_handler.plotImages(test_imgs)
    print(test_labels)

    predictions = model_new.predict(x=test_batches, steps=len(test_batches), verbose=0)

    cm = confusion_matrix(y_true=test_batches.classes, y_pred=np.argmax(predictions, axis=-1))
    cm_plot_labels = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9']
    hf_handler.plot_confusion_matrix(cm=cm, classes=cm_plot_labels, title='Confusion Matrix')
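
Every example in this section calls hf_handler.process_data(), whose implementation is not shown. The sketch below is one plausible way to build the three generators with Keras' ImageDataGenerator; the directory layout, batch size, and class list are assumptions, not the project's actual helper.

### A minimal sketch of process_data(), assuming the digit images live under
### train/valid/test directories with one sub-folder per class ('0'..'9').
### Paths and batch size are placeholders, not the real helper's values.
import tensorflow as tf
from tensorflow.keras.preprocessing.image import ImageDataGenerator

def process_data(train_path='data/digits/train',
                 valid_path='data/digits/valid',
                 test_path='data/digits/test'):
    gen = ImageDataGenerator(
        preprocessing_function=tf.keras.applications.vgg16.preprocess_input)
    digit_classes = [str(i) for i in range(10)]

    train_batches = gen.flow_from_directory(directory=train_path, target_size=(224, 224),
                                            classes=digit_classes, batch_size=10)
    valid_batches = gen.flow_from_directory(directory=valid_path, target_size=(224, 224),
                                            classes=digit_classes, batch_size=10)
    ### shuffle=False keeps test_batches.classes aligned with the prediction order,
    ### which the confusion-matrix code relies on.
    test_batches = gen.flow_from_directory(directory=test_path, target_size=(224, 224),
                                           classes=digit_classes, batch_size=10,
                                           shuffle=False)
    return train_batches, valid_batches, test_batches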
Code Example #2
def main_vgg16():
    train_batches, valid_batches, test_batches = hf_handler.process_data()

    ### Rebuild the same transfer-learning architecture as Code Example #1,
    ### then restore the weights saved by that run.
    vgg16_model = tf.keras.applications.vgg16.VGG16()
    vgg16_model.summary()
    print(type(vgg16_model))
    model_loaded = Sequential()
    for layer in vgg16_model.layers[:-1]:
        model_loaded.add(layer)

    for layer in model_loaded.layers:
        layer.trainable = False

    model_loaded.add(Dense(units=10, activation='softmax'))
    model_loaded.summary()
    model_loaded.compile(optimizer=Adam(learning_rate=0.0001),
                         loss='categorical_crossentropy',
                         metrics=['accuracy'])

    model_loaded.load_weights('models/digit_vgg16_models_03.h5')
    predictions = model_loaded.predict(x=test_batches,
                                       steps=len(test_batches),
                                       verbose=2)

    print(test_batches)
    print(predictions)

    for iCnt in range(len(predictions)):
        print("----------")
        print("prediction : ", predictions[iCnt])
        print("test_batches.classes : ", test_batches.classes[iCnt])

    cm = confusion_matrix(y_true=test_batches.classes,
                          y_pred=np.argmax(predictions, axis=-1))

    cm_plot_labels = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9']
    hf_handler.plot_confusion_matrix(cm=cm,
                                     classes=cm_plot_labels,
                                     title='Confusion Matrix')
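
The loop above prints raw softmax vectors next to integer class indices. A small helper like the sketch below (hypothetical, not part of the original code) turns each softmax vector into a digit label by inverting test_batches.class_indices.

import numpy as np

def decode_predictions(predictions, test_batches):
    ### class_indices maps folder names ('0'..'9') to integer indices; invert it
    ### so a predicted index can be turned back into its digit label.
    index_to_label = {v: k for k, v in test_batches.class_indices.items()}
    predicted_indices = np.argmax(predictions, axis=-1)
    return [index_to_label[i] for i in predicted_indices]

For example, decode_predictions(predictions, test_batches)[:5] can be compared directly against test_batches.classes[:5].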
Code Example #3
def main():
    train_batches, valid_batches, test_batches = hf_handler.process_data()
    ### Load the complete model (architecture + weights) saved in Code Example #4.
    model_loaded = load_model('models/digit_trial_models_01.h5')

    predictions = model_loaded.predict(x=test_batches,
                                       steps=len(test_batches),
                                       verbose=2)

    print(test_batches)
    print(predictions)

    for iCnt in range(len(predictions)):
        print("----------")
        print("prediction : ", predictions[iCnt])
        print("test_batches.classes : ", test_batches.classes[iCnt])

    cm = confusion_matrix(y_true=test_batches.classes,
                          y_pred=np.argmax(predictions, axis=-1))

    cm_plot_labels = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9']
    hf_handler.plot_confusion_matrix(cm=cm,
                                     classes=cm_plot_labels,
                                     title='Confusion Matrix')
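
Each example ends with hf_handler.plot_confusion_matrix(), whose implementation is also not shown. The sketch below follows the common matplotlib recipe of drawing the matrix as an image with the count annotated in each cell, which is one way such a helper could be written.

import itertools
import numpy as np
import matplotlib.pyplot as plt

def plot_confusion_matrix(cm, classes, title='Confusion Matrix', cmap=plt.cm.Blues):
    ### Draw the matrix as an image and label both axes with the class names.
    plt.imshow(cm, interpolation='nearest', cmap=cmap)
    plt.title(title)
    plt.colorbar()
    tick_marks = np.arange(len(classes))
    plt.xticks(tick_marks, classes, rotation=45)
    plt.yticks(tick_marks, classes)

    ### Write the count for each (true, predicted) cell on top of the image.
    thresh = cm.max() / 2.0
    for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
        plt.text(j, i, format(cm[i, j], 'd'),
                 horizontalalignment='center',
                 color='white' if cm[i, j] > thresh else 'black')

    plt.ylabel('True label')
    plt.xlabel('Predicted label')
    plt.tight_layout()
    plt.show()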
Code Example #4
def main():
    print("Hello World!")
    initialize()
    train_batches, valid_batches, test_batches = hf_handler.process_data()

    ### Sequential: groups a linear stack of layers into a TensorFlow Keras model.
    ### Conv2D: a 2-D convolutional layer with 32 output filters, each with a kernel
    ###         size of 3x3, and the ReLU activation function. padding='same' enables
    ###         zero-padding. input_shape is specified on the first layer only; here
    ###         each image is 224 pixels high and 224 pixels wide with 3 color
    ###         channels (RGB).
    ### MaxPool2D: a 2-D max-pooling layer that reduces the spatial dimensions of the data.
    ### Conv2D: this layer has 64 filters and no input_shape.
    ### MaxPool2D: a second 2-D max-pooling layer that reduces the spatial dimensions again.
    ### Flatten: flattens the convolutional output so it can be passed to the dense layer.
    ### Dense: the output layer of the network, with 10 nodes, one per class. It uses
    ###        the softmax activation so that each sample's output is a probability
    ###        distribution over the classes.
    model = Sequential([
        Conv2D(filters=32,
               kernel_size=(3, 3),
               activation='relu',
               padding='same',
               input_shape=(224, 224, 3)),
        MaxPool2D(pool_size=(2, 2), strides=2),
        Conv2D(filters=64,
               kernel_size=(3, 3),
               activation='relu',
               padding='same'),
        MaxPool2D(pool_size=(2, 2), strides=2),
        Flatten(),
        Dense(units=10, activation='softmax')
    ])

    model.summary()

    ### Optimizer: Adam, a variant of stochastic gradient descent, with a learning rate of 0.0001.
    ### loss: categorical cross-entropy.
    model.compile(optimizer=Adam(learning_rate=0.0001),
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])

    model.fit(x=train_batches,
              steps_per_epoch=len(train_batches),
              validation_data=valid_batches,
              epochs=3,
              verbose=2)

    import os.path
    if not os.path.isfile('models/digit_trial_models_01.h5'):
        model.save('models/digit_trial_models_01.h5')

    test_imgs, test_labels = next(test_batches)
    hf_handler.plotImages(test_imgs)
    print(test_labels)

    print(test_batches.classes)
    print(test_batches.class_indices)

    predictions = model.predict(x=test_batches,
                                steps=len(test_batches),
                                verbose=0)

    cm = confusion_matrix(y_true=test_batches.classes,
                          y_pred=np.argmax(predictions, axis=-1))

    cm_plot_labels = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9']
    hf_handler.plot_confusion_matrix(cm=cm,
                                     classes=cm_plot_labels,
                                     title='Confusion Matrix')
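
hf_handler.plotImages() is not shown either. A simple version could lay out one batch of images in a single row, as in the sketch below; the figure size is an arbitrary choice, and colors may look odd because the VGG16 preprocessing has already been applied to the batch.

import matplotlib.pyplot as plt

def plotImages(images_arr):
    ### Show one batch of images side by side in a single row.
    fig, axes = plt.subplots(1, len(images_arr), figsize=(20, 20))
    for img, ax in zip(images_arr, axes):
        ax.imshow(img.astype('uint8'))
        ax.axis('off')
    plt.tight_layout()
    plt.show()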
Code Example #5
def main():
    print("Hello World")
    ### The next (commented-out) call was used for studying the VGG16 convolutional
    ### neural network proposed by K. Simonyan and A. Zisserman.
    ### construct_VGG16_Neural_Network()

    model_vgg16 = Sequential()
    ### block1_conv1 (Conv2D)        (None, 224, 224, 64)      1792      
    model_vgg16.add(Conv2D(name="block1_conv1", filters=64, kernel_size=(3,3), padding="same", activation="relu", input_shape=(224,224,3)))
    ### block1_conv2 (Conv2D)        (None, 224, 224, 64)      36928     
    model_vgg16.add(Conv2D(name="block1_conv2", filters=64,kernel_size=(3,3),padding="same", activation="relu"))
    ### block1_pool (MaxPooling2D)   (None, 112, 112, 64)      0         
    model_vgg16.add(MaxPool2D(name="block1_pool", pool_size=(2,2),strides=(2,2)))  
    ### block2_conv1 (Conv2D)        (None, 112, 112, 128)     73856     
    model_vgg16.add(Conv2D(name="block2_conv1", filters=128, kernel_size=(3,3), padding="same", activation="relu"))
    ### block2_conv2 (Conv2D)        (None, 112, 112, 128)     147584    
    model_vgg16.add(Conv2D(name="block2_conv2", filters=128, kernel_size=(3,3), padding="same", activation="relu"))
    ### block2_pool (MaxPooling2D)   (None, 56, 56, 128)       0         
    model_vgg16.add(MaxPool2D(name="block2_pool", pool_size=(2,2),strides=(2,2)))
    ### block3_conv1 (Conv2D)        (None, 56, 56, 256)       295168    
    model_vgg16.add(Conv2D(name="block3_conv1", filters=256, kernel_size=(3,3), padding="same", activation="relu"))
    ### block3_conv2 (Conv2D)        (None, 56, 56, 256)       590080    
    model_vgg16.add(Conv2D(name="block3_conv2", filters=256, kernel_size=(3,3), padding="same", activation="relu"))
    ### block3_conv3 (Conv2D)        (None, 56, 56, 256)       590080    
    model_vgg16.add(Conv2D(name="block3_conv3", filters=256, kernel_size=(3,3), padding="same", activation="relu"))
    ### block3_pool (MaxPooling2D)   (None, 28, 28, 256)       0         
    model_vgg16.add(MaxPool2D(name="block3_pool", pool_size=(2,2),strides=(2,2)))
    ### block4_conv1 (Conv2D)        (None, 28, 28, 512)       1180160   
    model_vgg16.add(Conv2D(name="block4_conv1", filters=512, kernel_size=(3,3), padding="same", activation="relu"))
    ### block4_conv2 (Conv2D)        (None, 28, 28, 512)       2359808   
    model_vgg16.add(Conv2D(name="block4_conv2", filters=512, kernel_size=(3,3), padding="same", activation="relu"))
    ### block4_conv3 (Conv2D)        (None, 28, 28, 512)       2359808   
    model_vgg16.add(Conv2D(name="block4_conv3", filters=512, kernel_size=(3,3), padding="same", activation="relu"))    
    ### block4_pool (MaxPooling2D)   (None, 14, 14, 512)       0         
    model_vgg16.add(MaxPool2D(name="block4_pool", pool_size=(2,2),strides=(2,2)))
    ### block5_conv1 (Conv2D)        (None, 14, 14, 512)       2359808   
    model_vgg16.add(Conv2D(name="block5_conv1", filters=512, kernel_size=(3,3), padding="same", activation="relu"))
    ### block5_conv2 (Conv2D)        (None, 14, 14, 512)       2359808   
    model_vgg16.add(Conv2D(name="block5_conv2", filters=512, kernel_size=(3,3), padding="same", activation="relu"))
    ### block5_conv3 (Conv2D)        (None, 14, 14, 512)       2359808   
    model_vgg16.add(Conv2D(name="block5_conv3", filters=512, kernel_size=(3,3), padding="same", activation="relu"))
    ### block5_pool (MaxPooling2D)   (None, 7, 7, 512)         0         
    model_vgg16.add(MaxPool2D(name="block5_pool", pool_size=(2,2),strides=(2,2)))
    ### flatten (Flatten)            (None, 25088)             0         
    model_vgg16.add(Flatten())
    ### fc1 (Dense)                  (None, 4096)              102764544 
    model_vgg16.add(Dense(name="fc1", units=4096,activation="relu"))
    ### fc2 (Dense)                  (None, 4096)              16781312  
    model_vgg16.add(Dense(name="fc2", units=4096,activation="relu"))
    ### dense (Dense)                (None, 10)                40970
    model_vgg16.add(Dense(name="dense", units=10, activation='softmax'))

    model_vgg16.summary()

    model_vgg16.compile(optimizer=Adam(learning_rate=0.0001), loss='categorical_crossentropy', metrics=['accuracy'])

    train_batches, valid_batches, test_batches = hf_handler.process_data()
    model_vgg16.fit(x=train_batches,
            steps_per_epoch=len(train_batches),
            validation_data=valid_batches,
            validation_steps=len(valid_batches),
            epochs=2,
            verbose=2)

    import os.path
    if not os.path.isfile('models/digit_vgg16_models_03_01.h5'):
        model_vgg16.save('models/digit_vgg16_models_03_01.h5')

    test_imgs, test_labels = next(test_batches)
    hf_handler.plotImages(test_imgs)
    print(test_labels)

    print(test_batches.classes)
    print(test_batches.class_indices)

    predictions = model_vgg16.predict(x=test_batches,
                                      steps=len(test_batches),
                                      verbose=0)

    cm = confusion_matrix(y_true=test_batches.classes,
                          y_pred=np.argmax(predictions, axis=-1))

    cm_plot_labels = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9']
    hf_handler.plot_confusion_matrix(cm=cm, classes=cm_plot_labels, title='Confusion Matrix')

    print("Got this far")