from keras.optimizers import Adagrad
from keras_sequential_ascii import keras2ascii

def model1():
    numEpochs = 50
    lrRate = 1e-2
    lrRateDecay = lrRate/numEpochs
    # `Model` is this snippet's own CNN builder class (definition not shown)
    model = Model.build(width=48, height=48, depth=3, classes=2)
    opt = Adagrad(lr=lrRate, decay=lrRateDecay)
    model.compile(loss='binary_crossentropy',
                  optimizer=opt,
                  metrics=['accuracy'])
    keras2ascii(model)
    #plot_model(model, to_file='model.png')
    return model
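`Model.build` is not defined in the snippet above. A minimal sketch of what such a builder could look like, assuming a small conv/dense stack sized by the `width`, `height`, `depth`, and `classes` arguments (the layer choices here are hypothetical, not the original author's architecture):

# Hypothetical stand-in for the undefined `Model` class above.
from keras.models import Sequential
from keras.layers import Conv2D, MaxPooling2D, Flatten, Dense

class Model:
    @staticmethod
    def build(width, height, depth, classes):
        model = Sequential()
        # One small conv block, then a classifier head.
        model.add(Conv2D(32, (3, 3), activation='relu',
                         input_shape=(height, width, depth)))
        model.add(MaxPooling2D(pool_size=(2, 2)))
        model.add(Flatten())
        model.add(Dense(classes, activation='softmax'))
        return model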
Example #2
def plot_model(model, filename='model.png'):
    # keras.utils.plot_model writes a diagram of the model to disk;
    # alias it so it does not shadow this wrapper's own name.
    from keras.utils import plot_model as keras_plot_model
    keras_plot_model(model,
                     to_file=filename,
                     show_layer_names=True,
                     show_shapes=True)

    # keras2ascii prints its diagram itself, so no print() wrapper is needed.
    from keras_sequential_ascii import keras2ascii
    keras2ascii(model)
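A quick usage sketch for the wrapper above, using a throwaway two-layer model (pydot and graphviz must be installed for the diagram to render; the model itself is just an illustration):

from keras.models import Sequential
from keras.layers import Dense

# Tiny model only to exercise the wrapper above.
demo = Sequential()
demo.add(Dense(4, activation='relu', input_shape=(8,)))
demo.add(Dense(1, activation='sigmoid'))

plot_model(demo, filename='demo.png')  # writes demo.png and prints the ASCII view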
Example #3
model.add(MaxPooling2D(pool_size=(2, 2)))
# Apply Dropout with rate 0.35 to the pooling layer's output
model.add(Dropout(0.35))
# Flatten the pixels, e.g. [28*28] -> [784]
model.add(Flatten())
# Fully connected layer over all pixels: 128 units with ReLU activation
model.add(Dense(128, activation='relu'))
# Apply Dropout with rate 0.5 to the dense layer's output
model.add(Dropout(0.5))
# Softmax output layer producing the final scores for digits 0-9
model.add(Dense(num_classes, activation='softmax'))
# Compile with categorical cross-entropy loss and the Adadelta optimizer
model.compile(loss=keras.losses.categorical_crossentropy,
              optimizer=keras.optimizers.Adadelta(),
              metrics=['accuracy'])
# The exciting part: the training run
model.fit(x_train,
          y_train,
          batch_size=batch_size,
          epochs=epochs,
          verbose=1,
          validation_data=(x_test, y_test))

score = model.evaluate(x_test, y_test, verbose=0)
model.save("model.h5")

keras2ascii(model)  # print an ASCII visualization of the model

print('Test loss:', score[0])
print('Test accuracy:', score[1])
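Since this example saves the trained network to model.h5, a short follow-up sketch of reloading it for inference (load_model is standard Keras; x_test comes from the snippet above):

# Reload the saved model later and predict on a few test samples.
from keras.models import load_model

restored = load_model("model.h5")
preds = restored.predict(x_test[:5])   # class probabilities for five samples
print(preds.argmax(axis=1))            # predicted digit labels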
Example #4

    # EVALUATE MODELS
    score = model.evaluate(x_test, y_test, verbose=0)

    # Print out accuracy. Change to score[0] for loss.
    print("%s %.5f" % (f, score[1]))

    # Could add some code to predict on a new sample
    # model.predict(new_sample)

    # Print visual of model layers
    #import keras_sequential_ascii as ksq
    #ksq.sequential_model_to_ascii_printout(cnn_n)
    from keras_sequential_ascii import keras2ascii
    keras2ascii(model)

    # Confusion Matrix
    import numpy as np
    from sklearn.metrics import classification_report, confusion_matrix

    Y_pred = model.predict(x_test, verbose=2)
    y_pred = np.argmax(Y_pred, axis=1)

    # Compute the matrix once, then print each class's row total.
    cm = confusion_matrix(np.argmax(y_test, axis=1), y_pred)
    for ix in range(10):
        print(ix, cm[ix].sum())
    print(cm)

    # Visualizing of confusion matrix
    import seaborn as sn
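The example breaks off after the seaborn import; a plausible continuation, sketching the heatmap it was presumably building from `cm` (figure size and axis labels are assumptions):

    import matplotlib.pyplot as plt

    # Hypothetical continuation: render the confusion matrix as a heatmap.
    plt.figure(figsize=(8, 6))  # size is an assumption
    sn.heatmap(cm, annot=True, fmt='d', cmap='Blues')
    plt.xlabel('Predicted label')
    plt.ylabel('True label')
    plt.show()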
Example #5

classificadorx2.add(Dropout(0.1))
classificadorx2.add(Dense(units=128,
                          activation='relu',
                          use_bias=True,
                          kernel_initializer=initializers.RandomNormal(stddev=0.01),
                          bias_initializer='zeros'))
classificadorx2.add(Dropout(0.05))
classificadorx2.add(Dense(units=18,
                          activation='sigmoid'))
classificadorx2.compile(optimizer='adam',
                        loss='categorical_crossentropy',
                        metrics=['accuracy'])

classificadorx2.summary()

keras2ascii(classificadorx2)

epochs = 500

learning_rate = ReduceLROnPlateau(monitor='accuracy',
                                  factor=0.1,
                                  patience=2,
                                  verbose=1,
                                  mode="auto",
                                  min_delta=0.001,
                                  cooldown=0,
                                  min_lr=0.01)

h_x2 = classificadorx2.fit(base_treino,
                           steps_per_epoch=100,
                           epochs=epochs,
                           callbacks=[learning_rate])  # the ReduceLROnPlateau defined above
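A short follow-up sketch, assuming `fit` returns the usual History object, to plot the accuracy curve (matplotlib is the only addition; everything else comes from the snippet):

# Plot the accuracy recorded by fit(); the key is 'accuracy' on
# recent Keras releases and 'acc' on older ones.
import matplotlib.pyplot as plt

plt.plot(h_x2.history['accuracy'], label='train accuracy')
plt.xlabel('epoch')
plt.ylabel('accuracy')
plt.legend()
plt.show()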
Example #6
from keras import backend as K
from keras.models import Sequential
from keras.layers import Conv2D, MaxPooling2D, Dropout, Flatten, Dense
from keras.optimizers import Adam, SGD, RMSprop
from keras_sequential_ascii import keras2ascii

# load_data, save_model, and make_plot are this snippet's own helpers
# (definitions not shown).

def build_model(lr, batch_size, epochs, activation_function, optimizer,
                conv_depth, model_name):
    # Work inside the backend session's graph (TF1-era Keras pattern).
    graph = K.get_session().graph

    with graph.as_default():
        adam = Adam(lr=lr, decay=1e-6)
        sgd = SGD(lr=lr, decay=1e-6)
        rmsprop = RMSprop(lr=lr, decay=1e-6)
        optimizers = {
            'adam': adam,
            'sgd': sgd,
            'rmsprop': rmsprop,
        }

        model = Sequential()

        filters = 32
        model.add(
            Conv2D(filters,
                   kernel_size=(3, 3),
                   activation=activation_function,
                   input_shape=(32, 32, 3),
                   padding='same'))
        # model.add(Conv2D(filters, kernel_size=(3, 3),
        #                 activation=activation_function,
        #                 padding='same'))
        model.add(MaxPooling2D(pool_size=(2, 2)))
        model.add(Dropout(rate=0.25, seed=123))

        filters *= 2

        for _ in range(conv_depth - 1):
            model.add(
                Conv2D(filters,
                       kernel_size=(3, 3),
                       activation=activation_function,
                       padding='same'))
            # model.add(Conv2D(filters, kernel_size=(3, 3),
            #                 activation=activation_function,
            #                 padding='same'))
            model.add(MaxPooling2D(pool_size=(2, 2)))
            model.add(Dropout(rate=0.25, seed=123))
            filters *= 2

        model.add(Flatten())
        model.add(Dense(1024, activation=activation_function))
        model.add(Dropout(rate=0.5, seed=123))

        num_classes = 10
        model.add(Dense(num_classes, activation='softmax'))

        print(optimizers[optimizer])

        # Use the optimizer selected by the `optimizer` argument,
        # not a hard-coded Adam.
        model.compile(loss='categorical_crossentropy',
                      optimizer=optimizers[optimizer],
                      metrics=['accuracy'])

        keras2ascii(model)

        X_train, Y_train, X_test, Y_test = load_data(num_classes)

        history = model.fit(X_train,
                            Y_train,
                            batch_size=batch_size,
                            shuffle=True,
                            epochs=epochs,
                            validation_data=(X_test, Y_test),
                            verbose=1)

        save_model(model, model_name)

        make_plot(history, epochs, model_name)
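A hedged usage sketch for build_model; the hyperparameter values below are illustrative only, and load_data, save_model, and make_plot must be supplied by the surrounding project:

# Illustrative call; the values are assumptions, not the author's settings.
build_model(lr=1e-3,
            batch_size=64,
            epochs=25,
            activation_function='relu',
            optimizer='adam',
            conv_depth=3,
            model_name='cifar10_cnn')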