Example #1
0
def main():
    """Train a small CNN on CIFAR-10 and report its test accuracy.

    Loads CIFAR-10, preprocesses the images, one-hot encodes the labels,
    trains the network built by ``treinaCNN``, prints the per-epoch
    training accuracy and the final test accuracy.  Relies on module-level
    helpers (``preImageProcessing``, ``treinaCNN``, ``vizualizaResultados``,
    ``sequential_model_to_ascii_printout``) defined elsewhere in this file.
    """
    seed = 10
    np.random.seed(seed)  # reproducible weight init / data shuffling

    (dadoEntrada, dadoSaida), (dadoEntradaTeste,
                               dadoSaidaTeste) = cifar10.load_data()

    previsaoTreinamento, previsaoTeste = preImageProcessing(
        dadoEntrada=dadoEntrada, dadoEntradaTeste=dadoEntradaTeste)

    # One-hot encode the categorical labels (10 CIFAR-10 classes).
    dummyRespTreinamento = np_utils.to_categorical(dadoSaida, 10)
    dummyRespTeste = np_utils.to_categorical(dadoSaidaTeste, 10)

    # NOTE(review): dadoEntrada[:][0] is just dadoEntrada[0], so height and
    # width are both the first image dimension — works only for square
    # images (true for CIFAR-10's 32x32).
    cNN = treinaCNN(alturaImagem=len(dadoEntrada[:][0]),
                    larguraImagem=len(dadoEntrada[0][:]))

    sequential_model_to_ascii_printout(cNN)

    # BUG FIX: capture the History object returned by fit(); the original
    # read `cnn.history` from an undefined name `cnn` (NameError).
    historico = cNN.fit(previsaoTreinamento,
                        dummyRespTreinamento,
                        batch_size=32,
                        epochs=20,
                        validation_data=(previsaoTeste, dummyRespTeste))

    resultado = cNN.evaluate(previsaoTeste, dummyRespTeste)

    print(historico.history['acc'])

    vizualizaResultados(cNN)

    # evaluate() returns [loss, accuracy]; index 1 is the accuracy metric.
    print("Accuracy: %.2f%%" % (resultado[1] * 100))
    # BUG FIX: removed a spliced-in tail (sgd = SGD(...); model.compile(...);
    # return model) that referenced an undefined `model` and belonged to a
    # different example — it could only raise NameError.


# Build the model defined by the module-level base_model() helper.
cnn_n = base_model()
cnn_n.summary()

# Visualizing model structure

sequential_model_to_ascii_printout(cnn_n)

# Fit model

# `cnn` is the History object returned by fit(); its .history dict holds
# the per-epoch metric curves plotted below.
cnn = cnn_n.fit(x_train,
                y_train,
                batch_size=batch_size,
                epochs=epochs,
                validation_data=(x_test, y_test),
                shuffle=True)

# Plots for training and testing process: loss and accuracy
# NOTE(review): the 'acc'/'val_acc' keys are old-Keras names; tf.keras >= 2
# uses 'accuracy'/'val_accuracy' — confirm the Keras version in use.

plt.figure(0)
plt.plot(cnn.history['acc'], 'r')      # training accuracy per epoch (red)
plt.plot(cnn.history['val_acc'], 'g')  # validation accuracy per epoch (green)
def constructModel(stream_res, num_detects, num_classes, model_plan,
                   file_path):
    """Build, compile and save a Sequential CNN described by ``model_plan``.

    ``model_plan`` is one flat numeric sequence; with N conv layers and M
    dense layers it is laid out as:

        [0]                  N, number of conv layers (must be >= 1)
        [1 : N+1]            conv kernel x-sizes
        [N+1 : 2N+1]         conv kernel y-sizes
        [2N+1 : 3N+1]        conv filter counts
        [3N+1 : 4N+1]        per-layer batch-norm flags (1 = add)
        [4N+1 : 5N+1]        per-layer dropout flags (1 = add)
        [5N+1 : 6N+1]        per-layer dropout rates
        [6N+1]               M, number of dense layers
        [6N+2 : 6N+2+M]      dense layer widths
        [6N+2+M : 6N+2+2M]   dense dropout flags
        [6N+2+2M : 6N+2+3M]  dense dropout rates
        [6N+2+3M]            learning rate for the Nadam optimizer

    The compiled model is saved to ``file_path + "/model_1"`` and its
    structure is printed as ASCII art.

    Args:
        stream_res: input height (first spatial input dimension).
        num_detects: input width (second spatial input dimension).
        num_classes: number of output classes (width of the final softmax).
        model_plan: flat plan as described above.
        file_path: destination directory, as an ASCII-encoded bytes object.
    """
    file_path = file_path.decode("ascii")

    # Quiet TensorFlow's info/warning log output.
    os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'

    # Ask TF not to allocate all GPU memory, leaving some for the display.
    # NOTE(review): `config` is never passed to a Session/graph here, so
    # this setting currently has no effect — confirm intended usage.
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True

    print("Constructing model...")

    num_conv_layers = int(model_plan[0])

    if (num_conv_layers < 1):
        print("There must be at least one convoloutional layer, exiting.")
        exit(1)

    # Slice the flat plan into per-layer parameter arrays.
    conv_kern_sizes_x = np.asarray(model_plan[1:num_conv_layers + 1],
                                   dtype=np.int32)
    conv_kern_sizes_y = np.asarray(model_plan[num_conv_layers +
                                              1:2 * num_conv_layers + 1],
                                   dtype=np.int32)
    conv_num_filters = np.asarray(model_plan[2 * num_conv_layers +
                                             1:3 * num_conv_layers + 1],
                                  dtype=np.int32)
    conv_batch_norm_present = np.asarray(model_plan[3 * num_conv_layers +
                                                    1:4 * num_conv_layers + 1],
                                         dtype=np.int32)
    conv_dropout_present = np.asarray(model_plan[4 * num_conv_layers +
                                                 1:5 * num_conv_layers + 1],
                                      dtype=np.int32)
    # BUG FIX: was dtype=np.float — that alias was removed in NumPy 1.24
    # (AttributeError on modern NumPy); the builtin `float` is the
    # documented replacement and matches `dense_dropouts` below.
    kern_dropouts = np.asarray(model_plan[5 * num_conv_layers +
                                          1:6 * num_conv_layers + 1],
                               dtype=float)

    conv_layer_end = 6 * num_conv_layers

    num_dense_layers = int(model_plan[conv_layer_end + 1])
    dense_num_outputs = np.asarray(
        model_plan[conv_layer_end + 2:conv_layer_end + 2 + num_dense_layers],
        dtype=np.int32)
    dense_dropouts_present = np.asarray(
        model_plan[conv_layer_end + 2 + num_dense_layers:conv_layer_end + 2 +
                   2 * num_dense_layers],
        dtype=np.int32)
    dense_dropouts = np.asarray(
        model_plan[conv_layer_end + 2 + 2 * num_dense_layers:conv_layer_end +
                   2 + 3 * num_dense_layers],
        dtype=float)

    learning_rate = model_plan[conv_layer_end + 2 + 3 * num_dense_layers]

    model = Sequential()

    # Convolutional stack: only the first layer declares the input shape.
    for conv_idx in range(num_conv_layers):
        if (conv_idx == 0):
            model.add(
                Conv2D(filters=conv_num_filters[0],
                       kernel_size=(conv_kern_sizes_x[0],
                                    conv_kern_sizes_y[0]),
                       activation='relu',
                       input_shape=(stream_res, num_detects, 1)))
        else:
            model.add(
                Conv2D(filters=conv_num_filters[conv_idx],
                       kernel_size=(conv_kern_sizes_x[conv_idx],
                                    conv_kern_sizes_y[conv_idx]),
                       activation='relu'))

        if (conv_batch_norm_present[conv_idx] == 1):
            model.add(BatchNormalization())

        if (conv_dropout_present[conv_idx] == 1):
            model.add(Dropout(kern_dropouts[conv_idx]))

    model.add(Flatten())

    # NOTE(review): hidden dense layers use 'softmax' activation — unusual
    # for intermediate layers (typically 'relu'); confirm this is intended.
    for dense_idx in range(num_dense_layers):
        model.add(Dense(dense_num_outputs[dense_idx], activation='softmax'))
        if (dense_dropouts_present[dense_idx] == 1):
            model.add(Dropout(dense_dropouts[dense_idx]))

    model.add(Dense(num_classes, activation='softmax'))

    # compile the model
    opt = optimizers.Nadam(lr=learning_rate,
                           beta_1=0.9,
                           beta_2=0.999,
                           epsilon=None,
                           schedule_decay=0.004)
    model.compile(loss='categorical_crossentropy', optimizer=opt)

    model.save(file_path + "/model_1")

    sequential_model_to_ascii_printout(model)
Example #4
0
# Two stacked 3x3 conv layers, then dropout and a small dense head.
model.add(Conv2D(32, (3, 3), activation='relu'))
model.add(Conv2D(32, (3, 3), activation='relu'))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(400, activation='relu'))
model.add(Dropout(0.5))
# Two-unit tanh output reshaped to (1, 2) — presumably a 2-value
# regression target in [-1, 1]; confirm against the training labels.
model.add(Dense(2, activation='tanh'))
model.add(Reshape((1, 2), input_shape=(2, )))

# Symbolic output tensor of every layer (handy for debugging shapes).
outputs = [layer.output for layer in model.layers]
print(outputs)
print(model.summary())

model.compile(loss='mean_squared_error', optimizer='Adam')

sequential_model_to_ascii_printout(model)

history = model.fit(x_train,
                    y_train,
                    batch_size=batch_size_NN,
                    epochs=epochs,
                    verbose=1,
                    validation_data=(x_test, y_test))
score = model.evaluate(x_test, y_test, verbose=1)
print('Test loss:', score)

model.save('cnn.h5')
print(model.summary())
# NOTE(review): the ''' below opens a triple-quoted string that is never
# closed in this chunk — the plotting lines that follow are effectively
# disabled (string content, not executed code).
'''
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
# Training model  (translated from Turkish: "Eğitim modeli")
# NOTE(review): the lines below use typographic quotes (‘ ’), which are not
# valid Python string delimiters — this fragment is a SyntaxError as
# written.  The two indented lines are also the tail of a function whose
# `def` line is not present in this chunk.
    model.compile(loss=‘categorical_crossentropy’, optimizer=sgd, metrics=[‘accuracy’])
    return model
cnn_n = base_model()
cnn_n.summary()
# Fit the model  (translated from Turkish: "Uygun model")
cnn = cnn_n.fit(x_train, y_train, batch_size=batch_size, epochs=epochs, validation_data=(x_test,y_test),shuffle=True)
def base_model():
    """Build and return an (uncompiled) VGG-style CNN for classification.

    Three conv blocks (32 -> 64 -> 128 filters; each block is conv,
    dropout, conv, 2x2 max-pool) followed by a 1024-unit dense layer and a
    softmax head sized by the module-level ``num_classes``.  The input
    shape is taken from the module-level ``x_train``.

    BUG FIXES vs. the original: every string literal used typographic
    quotes (‘ ’), which are a SyntaxError in Python — replaced with ASCII
    quotes; and the function never returned ``model``, so the call
    ``cnn_n = base_model()`` above received None — a ``return model`` is
    added.
    """
    model = Sequential()
    model.add(Conv2D(32, (3, 3), padding='same', activation='relu',
                     input_shape=x_train.shape[1:]))
    model.add(Dropout(0.2))
    model.add(Conv2D(32, (3, 3), padding='same', activation='relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Conv2D(64, (3, 3), padding='same', activation='relu'))
    model.add(Dropout(0.2))
    model.add(Conv2D(64, (3, 3), padding='same', activation='relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Conv2D(128, (3, 3), padding='same', activation='relu'))
    model.add(Dropout(0.2))
    model.add(Conv2D(128, (3, 3), padding='same', activation='relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Flatten())
    model.add(Dropout(0.2))
    # maxnorm(3) caps the dense kernel's weight norm (regularization).
    model.add(Dense(1024, activation='relu', kernel_constraint=maxnorm(3)))
    model.add(Dropout(0.2))
    model.add(Dense(num_classes, activation='softmax'))
    return model
# In this section we can visualize the model structure. For this problem we
# can use a Keras library by Piotr Migdał for exploring the architectures
# and parameters of sequential models.
# Visualizing the model structure (translated from Turkish)
sequential_model_to_ascii_printout(cnn_n)
Example #6
0
# Fully-connected classifier head on top of the upstream `flat` tensor
# (defined earlier in the original example, not visible in this chunk).
fc1 = Dense(1000, activation='relu')(flat)

# define model output
output = Dense(10, activation='softmax')(fc1)


# create model (functional API: `visible` is the input tensor from above)
naive_model = Model(inputs=visible, outputs=output)

# summarize model

naive_model.summary()

# Visualizing model structure

sequential_model_to_ascii_printout(naive_model)

# plot model architecture

plot_model(naive_model, show_shapes=True, to_file='naive_inception_module.png')

# Compile the model

naive_model.compile(optimizer='sgd', loss='categorical_crossentropy', metrics=['accuracy'])

# Fitting the model

batch_size = 32   # 64 in the original experiment
epochs = 10     # 150 in the original experiment

# Train model (use 10% of training set as validation set)