Example #1
from keras import models
from keras import layers
import matplotlib.pyplot as plt

# x_train / y_train are assumed to be the vectorized IMDB data
# (see the preprocessing sketch after this example)
x_val = x_train[:10000]
partial_x_train = x_train[10000:]
y_val = y_train[:10000]
partial_y_train = y_train[10000:]

model = models.Sequential()
model.add(layers.Dense(16, activation='relu', input_shape=(10000, )))
model.add(layers.Dense(16, activation='relu'))
model.add(layers.Dense(1, activation='sigmoid'))

model.compile(optimizer='rmsprop',
              loss='binary_crossentropy',
              metrics=['accuracy'])

history = model.fit(partial_x_train,
                    partial_y_train,
                    epochs=20,
                    batch_size=50,
                    validation_data=(x_val, y_val))

history_dict = history.history
loss_values = history_dict['loss']
val_loss_values = history_dict['val_loss']

epochs = range(1, len(loss_values) + 1)

plt.plot(epochs, loss_values, 'bo', label='Training loss')
plt.plot(epochs, val_loss_values, 'b', label='Validation loss')
plt.title('Training and validation loss')
plt.xlabel('Epochs')
plt.ylabel('Loss')
plt.legend()
plt.show()
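
Example #1 starts from already-vectorized data. A minimal sketch of that preprocessing, assuming the standard keras.datasets.imdb workflow with the top 10,000 words:

import numpy as np
from keras.datasets import imdb

(train_data, train_labels), (test_data, test_labels) = imdb.load_data(num_words=10000)

def vectorize_sequences(sequences, dimension=10000):
    # Multi-hot encode each review: 1.0 at every word index that occurs
    results = np.zeros((len(sequences), dimension))
    for i, sequence in enumerate(sequences):
        results[i, sequence] = 1.
    return results

x_train = vectorize_sequences(train_data)
x_test = vectorize_sequences(test_data)
y_train = np.asarray(train_labels).astype('float32')
y_test = np.asarray(test_labels).astype('float32')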
Example #2
# -*- coding: utf-8 -*-
# models.Sequential class: a linear stack of layers
from keras import models
from keras import layers

model = models.Sequential()
model.add(layers.Dense(32, activation='relu', input_shape=(784, )))
model.add(layers.Dense(10, activation='softmax'))

# Functional API definition: supports arbitrary directed-acyclic graphs of layers
input_tensor = layers.Input(shape=(784, ))
x = layers.Dense(32, activation='relu')(input_tensor)
output_tensor = layers.Dense(10, activation='softmax')(x)
model = models.Model(inputs=input_tensor, outputs=output_tensor)

# The remaining steps are the same
from keras import optimizers

model.compile(optimizer=optimizers.RMSprop(lr=0.001),
              loss='mse',
              metrics=['accuracy'])
model.fit(input_tensor, target_tensor, batch_size=128, epochs=10)  # input_tensor/target_tensor stand in for actual training data and labels
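
To illustrate the directed-acyclic-graph point above, here is a minimal two-input functional model that a Sequential model cannot express; the layer sizes are hypothetical, not from the original snippet:

from keras import layers, models

# Two inputs flow through separate branches and merge by concatenation: a simple DAG
input_a = layers.Input(shape=(64, ))
input_b = layers.Input(shape=(32, ))
branch_a = layers.Dense(16, activation='relu')(input_a)
branch_b = layers.Dense(16, activation='relu')(input_b)
merged = layers.concatenate([branch_a, branch_b])
output = layers.Dense(1, activation='sigmoid')(merged)
dag_model = models.Model(inputs=[input_a, input_b], outputs=output)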
Example #3
# Fully connected head; a Sequential model with a convolutional base is assumed above
from keras.layers import Flatten, Dense, Activation, Dropout

model.add(Flatten())
model.add(Dense(128))  # Fully connected layer in Keras
model.add(Activation('relu'))

# Drop out some neurons to reduce overfitting
model.add(Dropout(drop_prob))  # drop_prob is assumed defined earlier, e.g. 0.5

# Readout layer
model.add(Dense(num_classes))  # num_classes is assumed defined earlier, 10 for MNIST
model.add(Activation('softmax'))

# Set the loss, optimizer, and metrics used to compile and evaluate the model
model.compile(
    loss='categorical_crossentropy',
    optimizer='adam',  # was adadelta
    metrics=['accuracy'])

#   Training settings
batch_size = 128
num_epoch = 2

# Fit the model to the training data; Keras reports time, loss, and validation accuracy per epoch
model.fit(train_images,
          mnist.train.labels,
          batch_size=batch_size,
          epochs=num_epoch,  # 'nb_epoch' is the deprecated Keras 1 spelling
          verbose=1,
          validation_data=(test_images, mnist.test.labels))
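
Example #3 never shows where train_images, test_images, and the mnist object come from. A sketch of the setup it appears to assume, using the old TF 1.x tutorial helper and reshaping the flat vectors for a convolutional base:

from tensorflow.examples.tutorials.mnist import input_data

mnist = input_data.read_data_sets('MNIST_data', one_hot=True)
# Reshape flat 784-dimensional vectors into 28x28x1 images for Conv2D layers
train_images = mnist.train.images.reshape(-1, 28, 28, 1)
test_images = mnist.test.images.reshape(-1, 28, 28, 1)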
Example #4
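Example #4 begins at compile time, so the model itself is missing. A sketch of a Reuters-style classifier consistent with the one-hot labels and topic argmax below; the layer sizes, the 10,000-dimensional input, and the 46-class output are assumptions:

import numpy as np
from keras import models
from keras import layers

model = models.Sequential()
model.add(layers.Dense(64, activation='relu', input_shape=(10000, )))
model.add(layers.Dense(64, activation='relu'))
model.add(layers.Dense(46, activation='softmax'))  # 46 topic classes assumed
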
model.compile(optimizer='rmsprop',
              loss='categorical_crossentropy',
              metrics=['accuracy'])

# Create a validation split
x_val = x_train[:1000]
partial_x_train = x_train[1000:]

y_val = one_hot_train_labels[:1000]
partial_y_train = one_hot_train_labels[1000:]

# history = model.fit(partial_x_train, partial_y_train, epochs=20, batch_size=512, validation_data=(x_val, y_val))

# Validation loss is lowest around epoch 9, so retrain from scratch
# on the full training set for 9 epochs
history = model.fit(x_train, one_hot_train_labels, epochs=9, batch_size=512)
results = model.evaluate(x_test, one_hot_test_labels)

print(results)
# Check the accuracy of random guessing, without any model (see the sketch after this example)

# Generate predictions on the test data
predictions = model.predict(x_test)
# Each prediction is a vector with one entry per classification label
print(predictions[0].shape)
# The entries of each prediction vector sum to 1
print(np.sum(predictions[0]))
# The index of the largest entry is the most likely topic label
print(np.argmax(predictions[0]))
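
A sketch of the random-guessing baseline mentioned above, assuming test_labels holds the integer class labels before one-hot encoding:

import copy
import numpy as np

test_labels_copy = copy.copy(test_labels)
np.random.shuffle(test_labels_copy)
# Fraction of positions where a shuffled copy happens to match the true labels
hits = np.array(test_labels) == np.array(test_labels_copy)
print(float(np.sum(hits)) / len(test_labels))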