Example #1

# Assumed imports for this snippet (it begins mid-script; `model`, trainX/trainY and
# testX/testY are expected to be defined earlier):
import numpy as np
from sklearn.metrics import confusion_matrix
print(model.summary())

# Compile Model
model.compile(optimizer='Adam',
              loss='categorical_crossentropy',
              metrics=['accuracy'])
'''
train model
'''

batch_size = 256
num_epochs = 20
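
# The commented-out callbacks=[checkpoint] in the fit() call below suggests a
# ModelCheckpoint callback; a minimal sketch of one (the file path and monitored
# metric are assumptions, not taken from the source):
from tensorflow.keras.callbacks import ModelCheckpoint
checkpoint = ModelCheckpoint('best_model.h5', monitor='accuracy',
                             save_best_only=True, verbose=1)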

# Train Model
history = model.fit(trainX, trainY, batch_size=batch_size,
                    epochs=num_epochs)  #, callbacks=[checkpoint])

predY = model.predict(testX)
y_pred = np.argmax(predY, axis=1)
y_actual = np.argmax(testY, axis=1)
#y_label= [labels[k] for k in y_pred]
cm = confusion_matrix(y_actual, y_pred)
print(cm)
'''
confusion matrix
'''

import itertools


def plot_confusion_matrix(cm,
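                          # The source snippet is cut off at this signature; the rest of
                          # the function below is a sketch following the standard
                          # matplotlib/sklearn confusion-matrix plotting recipe, not the
                          # original author's code.
                          classes,
                          normalize=False,
                          title='Confusion matrix',
                          cmap=None):
    import matplotlib.pyplot as plt  # assumed import

    if cmap is None:
        cmap = plt.cm.Blues
    if normalize:
        cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]

    plt.imshow(cm, interpolation='nearest', cmap=cmap)
    plt.title(title)
    plt.colorbar()
    tick_marks = np.arange(len(classes))
    plt.xticks(tick_marks, classes, rotation=45)
    plt.yticks(tick_marks, classes)

    thresh = cm.max() / 2.
    for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
        plt.text(j, i, format(cm[i, j], '.2f' if normalize else 'd'),
                 horizontalalignment='center',
                 color='white' if cm[i, j] > thresh else 'black')

    plt.ylabel('True label')
    plt.xlabel('Predicted label')
    plt.tight_layout()
    plt.show()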
Example #2

# Assumed imports for this snippet (`model` is a Sequential whose first blocks are
# defined in code omitted above):
from tensorflow.keras.layers import Conv2D, MaxPooling2D, Flatten, Dense
model.add(MaxPooling2D((2,2),strides=(2,2)))

model.add(Conv2D(512,(3,3),activation='relu',padding='same'))
model.add(Conv2D(512,(3,3),activation='relu',padding='same'))
model.add(Conv2D(512,(3,3),activation='relu',padding='same'))
model.add(MaxPooling2D((2,2),strides=(2,2)))

model.add(Flatten())
model.add(Dense(600,activation='relu'))
model.add(Dense(600,activation='relu'))
model.add(Dense(2,activation='softmax'))

model.compile(optimizer='adam',
              loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])

# NOTE: fit() expects arrays or a tf.data.Dataset, not raw TFRecord file paths (the
# second positional argument would also be treated as labels), so this call cannot
# work as written; see the parsing sketch below.
# model.fit(train_record_fname, test_record_fname, epochs=2)
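
# A minimal sketch of a tf.data pipeline that parses the TFRecord files into
# (image, label) batches for model.fit(); the feature keys ('image/encoded',
# 'image/class/label') and the image size are assumptions, not taken from the source:
import tensorflow as tf

IMG_SIZE = 224  # assumed input size of the network above

feature_spec = {
    'image/encoded': tf.io.FixedLenFeature([], tf.string),
    'image/class/label': tf.io.FixedLenFeature([], tf.int64),
}

def _parse(example_proto):
    parsed = tf.io.parse_single_example(example_proto, feature_spec)
    image = tf.io.decode_jpeg(parsed['image/encoded'], channels=3)
    image = tf.image.resize(image, (IMG_SIZE, IMG_SIZE)) / 255.0
    return image, parsed['image/class/label']

def make_dataset(record_fname, batch_size=32):
    return (tf.data.TFRecordDataset(record_fname)
            .map(_parse, num_parallel_calls=tf.data.AUTOTUNE)
            .batch(batch_size)
            .prefetch(tf.data.AUTOTUNE))

train_ds = make_dataset(train_record_fname)
test_ds = make_dataset(test_record_fname)
model.fit(train_ds, validation_data=test_ds, epochs=2)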

"""# Neuer Ansatz A)
Eine andere Herangehensweise mit anderen Funktionen und Befehlen. Könnte sich rentieren das Netz in diese Richtung auszubauen bzw Betandteile zu übernehmen.

---wurde noch nicht auf das bestehende Netz angepasst---
"""

### A) New approach CNN_model https://androidkt.com/feeding-your-own-data-set-into-the-cnn-model-in-tensorflow/
labels=label_map_pbtxt_fname



_DEFAULT_IMAGE_SIZE = 252
_NUM_CHANNELS = 3
_NUM_CLASSES = 4
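# The layer definitions between the constants above and the Model(...) call below are
# missing from the snippet. A minimal sketch of what they could look like with the
# Keras functional API; the filter counts, dense width and the learning_rate/momentum
# values are assumptions, not the original code:
from tensorflow import keras
from tensorflow.keras import Model

learning_rate = 0.01  # assumed
momentum = 0.9        # assumed

input_layer = keras.Input(shape=(_DEFAULT_IMAGE_SIZE, _DEFAULT_IMAGE_SIZE, _NUM_CHANNELS))
x = keras.layers.Conv2D(32, (3, 3), activation='relu')(input_layer)
x = keras.layers.MaxPooling2D((2, 2))(x)
x = keras.layers.Conv2D(64, (3, 3), activation='relu')(x)
x = keras.layers.MaxPooling2D((2, 2))(x)
x = keras.layers.Flatten()(x)
x = keras.layers.Dense(128, activation='relu')(x)
output = keras.layers.Dense(_NUM_CLASSES, activation='softmax')(x)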
model = Model(input_layer, output)

# Compiling model
optimizer = keras.optimizers.SGD(learning_rate=learning_rate, momentum=momentum)
model.compile(
    loss="sparse_categorical_crossentropy",
    optimizer=optimizer,
    metrics=["accuracy"]
)
model.summary()

# Train the model
history = model.fit(
    train_data,
    epochs=13,
    validation_data = test_data
)

"""# Saving and Recreating the trained model"""

## Save the whole model
model.save('./trained_CNN/imagewoof/my_model_imagewoof.h5')

## Recreate whole model
new_model=keras.models.load_model('./trained_CNN/imagewoof/my_model_imagewoof.h5')
new_model.summary()

"""# DOWNLOAD created files
In this case downloading the previously created model.
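
# The snippet is cut off here in the source; a minimal sketch of the download step the
# cell above describes, assuming a Google Colab environment:
from google.colab import files
files.download('./trained_CNN/imagewoof/my_model_imagewoof.h5')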
Example #4

# Assumed imports for this snippet:
import numpy as np
from tensorflow.keras.datasets import fashion_mnist
from tensorflow.keras.utils import to_categorical
from tensorflow.keras.layers import Input, Conv2D, MaxPooling2D, Flatten, Dense
from tensorflow.keras.models import Model
(x_train, y_train), (x_test, y_test) = fashion_mnist.load_data()

x_train = np.reshape(x_train, (len(x_train), 28, 28, 1))
x_test = np.reshape(x_test, (len(x_test), 28, 28, 1))
y_train = to_categorical(y_train)
y_test = to_categorical(y_test)

ip = Input(shape=(28, 28, 1))
x = Conv2D(16, (3, 3), activation='relu', padding='same')(ip)
x = MaxPooling2D((2, 2), padding='same')(x)
x = Conv2D(32, (3, 3), activation='relu', padding='same')(x)
x = MaxPooling2D((2, 2), padding='same')(x)
x = Conv2D(64, (3, 3), activation='relu', padding='same')(x)
x = MaxPooling2D((2, 2), padding='same')(x)
x = Flatten()(x)
x = Dense(512, activation='relu')(x)
x = Dense(10, activation='softmax')(x)
model = Model(ip, x)

model.compile(optimizer='adam',
              loss='categorical_crossentropy',
              metrics=['accuracy'])
model.fit(x_train,
          y_train,
          epochs=10,
          batch_size=128,
          shuffle=True,
          verbose=1,
          validation_data=(x_test, y_test))
model.save("fashion_mnist2.h5")
Example #5

# Assumed imports for this snippet (`vgg`, `lb`, the train/test splits, `lr`,
# `batch_size` and `epochs` are defined in code omitted above):
import numpy as np
from tensorflow.keras.models import Model
from tensorflow.keras.optimizers import Adam
from sklearn.metrics import classification_report
# `vgg` is the pre-trained base and the right-hand `model` is the classifier head
# built on top of it in the omitted code; Model() joins them end to end.
model = Model(inputs=vgg.input, outputs=model)

for layer in vgg.layers:
    layer.trainable = False

# compile
print("[INFO] compiling model...")
opt = Adam(lr=lr, decay=lr / epochs)
model.compile(loss="binary_crossentropy", optimizer=opt, metrics=["accuracy"])

# train
print("[INFO] training model...")
H = model.fit(x=X_train,
              y=y_train,
              batch_size=batch_size,
              steps_per_epoch=len(X_train) // batch_size,
              validation_data=(X_test, y_test),
              validation_steps=len(X_test) // batch_size,
              epochs=epochs)

# test
print("[INFO] evaluating model...")
predIdxs = model.predict(X_test, batch_size=batch_size)
predIdxs = np.argmax(predIdxs, axis=1)

print(
    classification_report(y_test.argmax(axis=1),
                          predIdxs,
                          target_names=lb.classes_))

# confusion matrix
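
# The snippet ends at this comment in the source; a minimal sketch of the confusion
# matrix it announces, using the same sklearn helper as Example #1:
from sklearn.metrics import confusion_matrix

cm = confusion_matrix(y_test.argmax(axis=1), predIdxs)
print(cm)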