Code Example #1
from keras.preprocessing.image import ImageDataGenerator

def evaluate():
    # parse_args and get_model_artifacts are project helpers (sketched below).
    args = parse_args()
    model_name = args.model_name
    dataset_path = args.dataset_path
    # Model class, model-specific preprocessing function, and expected input size.
    Model, preprocess_input, size = get_model_artifacts(model_name)
    # Stream images from the dataset directory with the matching preprocessing.
    idg = ImageDataGenerator(preprocessing_function=preprocess_input)
    # Pretrained ImageNet weights; compile only to attach the accuracy metric.
    model = Model(weights="imagenet")
    model.compile("adam", "categorical_crossentropy", metrics=["accuracy"])
    out = model.evaluate(
        idg.flow_from_directory(dataset_path, target_size=(size, size)))
    print(out)
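Example #1 calls two helpers, parse_args and get_model_artifacts, that are defined elsewhere in the project and not shown above. A minimal sketch of what they might look like (the argument names come from the snippet; the model registry and defaults are assumptions):

import argparse
from keras.applications import mobilenet, resnet50

def parse_args():
    # Argument names match those read in evaluate(); defaults are illustrative.
    parser = argparse.ArgumentParser()
    parser.add_argument('--model_name', default='resnet50')
    parser.add_argument('--dataset_path', default='./imagenet-data/validation')
    return parser.parse_args()

def get_model_artifacts(model_name):
    # Maps a model name to (model class, preprocessing function, input size).
    registry = {
        'resnet50': (resnet50.ResNet50, resnet50.preprocess_input, 224),
        'mobilenet': (mobilenet.MobileNet, mobilenet.preprocess_input, 224),
    }
    return registry[model_name]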
Code Example #2
File: mobilenet_eval.py  Project: davidepatti/cnn
    print('--> Starting evaluation...')
    import sys
    from keras.preprocessing.image import ImageDataGenerator
    from keras import metrics

    # Top-5 accuracy metric for the ImageNet evaluation.
    def in_top_k(y_true, y_pred):
        return metrics.top_k_categorical_accuracy(y_true, y_pred, k=5)

    # preprocess_input is the model-specific preprocessing function
    # imported earlier in the file.
    val_datagen = ImageDataGenerator(preprocessing_function=preprocess_input)
    validation_generator = val_datagen.flow_from_directory(
        './imagenet-data/validation',
        target_size=(224, 224),
        batch_size=10,
        class_mode='categorical',
        shuffle=False)

    # Freeze the model; compile only to attach the evaluation metrics.
    model.trainable = False
    model.compile(loss='categorical_crossentropy',
                  optimizer='sgd',
                  metrics=['accuracy', in_top_k])

    # 5000 steps x batch_size 10 covers the 50,000 ImageNet validation images.
    results = model.evaluate(validation_generator,
                             steps=5000,
                             workers=1,
                             max_queue_size=1)

    print('--> Results for ' + sys.argv[1])
    print(model.metrics_names)
    print(results)

#########################################
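The fragment in Example #2 relies on model and preprocess_input being defined earlier in mobilenet_eval.py. Given the file name, a plausible setup is sketched below (this is an assumption, not code from the project):

import sys
from keras.applications.mobilenet import MobileNet, preprocess_input

# Assumed setup: MobileNet with pretrained ImageNet weights. The original
# script prints sys.argv[1] as a label for the run, so some identifier is
# expected on the command line.
model = MobileNet(weights='imagenet')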
Code Example #3
model.compile(optimizer=SGD(lr=1e-4, momentum=0.9, nesterov=True),
              loss='categorical_crossentropy',
              metrics=['accuracy'])

model.summary()
"""# Modelin Oluşturulması"""

# Train on the augmented training data, validating on the held-out test set.
history = model.fit_generator(datagen.flow(x_train,
                                           y_train,
                                           batch_size=batch_size),
                              validation_data=(x_test, y_test),
                              steps_per_epoch=len(x_train) // batch_size,
                              epochs=epochs)
"""# Sonuçların Görselleştirilmesi"""

score = model.evaluate(x_test, y_test, verbose=0)
print('Test Loss:', score[0])
print('Test Accuracy:', score[1])

print(history.history.keys())
plt.plot(history.history['acc'])
plt.plot(history.history['val_acc'])
plt.title('model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.show()
# summarize history for loss
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.show()
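Example #3 uses datagen, x_train/y_train, x_test/y_test, batch_size, and epochs without defining them (the model itself is also built earlier in the notebook). A minimal sketch of the assumed setup, with an illustrative dataset and hyperparameter values that are guesses rather than part of the original:

import numpy as np
import matplotlib.pyplot as plt
from keras.datasets import cifar10
from keras.utils import to_categorical
from keras.preprocessing.image import ImageDataGenerator
from keras.optimizers import SGD

batch_size = 32
epochs = 20

# Load and normalize the data, one-hot encode the labels.
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
x_train = x_train.astype('float32') / 255.0
x_test = x_test.astype('float32') / 255.0
y_train = to_categorical(y_train, 10)
y_test = to_categorical(y_test, 10)

# Light augmentation applied to the training stream only.
datagen = ImageDataGenerator(rotation_range=15,
                             width_shift_range=0.1,
                             height_shift_range=0.1,
                             horizontal_flip=True)
datagen.fit(x_train)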
Code Example #4
import numpy as np
from sklearn.preprocessing import LabelBinarizer
from sklearn.model_selection import train_test_split
from keras.applications.mobilenet import MobileNet

# data, labels, and processed_imgs are prepared earlier in the script.
# One-hot encode the primary microconstituent labels.
lb = LabelBinarizer()
lb.fit(np.asarray(data['primary_microconstituent']))
y = lb.transform(labels)
print('\nLabels Binarized, converting array')

X = np.asarray(processed_imgs)

# Hold out 10% of the images for testing.
X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=0.1, random_state=42)

# MobileNet trained from scratch (weights=None) on 7 classes.
model = MobileNet(weights=None, classes=7)

model.summary()
model.compile(loss='categorical_crossentropy', optimizer='sgd', metrics=['accuracy'])
# TimeHistory is a custom callback (sketched at the end of this example).
time_callback = TimeHistory()
model.fit(X_train, y_train, epochs=5, batch_size=32,
          validation_data=(X_test, y_test), callbacks=[time_callback])
name = 'results/UHCS_MobileNet_Weights'
score = model.evaluate(X_test, y_test)
print('Test score:', score[0])
print('Test accuracy:', score[1])
model.save_weights(name+'.h5')

times = time_callback.times
with open('MobileNet.txt', 'w') as f:
    f.write('Test score:' + str(score[0]) + '\n')
    f.write('Test accuracy:' + str(score[1]) + '\n')
    # times is a list of per-epoch durations; convert it to a string before writing.
    f.write(str(times) + '\n')
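Example #4 passes a TimeHistory callback that is not defined in the snippet. A common implementation, assumed here, records the wall-clock duration of each epoch:

import time
from keras.callbacks import Callback

class TimeHistory(Callback):
    # Collects per-epoch training durations in self.times.
    def on_train_begin(self, logs=None):
        self.times = []

    def on_epoch_begin(self, epoch, logs=None):
        self.epoch_start = time.time()

    def on_epoch_end(self, epoch, logs=None):
        self.times.append(time.time() - self.epoch_start)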