# ===== Пример #1 (Example 1 — scraped snippet separator) =====
# Transfer learning, phase 1: freeze every layer of the pre-trained
# convolutional base so only the new classifier head gets trained.
for layer in conv_model.layers:
    layer.trainable = False
print_layer_trainable()
# Recompile after changing the `trainable` flags so they take effect.
new_model.compile(optimizer=optimizer, loss=loss, metrics=metrics)
epochs = 5
steps_per_epoch = 50
# NOTE(review): fit_generator/evaluate_generator are deprecated in modern
# Keras in favour of fit/evaluate, which accept generators directly.
history = new_model.fit_generator(generator=generator_train,
                                  epochs=epochs,
                                  steps_per_epoch=steps_per_epoch,
                                  class_weight=class_weight,
                                  validation_data=generator_test,
                                  validation_steps=steps_test)

# plot_training_history(history)

# result[1] is the first compiled metric (reported below as accuracy).
result = new_model.evaluate_generator(generator_test, steps=steps_test)
print("Test-set classification accuracy: {0:.2%}".format(result[1]))

# FINE TUNING
print("\nFINE TUNING\n")
# Phase 2: unfreeze the convolutional base, then re-freeze everything
# except layers whose names contain 'block4' or 'block5'.
conv_model.trainable = True

for layer in conv_model.layers:
    # Boolean whether this layer is trainable.
    trainable = ('block5' in layer.name or 'block4' in layer.name)

    # Set the layer's bool.
    layer.trainable = trainable

print_layer_trainable()
model.add(Dropout(0.4))
model.add(Dense(100))   # output layer: 100 classes (categories)
model.add(Activation('softmax'))

# Compile the model: set the training parameters.
model.compile(loss="categorical_crossentropy", optimizer='SGD', metrics=["accuracy"])
# NOTE(review): summary() prints the architecture itself; wrapping it in
# print() additionally prints its return value.
print(model.summary())

# Train the model using the data generators.
model.fit_generator(train_generator, steps_per_epoch=nb_train_samples // batch_size,
                    epochs=epochs,
                    validation_data=val_generator,
                    validation_steps=nb_validation_samples // batch_size)

# Evaluate the trained network on the training data.
scores = model.evaluate_generator(train_generator, nb_train_samples // batch_size)
print("Аккуратность работы на тренировочных данных: %.2f%%" % (scores[1]*100))

# Evaluate the trained network on the validation data.
scores = model.evaluate_generator(val_generator, nb_validation_samples // batch_size)
print("Аккуратность работы на валидационных данных: %.2f%%" % (scores[1]*100))

# Evaluate the trained network on the test data.
scores = model.evaluate_generator(test_generator, nb_test_samples // batch_size)
print("Аккуратность работы на тестовых данных: %.2f%%" % (scores[1]*100))
# Save the trained network: serialize the architecture to JSON and write
# it to disk. A context manager guarantees the file handle is closed —
# the original opened "faces_NN.json" without ever closing it (the write
# was also missing at this point).
model_json = model.to_json()
with open("faces_NN.json", "w") as json_file:
    # Write the network architecture to the file.
    json_file.write(model_json)
# ===== Пример #3 (Example 3 — scraped snippet separator) =====
print_layer_trainable()

# Phase 1: train the model with the base layers' trainable flags as set
# before this fragment (presumably frozen — the setup is not visible here).
main_model.compile(optimizer=optimizer, loss=loss, metrics=metrics)

main_model.summary()

epochs = 1

main_history = main_model.fit_generator(generator=generator_train,
                                        epochs=epochs,
                                        steps_per_epoch=steps_per_epoch,
                                        validation_data=generator_test,
                                        validation_steps=steps_test)

main_result = main_model.evaluate_generator(generator_test, steps=steps_test)
print("Test-set classification accuracy: {0:.2%}".format(main_result[1]))

# Phase 2 (fine tuning): unfreeze the base, then keep only layers whose
# names contain 'block4' or 'block5' trainable.
conv_model.trainable = True

for layer in conv_model.layers:
    trainable = ('block5' in layer.name or 'block4' in layer.name)
    layer.trainable = trainable

print_layer_trainable()

# Recompile with a much smaller learning rate for fine tuning.
optimizer_fine = Adam(lr=1e-7)

main_model.compile(optimizer=optimizer_fine, loss=loss, metrics=metrics)

# NOTE(review): the statement below is truncated in this source — the
# remaining fit_generator keyword arguments are missing from the fragment.
main_fine_history = main_model.fit_generator(generator=generator_train,
# ===== Пример #4 (Example 4 — scraped snippet separator) =====
    def buttonClicked(self):
        """Build, train, evaluate, and save a CNN image classifier.

        Reads all hyper-parameters from the UI input widgets, builds a
        small convolutional network, trains it on images found under the
        ``train``/``val`` directories, prints accuracy on the ``test``
        directory, and saves the model to ``TestModel.h5``.
        """
        train_dir = 'train' # Directory with the training data
        val_dir = 'val' # Directory with the validation data
        test_dir = 'test' # Directory with the test data
        img_width, img_height = 150, 150 # Image dimensions
        input_shape = (img_width, img_height, 3) # Shape of the image tensor fed to the network's input
        
        # Hyper-parameters taken from the UI spin boxes.
        epochs = self.InputEpochs.value()
        Nclasses = self.InputClass.value()
        batch_size = self.InputBatch.value()
        nb_train_samples = self.InputTrain.value()
        nb_validation_samples = self.InputValidation.value()
        nb_test_samples = self.InputTest.value()
        
        # Four conv/pool stages followed by a dense classifier head.
        model = Sequential()
        model.add(Conv2D(32, (3, 3), input_shape=input_shape))
        model.add(Activation('relu'))
        model.add(MaxPooling2D(pool_size=(2, 2)))

        model.add(Conv2D(64, (3, 3)))
        model.add(Activation('relu'))
        model.add(MaxPooling2D(pool_size=(2, 2)))

        model.add(Conv2D(64, (3, 3)))
        model.add(Activation('relu'))
        model.add(MaxPooling2D(pool_size=(2, 2)))

        model.add(Conv2D(128, (3, 3)))
        model.add(Activation('relu'))
        model.add(MaxPooling2D(pool_size=(2, 2)))

        model.add(Flatten())
        model.add(Dense(768))
        model.add(Activation('selu'))
        model.add(Dropout(0.5))
        model.add(Dense(Nclasses))
        model.add(Activation('softmax'))

        model.compile(loss='categorical_crossentropy',
            optimizer='adam',
            metrics=['accuracy'])

        # One generator (pixel rescaling only) shared by all three splits.
        datagen = ImageDataGenerator(rescale=1. / 255)

        train_generator = datagen.flow_from_directory(
            train_dir,
            target_size=(img_width, img_height),
            batch_size=batch_size,
            class_mode='categorical')

        val_generator = datagen.flow_from_directory(
            val_dir,
            target_size=(img_width, img_height),
            batch_size=batch_size,
            class_mode='categorical')

        test_generator = datagen.flow_from_directory(
            test_dir,
            target_size=(img_width, img_height),
            batch_size=batch_size,
            class_mode='categorical')

        model.fit_generator(
            train_generator,
            steps_per_epoch=nb_train_samples // batch_size,
            epochs=epochs,
            validation_data=val_generator,
            validation_steps=nb_validation_samples // batch_size)

        # scores[1] is the compiled 'accuracy' metric.
        scores = model.evaluate_generator(test_generator, nb_test_samples // batch_size)

        print("Аккуратность на тестовых данных: %.2f%%" % (scores[1]*100))

        model.save('TestModel.h5')
# ===== Пример #5 (Example 5 — scraped snippet separator) =====
                                              class_mode='binary')

# Validation and test generators — built from `datagen`, which is defined
# above this fragment (presumably the same pipeline as for training).
val_generator = datagen.flow_from_directory(val_dir,
                                            target_size=(img_width,
                                                         img_height),
                                            batch_size=batch_size,
                                            class_mode='binary')

test_generator = datagen.flow_from_directory(test_dir,
                                             target_size=(img_width,
                                                          img_height),
                                             batch_size=batch_size,
                                             class_mode='binary')

# Train for 2 epochs, validating after each epoch.
model.fit_generator(train_generator,
                    steps_per_epoch=nb_train_samples // batch_size,
                    epochs=2,
                    validation_data=val_generator,
                    validation_steps=nb_validation_samples // batch_size)

# Final accuracy on the held-out test split.
scores = model.evaluate_generator(test_generator,
                                  nb_test_samples // batch_size)
print("Accuracy: %.2f%%" % (scores[1] * 100))

# Base accuracy: 90.81%
# V2 - 512 neurons: 91.43%
# V2 - 128 neurons: 91.27%
# V2 - 256 neurons: 91.11%
# V2 - 1024 neurons: 91.30%
# V2 - 512 > 256: 91.62%
# plt.show()
# summarize history for loss
# Plot the training and validation loss curves from the fit history.
for metric_key in ('loss', 'val_loss'):
    plt.plot(history.history[metric_key])
plt.title('model validation loss')
plt.xlabel('epoch')
plt.ylabel('loss')
plt.legend(['train', 'val'], loc='upper left')
plt.show()

# Wrap the test scores and the word embeddings in a generator the model
# can consume batch by batch.
test_generator = CV2EmbeddingDataGenerator(testScores,
                                           embeddings.embedding,
                                           maxlen=maxlen,
                                           batch_size=batch_size)

# Evaluate over the whole generator. len(test_generator) is the
# idiomatic spelling of test_generator.__len__() — same value, cleaner.
scoreSeg = model.evaluate_generator(
    test_generator.data_generation_NN(batch_size), len(test_generator))

print("loss = ", scoreSeg)


# pearson=nanpearson(testScores[:][1],score_pearson)
def pearson_r(x, y):
    """Compute the Pearson correlation coefficient between two arrays.

    Parameters
    ----------
    x, y : array_like
        1-D sequences of equal length.

    Returns
    -------
    float
        Pearson's r between ``x`` and ``y``.
    """
    # np.corrcoef returns the full 2x2 correlation matrix; the
    # off-diagonal entry [0, 1] is the correlation between x and y.
    corr_mat = np.corrcoef(x, y)

    # Bug fix: the original returned the whole matrix even though the
    # comment said "Return entry [0,1]".
    return corr_mat[0, 1]

    Conv2D(64, (1, 3),
           activation='relu',
           input_shape=(1, N_channels * 30 * FreqSample // step, 1)))
# Remainder of the (1 x N) CNN: alternating conv/pool stages, then a
# dense head ending in a 5-way softmax classifier.
conv.add(MaxPooling2D((1, 2)))

conv.add(Conv2D(128, (1, 3), activation='relu'))
conv.add(MaxPooling2D((1, 2)))

conv.add(Conv2D(256, (1, 3), activation='relu'))
conv.add(MaxPooling2D((1, 2)))

conv.add(Flatten())
conv.add(Dense(64, activation='relu'))
conv.add(Dropout(0.5))
conv.add(Dense(5, activation='softmax'))

conv.summary()
# Restore previously trained weights before evaluating.
conv.load_weights(model_file)
conv.compile(loss='categorical_crossentropy',
             optimizer='adam',
             metrics=['accuracy'])

# NOTE(review): `conv.model` — a model-of-a-model attribute — looks odd;
# confirm `conv` is a wrapper and not a plain Sequential.
print(conv.model.metrics_names)

score = conv.evaluate_generator(test_gen, steps=3000)

# Close the raw data file handles once evaluation is done.
raw_labels.close()
raw_features.close()

print(score)
#print conv.evaluate(X_test_CNN, y_test_CNN) # this returns [test_loss, test_acc] after maximum epochs
# ===== Пример #8 (Example 8 — scraped snippet separator) =====
batch_size=16

epoch=5
# NOTE(review): true division yields a float; step counts are normally
# integers (use //). `steps` is also never used in the visible fragment.
steps=train_samples/batch_size

# Augmented generator for training; un-augmented generator for val/test.
data_generator_with_aug=ImageDataGenerator(horizontal_flip=True,width_shift_range=0.2,rescale=1./255,height_shift_range=0.2,shear_range=0.2,zoom_range=0.2)
# NOTE(review): no rescale=1./255 here, so val/test images are fed at a
# different pixel scale than the training images — confirm intentional.
data_generator_with_no_aug=ImageDataGenerator(preprocessing_function=None)


train_gen=data_generator_with_aug.flow_from_directory(directory='/content/gdrive/My Drive/chest_xray/train/',target_size=(image_size,image_size),batch_size=32,class_mode='binary')
val_gen=data_generator_with_no_aug.flow_from_directory(directory='/content/gdrive/My Drive/chest_xray/val/',target_size=(image_size,image_size),batch_size=16,class_mode='binary')
test_gen=data_generator_with_no_aug.flow_from_directory(directory='/content/gdrive/My Drive/chest_xray/test/',target_size=(image_size,image_size),batch_size=32,class_mode='binary')

model.compile(loss='binary_crossentropy',optimizer="adam",metrics=['acc'])
m=model.fit_generator(train_gen,epochs=epoch,validation_data=val_gen)
scores=model.evaluate_generator(test_gen)
print("Accuracy: ",scores[1]*100)

model.save('final_model.hdf5')

# Accuracy history plot (train vs. validation).
plt.plot(model.history.history['acc'])
plt.plot(model.history.history['val_acc'])
plt.title('Model Accuracy')
plt.ylabel('Accuracy')
plt.xlabel('Epoch')
plt.legend(['Training set', 'Validation set'], loc='lower left')
plt.show()

# Loss history plot — truncated in this fragment (no labels/show here).
plt.plot(model.history.history['loss'])
plt.plot(model.history.history['val_loss'])
plt.title('Model Loss')
# ===== Пример #9 (Example 9 — scraped snippet separator) =====
        steps_per_epoch=nb_train_samples // batch_size,
        epochs=epoch,
        callbacks=[checkpoint],
        verbose=1,
        validation_data=validation_generator,
        validation_steps=nb_validation_samples // batch_size)

        
#model.save_weights("drive/bitirme2/weights/weight2.h5")


#%%
############# EVALUATION ##############
#evaluation
# Reload the checkpointed weights before evaluating.
model.load_weights("/content/drive/My Drive/bitirme2/weights/weight23.h5")
score = model.evaluate_generator(validation_generator, nb_validation_samples // batch_size, verbose = 1)
print("Test Score:", score[0])
print("Test Accuracy:", score[1])


# Plot training & validation accuracy values
plt.plot(history_model.history['accuracy'])
plt.plot(history_model.history['val_accuracy'])
plt.title('Model accuracy')
plt.ylabel('Accuracy')
plt.xlabel('Epoch')
plt.legend(['Train', 'Test'], loc='upper left')
plt.show()

# Plot training & validation loss values
# NOTE(review): the loss plot is truncated in this scraped fragment.
plt.plot(history_model.history['loss'])
# Four stacked LSTM layers — the first three return full sequences so the
# next LSTM receives a sequence — followed by a 5-way softmax classifier.
model.add(
    layers.LSTM(1000,
                dropout=0.1,
                recurrent_dropout=0.1,
                return_sequences=True))
model.add(
    layers.LSTM(1000,
                dropout=0.1,
                recurrent_dropout=0.1,
                return_sequences=True))
model.add(
    layers.LSTM(1000,
                dropout=0.1,
                recurrent_dropout=0.1,
                return_sequences=True))
model.add(layers.LSTM(1000, dropout=0.1, recurrent_dropout=0.1))
model.add(layers.Dense(5, activation="softmax"))
model.summary()
# Restore pre-trained weights before evaluation.
model.load_weights(model_file)
# NOTE(review): `lr` is the legacy optimizer argument name; newer Keras
# uses `learning_rate`.
model.compile(loss='categorical_crossentropy',
              optimizer=optimizers.Adam(lr=0.0001, decay=0.0001),
              metrics=['accuracy'])

# NOTE(review): `model.model` suggests `model` is a wrapper — confirm.
print(model.model.metrics_names)
score = model.evaluate_generator(test_gen, steps=3000)

# Close the raw data file handles once evaluation is done.
raw_labels.close()
raw_features.close()

print(score)
# ===== Пример #11 (Example 11 — scraped snippet separator) =====
    # Classifier head: 512-unit hidden layer + 10-way softmax.
    model2.add(Dense(512, activation='relu'))
    model2.add(Dense(10, activation='softmax'))
    model2.compile(optimizer='RMSprop',
                   loss='categorical_crossentropy',
                   metrics=['accuracy'])

    history = model2.fit_generator(train_generator,
                                   steps_per_epoch=144,
                                   epochs=27,
                                   validation_data=validation_generator,
                                   validation_steps=48)
    model2.save('arduino11.h5')
    # model2 = tf.keras.models.load_model('arduino10.h5')
    # arduino10.h5 acc:92 val_acc:95 test:94

    test_loss, test_acc = model2.evaluate_generator(test_generator)
    print(test_acc)
    # Plot training vs. validation accuracy history.
    acc = history.history['acc']
    val_acc = history.history['val_acc']
    plt.figure()
    plt.plot(acc)
    plt.plot(val_acc)
    plt.legend(['Accuracy', 'Validation Accuracy'])
    plt.show()

    # 3x3 grid of sample subplots; the loop body is truncated in this
    # scraped fragment (nothing is drawn into the subplots here).
    plt.figure()
    i = 0
    for i in range(9):
        plt.subplot(3, 3, i + 1)
        plt.xticks([])
        plt.yticks([])
# Report the class-name -> index mapping discovered by the generator.
print(training_set.class_indices)
print('You Have :', len(training_set.class_indices), 'Class')

model.summary()

model_info = model.fit_generator(training_set,
                                 steps_per_epoch=count // batch_size,
                                 epochs=10,
                                 validation_data=validation_set,
                                 validation_steps=378 // batch_size)

model.save_weights('my_model_weights.h5')  #save model

#model.load_weights('my_model_weights.h5') #load model

# NOTE(review): scoreSeg is computed but never used in this fragment.
scoreSeg = model.evaluate_generator(test_set, 400)
# Rewind the generator so predictions start from the first batch.
test_set.reset()
predict = model.predict_generator(test_set, 400)

# Predicted vs. actual class indices ("tahmin" = predicted,
# "Gerçek" = actual in the Turkish output strings below).
print('***tahmin Değerleri***')
print(np.argmax(predict, axis=1))
print('***Gerçek Değerleri***')
print(test_set.classes)

print(test_set.class_indices)

pred = np.argmax(predict, axis=1)

print("Confusion Matrix")
print(confusion_matrix(test_set.classes, pred))