save_best_only=True,
                             mode='max',
                             period=2)
tensorboard = TensorBoard(log_dir='./logs', batch_size=batch_size)
callbacks_list = [checkpoint, tensorboard]

epochs = 25
steps_per_epoch = 100
number_of_validation_batches = int(np.ceil(generator_validation.n / batch_size))
history = new_model.fit_generator(
    generator=generator_train,
    epochs=epochs,
    steps_per_epoch=steps_per_epoch,
    validation_data=generator_validation,
    validation_steps=number_of_validation_batches,
    callbacks=callbacks_list)
acc = history.history['categorical_accuracy']
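# NOTE: hedged aside, not part of the original snippet. The accuracy curve
# extracted above is never used; a minimal plot of it might look like this
# (assumes matplotlib is available):
import matplotlib.pyplot as plt
plt.plot(range(1, epochs + 1), acc, label='training categorical_accuracy')
plt.xlabel('epoch')
plt.ylabel('accuracy')
plt.legend()
plt.show()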

# Create a data loader for this one.
prediction = new_model.predict_generator(generator_test)
print(type(prediction), type(test_ID_list))
pred_name = np.argmax(prediction, axis=1)
mat = cfm(generator_test.classes, pred_name)  # cfm: presumably sklearn's confusion_matrix; valid only if generator_test does not shuffle
print("confusion matrix")
print(mat)
true = np.trace(mat)  # sum of the diagonal = number of correct predictions
acc = true / len(test_ID_list)
print("test accuracy", acc)
Example #2
nb_samples = test_df.shape[0]

# Create test ImageDataGenerator objects
test_gen = ImageDataGenerator(rescale=1. / 255,
                              preprocessing_function=preprocess_input)
test_generator = test_gen.flow_from_dataframe(test_df,
                                              "MaskDataset/test",
                                              x_col='filename',
                                              y_col=None,
                                              class_mode=None,
                                              target_size=IMAGE_SIZE,
                                              batch_size=batch_size,
                                              shuffle=False)

#Predict test images
predict = model.predict_generator(test_generator,
                                  steps=int(np.ceil(nb_samples / batch_size)))

# Map the predicted probabilities onto the three categories
test_df['category'] = np.argmax(predict, axis=-1)
# Map class indices back to the class names seen at training time
label_map = dict((v, k) for k, v in train_generator.class_indices.items())
test_df['category'] = test_df['category'].replace(label_map)
# Assuming the training class folders were named '0', '1' and '2', turn
# those names into human-readable labels (string keys, since the previous
# replace already converted the column to class-name strings)
test_df['category'] = test_df['category'].replace({
    '0': 'NO PERSON',
    '1': 'ALL THE PEOPLE',
    '2': 'SOME'
})

# Save the result to a CSV file
submission_df = test_df.copy()
submission_df['Id'] = submission_df['filename']
submission_df['Category'] = submission_df['category']
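# NOTE: hedged completion, not part of the original snippet. The snippet
# stops before actually writing the file promised by the comment above; a
# minimal finish might be (the filename is hypothetical):
submission_df[['Id', 'Category']].to_csv('submission.csv', index=False)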
Example #3
# batch_size can be 1 or any factor of the test dataset size, so that the
# test dataset is sampled exactly once, i.e. no data is left out (see the
# sketch after this call)
test_generator = data_generator.flow_from_directory(
    directory='EarthonCanvas/AerialPhoto',
    target_size=(image_size, image_size),
    batch_size=BATCH_SIZE_TESTING,
    class_mode=None,
    shuffle=False,
    seed=123)
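# NOTE: hedged aside, not part of the original snippet. If batch_size does
# not divide the dataset size evenly, rounding the step count up still
# covers every sample exactly once; this is also the value that
# len(test_generator) evaluates to below:
test_steps = int(np.ceil(test_generator.n / test_generator.batch_size))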

# Need to compile model.layers[0] to extract the 2048-dim features.
model.layers[0].compile(optimizer=sgd,
                        loss=OBJECTIVE_FUNCTION,
                        metrics=LOSS_METRICS)

test_generator.reset()
# Extract the features
pred = model.layers[0].predict_generator(test_generator,
                                         steps=len(test_generator),
                                         verbose=1)

fname = test_generator.filenames
sio.savemat('imagemat.mat', mdict={'feature': pred, 'label': fname})
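# NOTE: hedged aside, not part of the original snippet. The saved .mat file
# can be sanity-checked by loading it back with scipy.io:
check = sio.loadmat('imagemat.mat')
print(check['feature'].shape, check['label'].shape)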

#Predicted labels
pred2 = model.predict_generator(test_generator,
                                steps=len(test_generator),
                                verbose=1)
predicted_class_indices = np.argmax(pred2, axis=1)
print(sum(predicted_class_indices == test_generator.classes) /
      len(test_generator.classes))  # overall test accuracy
Example #4
train_generator = data_generator.flow_from_directory(
    'C:/Users/USER/Desktop/resnet50/images/train',
    target_size=(image_size, image_size),
    batch_size=24,
    class_mode='categorical')

validation_generator = data_generator.flow_from_directory(
    'C:/Users/USER/Desktop/resnet50/images/val',
    target_size=(image_size, image_size),
    class_mode='categorical',
    shuffle=False)  # keep a fixed order so .classes aligns with the predictions below

history = my_new_model.fit_generator(train_generator,
                                     steps_per_epoch=500,
                                     epochs=10,
                                     validation_data=validation_generator,
                                     validation_steps=100)
from keras.models import load_model

my_new_model.save_weights(
    "C:/Users/USER/Desktop/resnet50/my_model.h5")  # writes only the weights to an HDF5 file
Y_pred = my_new_model.predict_generator(
    validation_generator,
    steps=int(np.ceil(validation_generator.n / validation_generator.batch_size)))
y_pred = np.argmax(Y_pred, axis=1)
print('Confusion Matrix')
print(confusion_matrix(validation_generator.classes, y_pred))
print('Classification Report')
target_names = ['Cardboard', 'paper', 'plastic']
print(
    classification_report(validation_generator.classes,
                          y_pred,
                          target_names=target_names))
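# NOTE: hedged aside, not part of the original snippet. load_model is
# imported above but never used; saving the full model (architecture plus
# weights) would let it be restored later (the path is hypothetical):
my_new_model.save("C:/Users/USER/Desktop/resnet50/my_model_full.h5")
restored_model = load_model("C:/Users/USER/Desktop/resnet50/my_model_full.h5")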
Example #5
# Feed the images in batches.
# steps_per_epoch: how many steps per epoch it takes to consume all the
# data, i.e. the total number of samples divided by the batch size (see the
# sketch after this call).
model.fit_generator(train_batches,
                    steps_per_epoch=4,
                    validation_data=valid_batches,
                    validation_steps=4,
                    epochs=5)
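# NOTE: hedged aside, not part of the original snippet. The step count
# described in the comment above can be computed instead of hard-coded:
import math
steps = math.ceil(train_batches.n / train_batches.batch_size)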

test_images, test_labels = next(test_batches)
print(test_batches.class_indices)  # cat is index 0, dog is index 1

test_labels = test_labels[:, 0]  # keep only the first column of the one-hot labels
print(test_labels)

predictions = model.predict_generator(test_batches, steps=1)

for i in predictions:
    print(i)

Categories = ['Cat', 'Dog']


def prepare(im):
    im = cv2.imread(im)            # note: OpenCV loads images as BGR
    im = cv2.resize(im, (224, 224))
    return im.reshape(-1, 224, 224, 3)


predictions2 = model.predict(prepare('kopek.jpg'))
print(predictions2)
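# NOTE: hedged aside, not part of the original snippet. cv2.imread returns
# BGR data, while most Keras pretrained backbones expect RGB plus their own
# preprocessing. A sketch of a safer variant; which preprocess_input to use
# depends on the backbone the model was built from (assumption below):
from keras.applications.imagenet_utils import preprocess_input  # assumed backbone preprocessing


def prepare_rgb(path):
    im = cv2.imread(path)
    im = cv2.cvtColor(im, cv2.COLOR_BGR2RGB)  # BGR -> RGB
    im = cv2.resize(im, (224, 224))
    return preprocess_input(im.reshape(-1, 224, 224, 3).astype('float32'))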
Example #6
    with open('Model_resnet5x.json') as f:
        model = model_from_json(
            f.read())  # load a pre-existing model architecture
    model.compile(optimizer=sgd, loss=OBJECTIVE_FUNCTION, metrics=LOSS_METRICS)
    model.load_weights('Model_resnet_weights.h5')  # load the model weights

    test_generator = data_generator.flow_from_directory(
        directory=
        '/ysm-gpfs/pi/gerstein/aj557/data_deeppath/data_slides_resnet/test_slides5x/',
        target_size=(image_size, image_size),
        batch_size=BATCH_SIZE_TESTING,
        class_mode='categorical')

    pred = model.predict_generator(
        test_generator, steps=len(test_generator)
    )  # returns the class probabilities for each test tile
    predicted_class_indices = np.argmax(
        pred, axis=1)  # converts the probabilities into labels
    # results_df contains the tile IDs and the label predicted for each tile
    results_df = pd.DataFrame({
        'id': pd.Series(test_generator.filenames),
        'label': pd.Series(predicted_class_indices)
    })
    results_df['id'] = results_df.id.str.extract(r'(\d+)', expand=False)
    results_df['id'] = pd.to_numeric(results_df['id'], errors='coerce')
    results_df.sort_values(by='id', inplace=True)

    predict = []
    for i in range(len(pred)):
        if (pred[i][0] >= 0.5):
Example #7
    #metrics=['accuracy']
)

model.fit(
    train_generator,  # training data
    steps_per_epoch=80,
    epochs=25,
    # batch_size is ignored/invalid when training from a generator; the
    # generator's own batch size applies
    ##validation_split =0.1,
    ##class_weight = { 0 : 1, 1: 2}
)

results = model.evaluate(test_generator)
print('test loss, test acc:', results)

pred = model.predict_generator(test_generator, verbose=1)
predicted_class_indices = np.argmax(pred, axis=1)
labels = train_generator.class_indices
labels = dict((v, k) for k, v in labels.items())
predictions = [labels[k] for k in predicted_class_indices]
# Compare against the true labels (requires a non-shuffled test generator);
# comparing argmax(pred) with itself would always report a perfect score
report = classification_report(test_generator.classes,
                               predicted_class_indices,
                               digits=4)
print(report)

print(predicted_class_indices)
##print(predictions)
##print (pred) ##-- probability of each class unnormalised

X_test, y_test = next(test_generator)
Example #8
test_gen = generator_with_preprocessing(test_lists, batch_size)

train_steps = math.ceil(len(train_lists) / batch_size)
val_steps = math.ceil(len(val_lists) / batch_size)
test_steps = math.ceil(len(test_lists) / batch_size)

# Train the model
epochs = 30
autoencoder.fit_generator(generator=train_gen,
                          steps_per_epoch=train_steps,
                          epochs=epochs,
                          validation_data=val_gen,
                          validation_steps=val_steps)

# Predict using the learned network
preds = autoencoder.predict_generator(test_gen, steps=test_steps, verbose=0)

# Post processing - pick up "AB" components from test data as y_test
x_test = []
y_test = []
for i, (l,
        ab) in enumerate(generator_with_preprocessing(test_lists, batch_size)):
    x_test.append(l)
    y_test.append(ab)
    if i == (test_steps - 1):
        break
x_test = np.vstack(x_test)
y_test = np.vstack(y_test)

#  Post processing - concatenate "L" as x_test & "AB" as preds
test_preds_lab = np.concatenate((x_test, preds), axis=3).astype(np.uint8)
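# NOTE: hedged aside, not part of the original snippet. To inspect a
# predicted colorization, the Lab result can be converted back to RGB,
# assuming the channels hold values in the ranges skimage's lab2rgb expects:
from skimage.color import lab2rgb
rgb_preview = lab2rgb(test_preds_lab[0].astype(np.float64))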
Example #9
model.summary()

model_info = model.fit_generator(training_set,
                                 steps_per_epoch=count // batch_size,
                                 epochs=10,
                                 validation_data=validation_set,
                                 validation_steps=378 // batch_size)

model.save_weights('my_model_weights.h5')  # save the model weights

#model.load_weights('my_model_weights.h5') #load model

scoreSeg = model.evaluate_generator(test_set, 400)
test_set.reset()
predict = model.predict_generator(test_set, 400)

print('*** Predicted values ***')
print(np.argmax(predict, axis=1))
print('*** Actual values ***')
print(test_set.classes)

print(test_set.class_indices)

pred = np.argmax(predict, axis=1)

print("Confusion Matrix")
print(confusion_matrix(test_set.classes, pred))

print("Results")
print(
Example #10
class Model(object):
    def __init__(self):
        self.embedding_size = 3
        self.epochs = 10
        self.hidden_state_size = 16
        self.data_sequence = DataPreprocessor(64, train=True)
        self.data_sequence.tokenizer.save_vocab()
        self.val_sequence = DataPreprocessor(64, train=False)
        self.history = None
        self.model_path: str = None
        self.model: KerasModel = None

    def build(self):
        self.model = Sequential()
        self.model.add(
            Embedding(self.data_sequence.vocab_size(), self.embedding_size))
        self.model.add(LSTM(self.hidden_state_size))
        self.model.add(Dense(1, activation='sigmoid'))

        self.model.compile('adam', loss='binary_crossentropy', metrics=['acc'])
        self.model.summary()

        try:
            file_name = 'model'
            plot_model(self.model,
                       to_file=f'{file_name}.png',
                       show_shapes=True)
            print(f"Model built. Saved {file_name}.png\n")
        except (ImportError, FileNotFoundError, OSError):
            print(f"Skipping plotting of model due to missing dependencies.")

    def train(self, path: str = None):
        self.model_path = path or f"models/model_emb{self.embedding_size}_epochs{self.epochs}.hdf5"
        checkpoint = ModelCheckpoint(
            f"models/checkpoint_emb{self.embedding_size}_epochs" +
            '{epoch:02d}.hdf5',
            verbose=1)
        self.history = self.model.fit_generator(
            self.data_sequence,
            callbacks=[checkpoint],
            epochs=self.epochs,
            shuffle=True,
            validation_data=self.val_sequence,
            max_queue_size=1024)

        self.model.save(self.model_path)
        self.plot_training()

    def predict(self, data: List[str] = None, model_path: str = None):
        if self.model is None and model_path is not None:
            print(f"Loading model from {model_path}.")
            self.model = load_model(model_path)
            self.data_sequence.tokenizer = Tokenizer.from_vocab()
        elif self.model is None:
            print(f"No model file provided. Training new model.")
            self.build()
            self.train()

        pred_sequence = PredictionSequence(self.data_sequence, data)
        predictions = self.model.predict_generator(pred_sequence,
                                                   steps=len(pred_sequence))
        for index, sample in enumerate(pred_sequence.samples):
            prediction = predictions[index][0]
            print(f"Predicted for sample {sample}: {prediction}")

    def plot_training(self):
        """Plot graphs"""
        # Load the training statistics (model.history)
        # Plot training loss and accuracy
        history = self.history.history
        with open(f"model_history_backup_e{self.epochs}.json", "w") as fp:
            json.dump(history, fp)
        epochs = range(1, self.epochs + 1)

        fig = plt.figure()
        plt.plot(epochs, history['loss'], 'bo', label='Training loss')
        plt.plot(epochs, history['val_loss'], 'b', label='Validation loss')
        plt.title('Training and validation loss')
        plt.xlabel('Epochs')
        plt.ylabel('Loss')
        plt.legend()
        fig.savefig(f"plot_loss_e{self.epochs}.pdf", bbox_inches='tight')
        plt.clf()

        # Plot training and validation accuracy
        fig = plt.figure()
        plt.plot(epochs, history['acc'], 'bo', label='Training acc')
        plt.plot(epochs, history['val_acc'], 'b', label='Validation acc')
        plt.title('Training and validation accuracy')
        plt.xlabel('Epochs')
        plt.ylabel('Accuracy')
        plt.legend()
        fig.savefig(f"plot_acc_e{self.epochs}.pdf", bbox_inches='tight')