Example #1
    def run_model_test(self, epochs: int = 10):
        model = Sequential()
        model.add(LSTM(10, input_shape=(self.look_back, 1)))
        model.add(Dense(1))
        model.compile(optimizer='adam', loss='mse')
        history = model.fit_generator(self.train, epochs=epochs)

        model.evaluate_generator(self.test)  # assumes the held-out generator is stored as self.test
        trainPredict = model.predict_generator(self.train)
        testPredict = model.predict_generator(self.test)

        return history
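Note: every example in this collection uses the legacy fit_generator / evaluate_generator / predict_generator methods, which were deprecated in TensorFlow 2.1 and removed in later releases; Model.fit, Model.evaluate and Model.predict accept generators directly. A minimal sketch of the method above ported to the modern API, assuming self.train and self.test are the training and held-out generators (self.test is an assumption, not shown in the original):

    def run_model_test(self, epochs: int = 10):
        model = Sequential()
        model.add(LSTM(10, input_shape=(self.look_back, 1)))
        model.add(Dense(1))
        model.compile(optimizer='adam', loss='mse')
        # fit/evaluate/predict accept generators directly in TF >= 2.1
        history = model.fit(self.train, epochs=epochs)

        model.evaluate(self.test)
        trainPredict = model.predict(self.train)
        testPredict = model.predict(self.test)

        return history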
Example #2
def train(model_path, dataset_path, train_params, add_timestamp):

    if add_timestamp:
        artefacts_path = os.path.join(model_path,
                                      time.strftime('%Y-%m-%d-%H-%M-%S'))
    else:
        artefacts_path = model_path

    dataset = LocalTextCategorizationDataset(
        dataset_path,
        batch_size=train_params['batch_size'],
        min_samples_per_label=train_params['min_samples_per_label'],
        preprocess_text=embed)

    logger.info(dataset)

    model = Sequential()
    model.add(
        Dense(train_params['dense_dim'],
              activation='relu',
              input_shape=(768, )))
    model.add(Dense(dataset.get_num_labels(), activation='sigmoid'))
    model.compile(loss='binary_crossentropy',
                  optimizer='sgd',
                  metrics=['accuracy'])

    train_history = model.fit(
        dataset.get_train_sequence(),
        validation_data=dataset.get_test_sequence(),
        epochs=train_params['epochs'],
        verbose=train_params['verbose'],
        workers=train_params['workers'],
        use_multiprocessing=train_params['use_multiprocessing'])

    scores = model.evaluate_generator(dataset.get_test_sequence(), verbose=0)
    logger.info("Test Accuracy: {:.2f}".format(scores[1] * 100))

    os.makedirs(artefacts_path, exist_ok=True)

    model.save(os.path.join(artefacts_path, "model.h5"))

    with open(os.path.join(artefacts_path, "params.json"), "w") as f:
        json.dump(train_params, f)

    with open(os.path.join(artefacts_path, 'labels_index.json'), 'w') as f:
        json.dump(dataset.get_label_to_index_map(), f)

    # train_history.history is not JSON-serializable because it contains numpy arrays
    serializable_hist = {
        k: [float(e) for e in v]
        for k, v in train_history.history.items()
    }
    with open(os.path.join(artefacts_path, "train_output.json"), "w") as f:
        json.dump(serializable_hist, f)

    return scores[1], artefacts_path
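A minimal sketch of invoking this train() function; the paths and hyperparameter values below are illustrative assumptions, but the train_params keys are exactly the ones the function reads:

# Hypothetical invocation; paths and hyperparameter values are assumptions.
train_params = {
    'batch_size': 32,
    'min_samples_per_label': 10,
    'dense_dim': 64,
    'epochs': 5,
    'verbose': 1,
    'workers': 1,
    'use_multiprocessing': False,
}
accuracy, artefacts_path = train(model_path='artefacts',
                                 dataset_path='data/dataset.csv',
                                 train_params=train_params,
                                 add_timestamp=True)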
Example #3
    def evaluate(self, generator, steps=100):
        
        train_model = Sequential([self.extractor(), self.classifier()])
        train_model.summary()
        
        train_model.compile(optimizer='SGD', loss='categorical_crossentropy', metrics=['acc'])

        score = train_model.evaluate_generator(generator, steps=steps)
                    
        return {'loss':score[0], 'acc':score[1]}
Example #4
results = pd.DataFrame(model.history.history)
results.plot()
results[['loss','val_loss']].plot()
results[['accuracy','val_accuracy']].plot()


# =============================================================================
# Saving the model to the working directory
# =============================================================================
model.save('Flower_Classifier_h5')


# =============================================================================
# Evaluating the model on the test set
# =============================================================================
test_loss, test_acc = model.evaluate_generator(test_image_gen)  # evaluate once instead of twice
print('Loss: ', test_loss, 'Accuracy: ', test_acc)

# =============================================================================
# Predicting on a new image
# You can either choose from the test set or use a new image
# =============================================================================


test_img = 'path/to/new_image.jpg'  # Enter the correct path of the new image used for predictions
# In my case a random image of a daisy flower from the test set was picked.
# Note: We know that the image is of a daisy flower but the model doesn't know.
# Let's check it.
plt.imshow(imread(test_img))  # Displaying the new image

from tensorflow.keras.preprocessing import image
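The snippet breaks off after the import; a plausible continuation for predicting on the single image, where the target size and rescaling below are assumptions that must match how the model was trained:

import numpy as np

img = image.load_img(test_img, target_size=(150, 150))  # assumed training input size
img_array = image.img_to_array(img) / 255.0             # assumed 1/255 rescaling, as in training
img_array = np.expand_dims(img_array, axis=0)           # add the batch dimension
pred = model.predict(img_array)
print('Predicted class index:', np.argmax(pred, axis=1)[0])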
Example #5
            datum[data] = convert(data_path, args.delimiter, args.batch,
                                  tokenizer, args.length, path)

    # fit network
    train_generator = DataGenerator(datum['train'], args.batch, args.length,
                                    vocab_size, tokenizer)
    valid_generator = DataGenerator(datum['dev'], args.batch, args.length,
                                    vocab_size, tokenizer)

    model.fit_generator(
        generator=train_generator,
        validation_data=valid_generator,
        steps_per_epoch=int(np.ceil(datum['train'].shape[0] / args.batch)),
        validation_steps=int(np.ceil(datum['dev'].shape[0] / args.batch)),
        epochs=args.epoch,
        use_multiprocessing=False,
        verbose=1)

    # Test the model
    test_generator = DataGenerator(datum['test'], args.batch, args.length,
                                   vocab_size, tokenizer)
    results = model.evaluate_generator(
        generator=test_generator,
        steps=int(np.ceil(datum['test'].shape[0] / args.batch)))
    print('loss: %s' % results[0])
    print('perplexity: %s' % results[1])

    # Save the model to files
    with open(os.path.join(args.out, 'rnnlm.yaml'), 'w') as f:
        f.write(model.to_yaml())
    model.save_weights(os.path.join(args.out, 'rnnlm.hdf5'))
Example #6
model.add(Dense(128, activation='relu'))
model.add(Dropout(0.5))

model.add(Dense(1, activation='sigmoid'))


model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])

model.summary()  # Summary of the model

model.fit_generator(train_gen, epochs=20, callbacks=[plot], validation_data=validation_gen)

model.save('malariaModel.h5')

model.evaluate_generator(validation_gen)

model.metrics_names

"""# Predicting the cell images"""

import numpy as np
from google.colab import files
from keras.preprocessing import image

uploaded = files.upload()

for fn in uploaded.keys():
 
  # predicting images
  path = fn
Example #7
# classifier.add(BatchNormalization())
classifier.add(Activation('relu'))
# classifier.add(Dropout(0.5))

classifier.add(Dense(units=1, activation='sigmoid'))

classifier.compile(optimizer='adam',
                   loss='binary_crossentropy',
                   metrics=['accuracy'])

history = classifier.fit_generator(generator=training_generator,
                                   steps_per_epoch=STEP_SIZE_TRAIN,
                                   validation_data=valid_generator,
                                   validation_steps=STEP_SIZE_VALID,
                                   epochs=10)
score = classifier.evaluate_generator(generator=valid_generator,
                                      steps=STEP_SIZE_VALID)
#print(classifier.predict(valid_generator))
#model detector
classifier.save('mole_detector.h5')

fig, ax = plt.subplots(1, 2, figsize=(10, 3))
ax = ax.ravel()
for i, met in enumerate(['accuracy', 'loss']):
    ax[i].plot(history.history[met])
    ax[i].plot(history.history['val_' + met])
    ax[i].set_title('Model {}'.format(met))
    ax[i].set_xlabel('epochs')
    ax[i].set_ylabel(met)
    ax[i].legend(['train', 'val'])
plt.savefig('CNN_Loss_Accuracy_plots')
Example #8
        plt.plot(history.epoch, history.history[key], color=val[0].get_color(),
                 label=name.title() + ' Training')

    plt.xlabel('Epochs')
    plt.ylabel(key.replace('_', ' ').title())
    plt.legend()

    plt.xlim([0, max(history.epoch)])
    plt.show()


# Plot the model's accuracy over the course of training:
plot_history([('Model', history)])

# Evaluate the model on the test data:
test_loss = model.evaluate_generator(test_data_gen)
print(f'\nTest loss (MSE): {test_loss}')

# Make a forecast for the test data:
prediction = model.predict_generator(test_data_gen)
prediction = scaler_y.inverse_transform(prediction)

# Plot actual vs. predicted prices for the test data:
plt.plot_date(data_y[-87:].index, data_y[-87:].values,
              linestyle='solid', marker=None, label='Actual prices')
plt.plot_date(data_y[-87:].index, prediction.ravel(),
              linestyle='solid', marker=None, label='Forecast')
plt.gcf().autofmt_xdate()
plt.title('Polymetal share price')
plt.ylabel('Share price, Moscow Exchange, RUB')
plt.legend()
Example #9
model.compile(loss='sparse_categorical_crossentropy',
              optimizer=optimizers.Adam(lr=0.0001),
              metrics=['accuracy'])

print(model.summary())

batch_size = 1
history = model.fit_generator(train_generator,
                              steps_per_epoch=train_generator.samples //
                              batch_size,
                              validation_data=validation_generator,
                              epochs=80,
                              verbose=1)

score = model.evaluate_generator(validation_generator,
                                 steps=validation_generator.samples //
                                 batch_size)
print('Validation loss:', score[0])
print('Validation accuracy:', score[1])

plt.plot(history.history["accuracy"])
plt.plot(history.history["val_accuracy"])
plt.title("Model Accuracy")
plt.ylabel("Accuracy")
plt.xlabel("Epoch")
plt.legend(["Train", "Val"], loc="upper left")
plt.savefig("Model Accuracy.png")
plt.show()

plt.plot(history.history["loss"])
Example #10
momentum = 0.9
sgd = SGD(lr=learning_rate, momentum=momentum,
          decay=decay_rate, nesterov=False)
model.compile(optimizer="sgd", loss="categorical_crossentropy",
              metrics=['accuracy'])

model.fit_generator(
    training_set,
    steps_per_epoch=100,
    epochs=50,
    validation_data=test_set,
    validation_steps=200)


# Model Evaluation
model.evaluate_generator(generator=test_set, steps=50)
# OUTPUT
# [1.704445120342617, 0.33798882681564246]
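The two numbers correspond, in order, to model.metrics_names (loss first, then accuracy). Pairing them explicitly avoids indexing mistakes; a small sketch:

# Label each score with its metric name instead of relying on positions.
results = model.evaluate_generator(generator=test_set, steps=50)
print(dict(zip(model.metrics_names, results)))  # e.g. {'loss': 1.70..., 'accuracy': 0.34...}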

test_set.reset()
pred = model.predict_generator(test_set, steps=50, verbose=1)

predicted_class_indices = np.argmax(pred, axis=1)

labels = (training_set.class_indices)
labels = dict((v, k) for k, v in labels.items())
predictions = [labels[k] for k in predicted_class_indices]
predictions = predictions[:200]
filenames = test_set.filenames

print(len(filenames), len(predictions))
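To inspect the results per file, the filenames and predicted labels can be paired up; a sketch assuming pandas is available as pd:

# Pair each test file with its predicted label.
results_df = pd.DataFrame({'filename': filenames[:len(predictions)],
                           'prediction': predictions})
print(results_df.head())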
Example #11
epoch_num = np.arange(0, len(val_acc), dtype=int)
plot1, = plt.plot(epoch_num, acc)
plot2, = plt.plot(epoch_num, val_acc)
plt.legend([plot1, plot2],['training accuracy', 'validation accuracy'])
plt.show()
plot1, = plt.plot(epoch_num, loss)
plot2, = plt.plot(epoch_num, val_loss)
plt.legend([plot1, plot2],['training loss', 'validation loss'])

# send message to telegram app on phone
telegram("LSTM bidirectional done!")

# evaluating model with test and validation set
train_result = model.evaluate_generator(
    generate_array(mode="train"), 
    steps = 863,
    verbose=0)
print('training loss:',train_result[0])
print('training accuracy:', train_result[1])

valid_result = model.evaluate(X_valid, Y_valid, verbose=0)
print('validation loss:',valid_result[0])
print('validation accuracy:', valid_result[1])

test_result = model.evaluate(X_test, Y_test, verbose=0)
print('testing loss:',test_result[0])
print('testing accuracy:', test_result[1])

message = "LSTM\n" + "test_loss :" + str(test_result[0]) + "\n" + "test_acc :" + str(test_result[1])  + "\n" + "valid_loss :" + str(valid_result[0]) + "\n" + "valid_acc :" + str(valid_result[1]) + "\n" + "train_loss :" + str(train_result[0]) + "\n" + "train_acc :" + str(train_result[1]) 
telegram(message)
Example #12
model.compile(loss='categorical_crossentropy',
              optimizer='Adam',
              metrics=['accuracy'])

early_stop = keras.callbacks.EarlyStopping(
            monitor="val_loss",
            min_delta=1e-2,
            patience=4,
            verbose=1,
        )

history = model.fit(train_data, validation_data=test_data, shuffle=True, epochs=30, 
                    verbose=1, workers=3,callbacks = [early_stop])

score = model.evaluate_generator(test_data, verbose=0)
print("Test loss:", score[0])
print("Test accuracy:", score[1])

history_frame = pd.DataFrame(history.history)
history_frame.loc[:, ['loss', 'val_loss']].plot()
history_frame.loc[:, ['accuracy', 'val_accuracy']].plot();

"""

> Save the model (.pb)

"""

import tensorflow as tf
fruitmodel = "/content/SavedmodelFruit"
Example #13
#Use this for mixup
#====================================================================================
history = model.fit_generator(trainGen.generate(),
                              steps_per_epoch=26769 // batch_size,  # training images / batch size
                              epochs=EPOCHS,
                              validation_data=validGen,
                              validation_steps=50 // batch_size,
                              verbose=1,
                              callbacks=[es, mc])

#====================================================================================											
#Standard
#====================================================================================

#history = model.fit_generator(trainGen,
							  #steps_per_epoch = 26769//batch_size, #training images / batch size
							   #epochs = EPOCHS,
							   #validation_data = validGen,
							   #validation_steps = 975//batch_size,
							   #verbose = 1,
							   #callbacks = [es, mc])

#bestModel = load_model("bestModel.h5")

score = model.evaluate_generator(testGen, 975 // batch_size)




Example #14
# Training
model.fit(train_generator,
          validation_data=valid_generator,
          epochs=10,
          callbacks=[es, mc, rp])
# No need to pass the labels separately; the y values were already included when train_generator was created

model.load_weights("best.h5")

# test_generator = training_datagen2.flow_from_dataframe("./data4/",
#                                                         batch_size = 32,
#                                                         target_size=(256, 256),
#                                                         class_mode='categorical',
#                                                         subset='validation')

print("-- Evaluate --")
scores = model.evaluate_generator(valid_generator, steps=5)
print("%s: %.2f%%" % (model.metrics_names[1], scores[1] * 100))

# pred_generator = training_datagen2.flow_from_dataframe(x_pred,
#                                                        x_col="path",
#                                                        y_col=None,
#                                                        target_size=(256,256),
#                                                        class_mode=None,
#                                                        shuffle=False)
print("-- Predict --")
result = model.predict_generator(valid_generator, verbose=1)
np.set_printoptions(formatter={'float': lambda x: "{0:0.1f}".format(x * 100)})
# print(result.map(lambda x: np.round_(x, 1)* 100))
print(result)
Example #15
            vertical_flip=False)  # randomly flip images

        # Compute quantities required for feature-wise normalization
        # (std, mean, and principal components if ZCA whitening is applied).
        datagen.fit(x_train)

        # Fit the model on the batches generated by datagen.flow().
        fg = model.fit_generator(datagen.flow(x_train, y_train, batch_size=batch_size),
                                 steps_per_epoch=x_train.shape[0] // batch_size,
                                 epochs=1)

        # validation_data=(x_test, y_test))
        # Validate the model on the test set at each epoch.
        validations.append(model.evaluate_generator(
            datagen.flow(x_test, y_test, batch_size=batch_size),
            steps=x_test.shape[0] // batch_size))


# Create a file and store all the validation scores of the model in the file

# In[10]:


pickle.dump(validations, open("loss_validation.p",'wb'))
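Reading the pickled scores back later is symmetric; a minimal sketch:

# Load the saved validation scores back from disk.
with open("loss_validation.p", 'rb') as f:
    validations = pickle.load(f)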


# Save data, get final test validation accuracy, and show example predictions.

# In[13]:
Example #16
plt.subplot(1, 2, 2)
plt.plot(loss, label='Training Loss')
plt.plot(val_loss, label='Validation Loss')
plt.legend(loc='upper right')
plt.title('Training and Validation Loss')
plt.show()

###############################################################################
#                              Model Evaluation                               #
###############################################################################

#Loading the best weights
model.load_weights(weight_path)
model.save('full_model.h5')

val_loss, val_acc = model.evaluate_generator(generator=validation_generator,
                                             steps=len(validation_generator))

print('Validation Loss:%f' % val_loss)
print('Validation Accuracy:%f' % val_acc)

###############################################################################
#                        Prediction on validation set                         #
###############################################################################

#Input and actual output of a large sample from the validation data
valid_x, valid_y = next(
    image_gen_val.flow_from_dataframe(dataframe=df_valid,
                                      directory=train_images_dir,
                                      x_col='path',
                                      y_col='class',
                                      seed=42,
Example #17
class CNN:
    def __init__(self, num_class, train_data=None, train_labels=None, test_data=None, test_labels=None, type="custom",
                 model=None, from_generator=False):

        if from_generator:
            self.train_data = train_data
            self.test_data = test_data
            self.num_class = num_class
            self.STEPS_PER_EPOCH = train_data.n // train_data.batch_size
            self.VALIDATION_STEPS = test_data.n // test_data.batch_size
            self.from_generator = True
            self.input_shape = train_data.image_shape

        else:
            self.train_data = train_data
            self.train_labels = train_labels
            self.test_data = test_data
            self.test_labels = test_labels
            self.num_class = num_class
            self.from_generator = False
            self.input_shape = train_data.shape[1:]

        self.type = type

        if type == "resnet50":
            base_model = tensorflow.keras.applications.resnet50.ResNet50(input_shape=self.input_shape, weights="imagenet",
                                                              include_top=False)
            x = tensorflow.keras.layers.GlobalAveragePooling2D()(base_model.output)
            output = None
            if num_class > 2:
                output = Dense(num_class, activation='softmax')(x)
            else:
                output = Dense(1, activation='sigmoid')(x)
            self.model = tensorflow.keras.models.Model(inputs=base_model.input, outputs=output)
        
        elif type == "resnet101":
            base_model = tensorflow.keras.applications.ResNet101(input_shape=self.input_shape, weights="imagenet",
                                                              include_top=False)
            x = tensorflow.keras.layers.Flatten()(base_model.output)
            output = None
            if num_class > 2:
                output = Dense(num_class, activation='softmax')(x)
            else:
                output = Dense(1, activation='sigmoid')(x)
            self.model = tensorflow.keras.models.Model(inputs=base_model.input, outputs=output)
        

        elif type == "inception_resnet":
            base_model = tensorflow.keras.applications.InceptionResNetV2(input_shape=self.input_shape, weights="imagenet",
                                                              include_top=False)
            x = tensorflow.keras.layers.Flatten()(base_model.output)
            output = None
            if num_class > 2:
                output = Dense(num_class, activation='softmax')(x)
            else:
                output = Dense(1, activation='sigmoid')(x)
            self.model = tensorflow.keras.models.Model(inputs=base_model.input, outputs=output)
        
        elif type == "inception":
            base_model = tensorflow.keras.applications.InceptionV3(input_shape=self.input_shape, weights="imagenet",
                                                              include_top=False)
            x = tensorflow.keras.layers.Flatten()(base_model.output)
            output = None
            if num_class > 2:
                output = Dense(num_class, activation='softmax')(x)
            else:
                output = Dense(1, activation='sigmoid')(x)
            self.model = tensorflow.keras.models.Model(inputs=base_model.input, outputs=output)

        elif type == "densenet121":
            base_model = tensorflow.keras.applications.DenseNet121(input_shape=self.input_shape, weights="imagenet",
                                                              include_top=False)
            x = tensorflow.keras.layers.Flatten()(base_model.output)
            output = None
            if num_class > 2:
                output = Dense(num_class, activation='softmax')(x)
            else:
                output = Dense(1, activation='sigmoid')(x)
            self.model = tensorflow.keras.models.Model(inputs=base_model.input, outputs=output)
        elif type == "vgg":

            base_model = tensorflow.keras.applications.VGG19(input_shape=self.input_shape, weights="imagenet",
                                                  include_top=False)

            for layer in base_model.layers:
                layer.trainable = False

            x = Flatten()(base_model.output)
            output = None

            if num_class > 2:
                output = Dense(num_class, activation='softmax')(x)
            else:
                output = Dense(1, activation='sigmoid')(x)

            self.model = tensorflow.keras.models.Model(inputs=base_model.input, outputs=output)
        else:
            self.model = Sequential()
            # Use self.input_shape; train_data has no .shape when it is a generator.
            self.model.add(Conv2D(32, (2, 2), padding='same', input_shape=self.input_shape))
            self.model.add(Activation('relu'))

    def addLayer(self, type, out_activation='softmax', conv_filter=64):

        if self.type != 'custom':
            return

        if type == 'conv' or type == 'convolution':

            self.model.add(Conv2D(conv_filter, (5, 5)))
            self.model.add(Activation('relu'))

        elif type == 'pool' or type == 'pooling':

            self.model.add(MaxPooling2D(pool_size=(2, 2), padding='same'))

        elif type == 'fc' or type == 'fully connected':

            self.model.add(Flatten())
            self.model.add(Dense(512))
            self.model.add(Activation('relu'))

        elif type == 'out' or type == 'output':

            self.model.add(Dense(self.num_class))
            self.model.add(Activation(out_activation))

    def compute(self, loss, train=True, optimizers="adam", lr=0.001, batch=32, num_epochs=10):
        if train:
            if optimizers == "adam":
                self.model.compile(loss=loss, optimizer=tensorflow.keras.optimizers.Adam(learning_rate=lr), metrics=['accuracy'])

            elif optimizers == "sgd":
                self.model.compile(loss=loss, optimizer=tensorflow.keras.optimizers.SGD(learning_rate=lr), metrics=['accuracy'])

            if self.from_generator:
                self.model.fit(self.train_data, validation_data=self.test_data,
                               steps_per_epoch=self.STEPS_PER_EPOCH, validation_steps=self.VALIDATION_STEPS,
                               epochs=num_epochs)
            else:
                self.model.fit(self.train_data, self.train_labels, batch_size=batch, epochs=num_epochs)
        if self.from_generator:
            return self.model.evaluate_generator(self.test_data, verbose=0)
        else:
            return self.model.evaluate(self.test_data, self.test_labels, verbose=0)

    def predict(self, test_data):
        return self.model.predict(test_data)

    def save(self, name="model"):
        self.model.save(f"{name}.h5")

    def load(self, path):
        self.model = tensorflow.keras.models.load_model(path)
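A minimal usage sketch for this class; train_gen and test_gen are assumed to be ImageDataGenerator.flow_from_directory() iterators and are not part of the original:

# Hypothetical generators; build a pretrained ResNet50 head and train it.
cnn = CNN(num_class=2, train_data=train_gen, test_data=test_gen,
          type="resnet50", from_generator=True)
loss, acc = cnn.compute(loss='binary_crossentropy', optimizers="adam",
                        lr=0.001, num_epochs=10)
cnn.save("resnet50_binary")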
Example #18
optimizer = Adam(lr=0.001)
model.compile(loss='sparse_categorical_crossentropy', optimizer=optimizer, metrics=['accuracy'])
from tensorflow.keras.callbacks import EarlyStopping, ReduceLROnPlateau, ModelCheckpoint
es = EarlyStopping(monitor='val_loss', patience=21, mode='min')
file_path = 'c:/data/modelcheckpoint/lotte_efficientnetb0.hdf5'
mc = ModelCheckpoint(file_path, monitor='val_loss',save_best_only=True,mode='min',verbose=1)
rl = ReduceLROnPlateau(monitor='val_loss',factor=0.5,patience=7,verbose=1,mode='min')

history = model.fit_generator(xy_train,
                              steps_per_epoch=(xy_train.samples / xy_train.batch_size),
                              epochs=100,
                              validation_data=xy_val,
                              validation_steps=(xy_val.samples / xy_val.batch_size),
                              callbacks=[es, mc, rl])
from tensorflow.keras.models import load_model
from sklearn.metrics import r2_score


model = load_model('c:/data/modelcheckpoint/lotte_efficientnetb0.hdf5')
loss, acc = model.evaluate_generator(xy_val)

xy_pred = test_datagen.flow_from_directory(
    '../data/lotte/test',
    target_size=(128,128),
    batch_size=64,
    class_mode='categorical',
    shuffle=False
)


result = model.predict_generator(xy_pred, verbose=True)

import pandas as pd
submission = pd.read_csv('c:/data/lotte/sample.csv')
submission['prediction'] = result.argmax(1)
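The submission frame would then typically be written back out; the path below is a hypothetical assumption:

submission.to_csv('c:/data/lotte/submission.csv', index=False)  # hypothetical output path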
Example #19
reduce_lr = ReduceLROnPlateau(monitor='val_acc', factor=0.5, patience=2,
                              verbose=1, mode='max', min_lr=0.00001)

callbacks_list = [checkpoint, reduce_lr]

history = model.fit_generator(train_gen, steps_per_epoch=train_steps, 
                    validation_data=val_gen,
                    validation_steps=val_steps,
                    epochs=20, verbose=1,
                   callbacks=callbacks_list)

model.metrics_names

val_loss, val_acc = \
model.evaluate_generator(test_gen, 
                        steps=len(df_val))

print('val_loss:', val_loss)
print('val_acc:', val_acc)

predictions = model.predict_generator(test_gen, steps=len(df_val), verbose=1)

predictions.shape

df_preds = pd.DataFrame(predictions, columns=['normal', 'tumor'])

df_preds.head()

y_true = test_gen.classes
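With y_true in hand, one way to finish this evaluation is a classification report, as used elsewhere in this collection; a sketch assuming scikit-learn and the class ordering normal=0, tumor=1:

from sklearn.metrics import classification_report

# Threshold the predicted tumor probability; assumes class indices normal=0, tumor=1.
y_pred = (df_preds['tumor'] > 0.5).astype(int)
print(classification_report(y_true, y_pred, target_names=['normal', 'tumor']))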

Example #20
                              validation_data=validation_batch,
                              validation_steps=STEP_SIZE_VALID,
                              epochs=20,
                              callbacks=callbacks,
                              verbose=1)

model.save('cnn_weapon22.h5')

from tensorflow.keras.models import load_model

# load model
model = load_model('cnn_weapon.h5')

STEP_SIZE_TEST = tet_batch.n // tet_batch.batch_size
try:
    pred = model.evaluate_generator(tet_batch, STEP_SIZE_TEST)
except Exception as e:
    print(e)
pred
model.metrics_names

tet_batch.reset()  # Necessary to force it to start from the beginning
Y_pred = model.predict_generator(tet_batch)
y_pred = np.argmax(Y_pred, axis=-1)
sum(y_pred == tet_batch.classes) / 10000

y_pred

# Plot training & validation accuracy values
plt.plot(history.history['accuracy'])
plt.plot(history.history['val_accuracy'])
Example #21
plt.title('Cross Entropy Loss')
plt.plot(history.history['loss'], color='blue', label='train')
plt.plot(history.history['val_loss'], color='orange', label='test')

# plot accuracy
plt.subplot(222)
plt.title('Classification Accuracy')
plt.plot(history.history['accuracy'], color='blue', label='train')
plt.plot(history.history['val_accuracy'], color='orange', label='test')
"""---
## **PART III - MODEL EVALUATION**
---
"""

# Evaluating model
eval = model.evaluate_generator(testing_set_imgs,
                                steps=int(np.ceil(num_imgs_testing / batch_size)))  # steps must be an int
print('\nEvaluation on the test set:')
print("Loss: {:.4}".format(eval[0]))
print("Accuracy: {:.2%}".format(eval[1]))
"""---
## **PART IV - SAVING MODEL INTO DISK**
---
"""

# Saving as Keras model in 2 separate files:

# 1. Model configuration (json file)
cnn_model_json = model.to_json()
with open(project_folder + "/model/tf2x/keras/split/flowers_model_tf2.json",
          "w") as json_file:
    json_file.write(cnn_model_json)
Example #22
train_image_gen.class_indices


warnings.filterwarnings('ignore')

results = model.fit_generator(train_image_gen,epochs=20,
                              validation_data=test_image_gen,
                             callbacks=[early_stop])

model.save('malaria_detector.h5')

#Evaluating the Model
losses = pd.DataFrame(model.history.history)
losses[['loss','val_loss']].plot()
model.metrics_names
model.evaluate_generator(test_image_gen)
# https://datascience.stackexchange.com/questions/13894/how-to-get-predictions-with-predict-generator-on-streaming-test-data-in-keras
pred_probabilities = model.predict_generator(test_image_gen)
pred_probabilities
predictions = pred_probabilities > 0.5
# Numpy can treat this as True/False for us
predictions

print(classification_report(test_image_gen.classes,predictions))

#Predicting on an Image
# Your file path will be different!
para_cell

my_image = image.load_img(para_cell,target_size=image_shape)
Example #23
              optimizer='adam',
              metrics=['accuracy'])

# In[6]:

#4. Train the model
model.fit_generator(train_generator,
                    steps_per_epoch=15,
                    epochs=50,
                    validation_data=test_generator,
                    validation_steps=5)

# In[7]:

#5. Evaluate the model
score = model.evaluate_generator(test_generator, steps=5)
print('loss:', score[0])
print('accuracy:', score[1])

# In[8]:

#6. Make predictions
pred = model.predict_generator(test_generator)
print(test_generator.class_indices)
np.set_printoptions(formatter={'float': lambda x: "{:0.2f}".format(x)})
print(pred)
print(pred.argmax(axis=1))

# ## Improving the accuracy of the example above
# 1. Get more data and augment it (ImageDataGenerator); see the sketch below
# 2. Tune the layer stack
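A sketch of point 1, augmenting the training data with ImageDataGenerator; the parameter values here are illustrative assumptions:

from tensorflow.keras.preprocessing.image import ImageDataGenerator

# Random rotations, shifts and flips expand the effective training set.
augmented_datagen = ImageDataGenerator(rescale=1. / 255,
                                       rotation_range=20,
                                       width_shift_range=0.1,
                                       height_shift_range=0.1,
                                       horizontal_flip=True)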
Example #24
filepath = "final_model_ep_{epoch:02d}_acc_{val_accuracy:.2f}.hdf5"
checkpoint = ModelCheckpoint(filepath,
                             monitor='val_accuracy',
                             verbose=1,
                             save_best_only=False,
                             mode='max')

start = time.time()
fit_history_del = model_delta.fit_generator(
    generator=data_generator(batch_size, x_max, 'delta', '_', 100),
    steps_per_epoch=N_final // batch_size,
    validation_data=data_generator(batch_size, x_max, 'delta', '_', 100),
    validation_steps=N_val // batch_size,
    callbacks=[checkpoint],
    epochs=N_epochs)
end = time.time()

# Saving history and model

model_delta.save("final_model.h5")
with open("final_model_hist", 'wb') as file_pi:
    pickle.dump(fit_history_del.history, file_pi)

# Evaluating the model

score_del = model_delta.evaluate_generator(
    generator=data_generator(batch_size, x_max, 'delta', '_', 100),
    steps=N_test // batch_size)
print(
    f'Accuracy on testing data, D=100, prior = delta: {score_del[1]}, in {end-start} seconds'
)
Example #25
model.compile(loss='sparse_categorical_crossentropy',
              optimizer='Adam',
              metrics=['accuracy'])

model.summary()

print("Fitting the model")

start = time.time()
# Fit the model
history = model.fit_generator(
    train_data_gen,
    steps_per_epoch=int(np.ceil(len(train_data) / float(BATCH_SIZE))),
    epochs=EPOCHS)
end = time.time()

model.save("cifar_model.h5")
#!ls -l --block-size=M

print("Training accuracy: ", history.history['accuracy'][-1])
print("Training time: ", end - start)

start = time.time()
# Evaluate the model
scores = model.evaluate_generator(
    test_data_gen, steps=int(np.ceil(len(test_data) / float(BATCH_SIZE))))
end = time.time()

print("Testing accuracy: ", scores[1])
print("Testing time: ", end - start)
Example #26
early_stopping = EarlyStopping(monitor='val_loss',
                               patience=5,
                               mode='auto',
                               verbose=2)

hist = model.fit_generator(mf_train,
                           steps_per_epoch=100,
                           epochs=2,
                           validation_data=mf_test,
                           validation_steps=4,
                           callbacks=[early_stopping])

# 4. Evaluate and predict

scores = model.evaluate_generator(mf_test, steps=5)
print("%s: %.2f%%" % (model.metrics_names[1], scores[1] * 100))
# accuracy

print(type(mf_train[0][0]))
# mf_train_x_npy = []
# for cnt in range(len(mf_train)):
#     mf_train_x_npy.append([mf_train[cnt][0]])
# print(mf_train_x_npy)
# print(mf_train_x_npy.shape)

# print("========== numpy save 시작 ==========")
# np.save('./data/keras64_mf_train_x.npy', arr=mf_train[0][0])
# np.save('./data/keras64_mf_train_y.npy', arr=mf_train[0][1])
# np.save('./data/keras64_mf_test_x.npy', arr=mf_test[1][0])
# np.save('./data/keras64_mf_test_y.npy', arr=mf_test[1][1])
Example #27
base_model = tf.keras.applications.VGG16(weights='imagenet',
                                         input_shape=(224, 224, 3),
                                         include_top=False)

model = Sequential(
    [base_model,
     GlobalAveragePooling2D(),
     Dense(2, activation='softmax')])

model.compile(Adam(lr=.00001), loss=[focal_loss()], metrics=[AUC()])
mc = ModelCheckpoint('models/best_classifier.h5',
                     monitor='val_loss',
                     save_best_only=True,
                     verbose=1,
                     period=1)
saver = CustomSaver()
model.fit_generator(train_batches,
                    steps_per_epoch=len(train_batches),
                    validation_data=validation_batches,
                    validation_steps=len(validation_batches),
                    epochs=epochs,
                    verbose=1,
                    callbacks=[mc, saver],
                    workers=8)

model.load_weights('models/best_classifier.h5')
model.evaluate_generator(test_batches,
                         steps=len(test_batches),
                         verbose=1,
                         workers=8)
Example #28
plt.plot(history.history['val_accuracy'])
plt.title('Model accuracy')
plt.ylabel('Accuracy')
plt.xlabel('Epoch')
plt.legend(['Train', 'Val'], loc='upper left')
plt.show()

plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('Model loss')
plt.ylabel('Loss')
plt.xlabel('Epoch')
plt.legend(['Train', 'Val'], loc='upper left')
plt.show()

scores = model.evaluate_generator(test_gen, steps=len(test_gen))
print(scores)
print('Model accuracy: {}'.format(scores[1]))

scores = model.evaluate_generator(train_gen, steps=len(train_gen))
print(scores)
print('Model accuracy: {}'.format(scores[1]))


def plot_confusion_matrix(cm,
                          classes,
                          normalize=True,
                          title='Confusion matrix',
                          cmap=plt.cm.Blues):
    plt.figure(figsize=(10, 10))
    plt.imshow(cm, interpolation='nearest', cmap=cmap)
Example #29
epochs = 400
#epochs = 600
samples_per_epoch = 4654
val_samples = 1168

#Fit the model
hist = History()
model.fit_generator(train_generator,
                    steps_per_epoch=(samples_per_epoch // 32),
                    epochs=epochs,
                    verbose=1,
                    validation_data=test_generator,
                    callbacks=[earlystopper, lrate, checkpoint, hist])

#evaluate the model
scores = model.evaluate_generator(test_generator)
print("Accuracy = ", scores[1])

#save model
savePath = wdir
model.save_weights(os.path.join(
    savePath,
    'cnnModelDEp80_weights.h5'))  # save weights to a separate file so the full model below doesn't overwrite them
model.save(os.path.join(savePath, 'cnnModelDEp80.h5'))  #save compiled model

#plot acc and loss vs epochs
import matplotlib.pyplot as plt
print(hist.history.keys())
#accuracy
plt.plot(hist.history['accuracy'])
plt.plot(hist.history['val_accuracy'])
Example #30
# Train the model.
history = model.fit_generator(
    train_generator,
    steps_per_epoch=train_generator.samples // batch_size,
    validation_data=val_generator,
    validation_steps=val_generator.samples // batch_size,
    epochs=num_epochs,
)

epochs = np.arange(1, num_epochs + 1)
fig, [ax1, ax2] = plt.subplots(1, 2, figsize=(10, 4))

# Plot the loss history.
ax1.plot(epochs, history.history["loss"], label="loss")
ax1.plot(epochs, history.history["val_loss"], label="validation loss")
ax1.set_xlabel("epochs")
ax1.legend()

# Plot the accuracy history.
ax2.plot(epochs, history.history["acc"], label="accuracy")
ax2.plot(epochs, history.history["val_acc"], label="validation accuracy")
ax2.set_xlabel("epochs")
ax2.legend()

plt.show()

# Evaluate the model.
test_loss, test_acc = model.evaluate_generator(val_generator)
print(f"test loss: {test_loss:.2f}, test accuracy: {test_acc:.2%}")