Example no. 1
                                                  target_size=(nrow, ncol),
                                                  batch_size=batch_size,
                                                  class_mode='categorical')

model.compile(optimizer='sgd',
              loss='categorical_crossentropy',
              metrics=['accuracy'])
steps_per_epoch = train_generator.n // batch_size
validation_steps = test_generator.n // batch_size

nepochs = 40

hist = model.fit_generator(train_generator,
                           steps_per_epoch=steps_per_epoch,
                           epochs=nepochs,
                           validation_data=test_generator,
                           validation_steps=validation_steps)
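# Note: fit_generator is deprecated since TF 2.1; Model.fit accepts generators
# directly, so the call above has a drop-in equivalent (a sketch):
# hist = model.fit(train_generator,
#                  steps_per_epoch=steps_per_epoch,
#                  epochs=nepochs,
#                  validation_data=test_generator,
#                  validation_steps=validation_steps)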

#Model to JSON
model_json = model.to_json()
with open("model.json", "w") as f:
    f.write(model_json)
#Weights to HDF5
model.save_weights("weights.h5")
print("Saved model to disk")

from tensorflow.keras.models import model_from_json

with open('model.json', 'r') as f:
    model_json = f.read()  # avoid shadowing the json module
loaded_model = model_from_json(model_json)
loaded_model.load_weights("weights.h5", by_name=True)
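# A model rebuilt with model_from_json is not compiled; compile it again before
# evaluating or resuming training. A minimal sketch reusing the settings above:
loaded_model.compile(optimizer='sgd',
                     loss='categorical_crossentropy',
                     metrics=['accuracy'])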
Example no. 2
x_train /= 255
x_test /= 255



# Compute quantities required for feature-wise normalization
# (std, mean, and principal components if ZCA whitening is applied).
datagen.fit(x_train)
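# datagen.fit is only needed when the generator computes statistics from the
# data. A sketch of such a configuration (datagen_fw is a hypothetical name,
# not part of the original snippet):
from tensorflow.keras.preprocessing.image import ImageDataGenerator
datagen_fw = ImageDataGenerator(featurewise_center=True,
                                featurewise_std_normalization=True,
                                zca_whitening=False)
datagen_fw.fit(x_train)  # computes the mean/std (and ZCA components if enabled)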

now = datetime.now()
print("Starting training at", now.strftime("%d-%b-%Y %H:%M:%S")) # lets see how long this takes



# fit model
model.fit_generator(train_it, steps_per_epoch=16, validation_data=val_it, validation_steps=8)




# Fit the model on the batches generated by datagen.flow().
model.fit_generator(datagen.flow(x_train, y_train,
                    batch_size=batch_size),
                    epochs=epochs,
                    validation_data=(x_test, y_test),
                    use_multiprocessing=use_multiprocessing,
                    max_queue_size=10,
                    workers=workers) # workers = 1 or 4?


# model.add(Dropout(0.5))
# 
# model.add(Dense(2))
# model.add(Activation('softmax'))
#     
# model.summary()



model.compile(loss='categorical_crossentropy', optimizer=tf.keras.optimizers.Adam(lr=0.0001), metrics=['accuracy'])

from tensorflow.keras.callbacks import ModelCheckpoint, EarlyStopping
checkpoint = ModelCheckpoint("CatDogClassifier.h5", monitor='val_accuracy', verbose=1, save_best_only=True, save_weights_only=False, mode='auto', period=1)
early = EarlyStopping(monitor='val_accuracy', min_delta=0, patience=20, verbose=1, mode='auto')
history = model.fit_generator(generator=train_data_gen,
                              steps_per_epoch=128,
                              epochs=50,
                              validation_data=val_data_gen,
                              validation_steps=50,
                              callbacks=[checkpoint, early])
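# Since save_weights_only=False, the checkpoint file holds the full model and
# can be restored without rebuilding the architecture (a sketch):
from tensorflow.keras.models import load_model
best_model = load_model("CatDogClassifier.h5")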




# Plot model accuracy and loss
acc = history.history['accuracy']
val_acc = history.history['val_accuracy']

loss = history.history['loss']
val_loss = history.history['val_loss']

epochs_range = range(len(acc))  # early stopping may end training before all 50 epochs

plt.figure(figsize=(12, 4))
Example no. 4
def train_user_model(user_token):
    try:
        # Importing the Keras libraries and packages
        logger.info("train_user_model token : %s",
                    str(user_token),
                    extra={'AppName': 'API'})

        import numpy as np
        from tensorflow.keras.models import Sequential
        from tensorflow.keras.layers import Convolution2D
        from tensorflow.keras.layers import Dropout
        from tensorflow.keras.layers import MaxPooling2D
        from tensorflow.keras.layers import Flatten
        from tensorflow.keras.layers import Dense
        import sklearn as sklearn
        from sklearn.metrics import classification_report, confusion_matrix
        import tensorflowjs as tfjs
        import matplotlib.pyplot as plt
        import os

        training_data_path = TRAINING_DATASET_PATH + user_token + "/train_grey"
        testing_data_path = TRAINING_DATASET_PATH + user_token + "/test_grey"
        model_save_path = TRAINING_DATASET_PATH + user_token + "/model"

        gesture_count = len(os.listdir(training_data_path))

        # Step 1 - Building the CNN
        # Initializing the CNN
        classifier = Sequential()

        # First convolution layer and pooling
        classifier.add(
            Convolution2D(32, (3, 3),
                          input_shape=(64, 64, 1),
                          activation='relu'))
        classifier.add(MaxPooling2D(pool_size=(2, 2)))
        classifier.add(Dropout(0.5))

        # Second convolution layer and pooling
        classifier.add(Convolution2D(64, (3, 3), activation='relu'))
        # input_shape is going to be the pooled feature maps from the previous convolution layer
        classifier.add(MaxPooling2D(pool_size=(2, 2)))
        classifier.add(Dropout(0.5))

        classifier.add(Convolution2D(64, (3, 3), activation='relu'))
        classifier.add(MaxPooling2D(pool_size=(2, 2)))
        classifier.add(Dropout(0.5))

        classifier.add(Convolution2D(128, (3, 3), activation='relu'))
        classifier.add(MaxPooling2D(pool_size=(2, 2)))
        classifier.add(Dropout(0.5))

        # Flattening the layers
        classifier.add(Flatten())

        # Adding a fully connected layer
        classifier.add(Dense(units=128, activation='relu'))
        classifier.add(Dropout(0.5))
        classifier.add(Dense(units=gesture_count,
                             activation='softmax'))  # softmax for more than 2
        # classifier.add(Dense(units = 6, activation = 'sigmoid'))

        # Compiling the CNN
        classifier.compile(
            optimizer='adam',
            loss='categorical_crossentropy',
            metrics=['accuracy'])  # categorical_crossentropy for more than 2

        # Step 2 - Preparing the train/test data and training the model

        # Code copied from - https://keras.io/preprocessing/image/
        from keras.preprocessing.image import ImageDataGenerator

        train_datagen = ImageDataGenerator(rescale=1. / 255,
                                           shear_range=0.2,
                                           zoom_range=0.2,
                                           horizontal_flip=True)

        test_datagen = ImageDataGenerator(rescale=1. / 255)

        training_set = train_datagen.flow_from_directory(
            training_data_path,
            target_size=(64, 64),
            batch_size=5,
            color_mode='grayscale',
            class_mode='categorical')

        test_set = test_datagen.flow_from_directory(testing_data_path,
                                                    target_size=(64, 64),
                                                    batch_size=5,
                                                    color_mode='grayscale',
                                                    class_mode='categorical')

        classifier.fit_generator(
            training_set,
            steps_per_epoch=20,  # batches per epoch, not the number of images
            epochs=1,
            validation_data=test_set,
            validation_steps=5)  # validation batches per epoch

        tfjs.converters.save_keras_model(classifier, model_save_path)

        return "TRAINING SUCCESS"
    except Exception as e:
        exc_type, exc_obj, exc_tb = sys.exc_info()
        logger.error("train_user_model %s at line %s",
                     str(exc_tb.tb_lineno),
                     str(e),
                     extra={'AppName': 'API'})
        return "TRAINING ERROR"
Example no. 5
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))

model.add(Flatten())
model.add(Dense(1024, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(7, activation='softmax'))

# To train the model from scratch (or experiment with other models), use this branch
if mode == "train":
    model.compile(loss='categorical_crossentropy',
                  optimizer=Adam(lr=0.0001, decay=1e-6),
                  metrics=['accuracy'])
    model_info = model.fit_generator(train_generator,
                                     steps_per_epoch=num_train // batch_size,
                                     epochs=num_epoch,
                                     validation_data=validation_generator,
                                     validation_steps=num_val // batch_size)
    model.save_weights('model.h5')
    plot_model_history(model_info)

# detected emotions will be displayed over the webcam feed
elif mode == "display":
    model.load_weights('model.h5')

    # prevents openCL usage and unnecessary logging messages
    cv2.ocl.setUseOpenCL(False)

    # dictionary which assigns each label an emotion (alphabetical order)
    emotion_dict = {
        0: "Angry",
def series_classification(path_to_data, n):
    cwd = os.getcwd()
    if (cwd != path_to_data):
        print("series_classification called from wrong directory")
    else:
        # set necessary directories
        PATH = os.path.join(path_to_data, 'series_filtered')
        train_dir = os.path.join(PATH, 'train')
        validation_dir = os.path.join(PATH, 'validation')
        train_1_dir = os.path.join(
            train_dir, '1')  # directory with our training positive pictures
        train_2_dir = os.path.join(
            train_dir, '2')  # directory with our training negative pictures
        train_3_dir = os.path.join(
            train_dir, '3')  # directory with our training neutral pictures
        validation_1_dir = os.path.join(
            validation_dir,
            '1')  # directory with our validation positive pictures
        validation_2_dir = os.path.join(
            validation_dir,
            '2')  # directory with our validation negative pictures
        validation_3_dir = os.path.join(
            validation_dir,
            '3')  # directory with our validation neutral pictures

        # assign count variables
        num_1_tr = len(os.listdir(train_1_dir))
        num_2_tr = len(os.listdir(train_2_dir))
        num_3_tr = len(os.listdir(train_3_dir))
        num_1_val = len(os.listdir(validation_1_dir))
        num_2_val = len(os.listdir(validation_2_dir))
        num_3_val = len(os.listdir(validation_3_dir))
        total_train = num_1_tr + num_2_tr + num_3_tr
        total_val = num_1_val + num_2_val + num_3_val

        # set constants
        batch_size = 128
        epochs = 15
        IMG_HEIGHT = 150
        IMG_WIDTH = 150

        #prepare data
        train_image_generator = ImageDataGenerator(
            rescale=1. / 255)  # Generator for our training data
        validation_image_generator = ImageDataGenerator(
            rescale=1. / 255)  # Generator for our validation data

        #load images from disc
        train_data_gen = train_image_generator.flow_from_directory(
            batch_size=batch_size,
            directory=train_dir,
            shuffle=True,
            target_size=(IMG_HEIGHT, IMG_WIDTH),
            class_mode='categorical')

        val_data_gen = validation_image_generator.flow_from_directory(
            batch_size=batch_size,
            directory=validation_dir,
            target_size=(IMG_HEIGHT, IMG_WIDTH),
            class_mode='categorical')

        # create the model
        model = Sequential([
            Conv2D(16,
                   3,
                   padding='same',
                   activation='relu',
                   input_shape=(IMG_HEIGHT, IMG_WIDTH, 3)),
            MaxPooling2D(),
            Conv2D(32, 3, padding='same', activation='relu'),
            MaxPooling2D(),
            Conv2D(64, 3, padding='same', activation='relu'),
            MaxPooling2D(),
            Flatten(),
            Dense(512, activation='relu'),
            Dense(
                3
            )  # three neurons in output layer allow for three different categorization options
        ])

        # compile the model; the three one-hot classes call for categorical
        # cross-entropy (binary cross-entropy was a mismatch here)
        model.compile(
            optimizer='adam',
            loss=tf.keras.losses.CategoricalCrossentropy(from_logits=True),
            metrics=['accuracy'])

        # Model Summary
        model.summary()

        # train model
        history = model.fit_generator(train_data_gen,
                                      steps_per_epoch=total_train //
                                      batch_size,
                                      epochs=epochs,
                                      validation_data=val_data_gen,
                                      validation_steps=total_val // batch_size)

        # visualize training
        # ------------------
        acc = history.history['accuracy']
        val_acc = history.history['val_accuracy']

        loss = history.history['loss']
        val_loss = history.history['val_loss']

        epochs_range = range(epochs)

        plt.figure(figsize=(8, 8))
        plt.subplot(1, 2, 1)
        plt.plot(epochs_range, acc, label='Training Accuracy')
        plt.plot(epochs_range, val_acc, label='Validation Accuracy')
        plt.legend(loc='lower right')
        plt.title('Training and Validation Accuracy')

        plt.subplot(1, 2, 2)
        plt.plot(epochs_range, loss, label='Training Loss')
        plt.plot(epochs_range, val_loss, label='Validation Loss')
        plt.legend(loc='upper right')
        plt.title('Training and Validation Loss')
        #plt.show()

        # save data

        # Save the model
        model.save('series_class_model.h5')

        # create classification_results directory
        dirPath = "./series_class_res"
        try:
            os.mkdir(dirPath)
        except OSError:
            print(
                "Warning: Creation of the directory %s failed, might already exist"
                % dirPath)

        # find time and date for naming purposes
        now = datetime.now()  # current date and time
        date_time = now.strftime("%m-%d-%Y_%H-%M-%S")  # avoid ':' so the filename is valid on Windows too

        fname = "series_class_res/learning_data_" + date_time + ".png"
        plt.savefig(fname)
        # ------------------

        # Generate Summary File
        # ------------------
        results = list()
        graphNum = "Graph_" + str(n)
        # create a file with the graphnames and correlations
        results.append(("Graph Number", "Epoch", "Train_Acc", "Val_Acc",
                        "Train_Loss", "Val_Loss"))
        for i in epochs_range:
            results.append(
                (graphNum, i + 1, acc[i], val_acc[i], loss[i], val_loss[i]))

        with open('series_class_res/classification_info.csv', 'a') as f:
            writer = csv.writer(f, delimiter=',')
            writer.writerows(results)  # the with block closes the file automatically
#
# # Output Layer - number of nodes corresponds to number of y labels
# model.add(Dense(num_classes, activation='softmax'))

# Compile Model
model.compile(loss='categorical_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])

# Train the Model
train_generator = train_datagen.flow(X_train, Y_train)
validation_generator = validation_datagen.flow(X_test, Y_test)

history = model.fit_generator(train_generator,
                              epochs=20,
                              verbose=1,
                              validation_data=validation_generator,
                              callbacks=[callbacks])
# Train Model
model.summary()

# Save Model
model.save("emnist_trained.h5")

import matplotlib.pyplot as plt


def plot_graphs(history, string):
    plt.plot(history.history[string])
    plt.plot(history.history['val_' + string])
    plt.xlabel("Epochs")
    plt.ylabel(string)
    plt.legend([string, 'val_' + string])
    plt.show()
Example no. 8
model.add(Conv2D(64, (3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))  # shrink the feature maps

model.add(Flatten())  # flatten into the dense layers
model.add(Dense(128, activation='relu'))
model.add(Dense(3, activation='softmax'))

# 3. Configure the training process
model.compile(loss='categorical_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])

# 4. Train the model
model.fit_generator(
    train_generator,
    steps_per_epoch=15,
    epochs=50,  # repeat 50 times
    validation_data=test_generator,
    validation_steps=5)

# 5. Evaluate the model
print("-- Evaluate --")
scores = model.evaluate_generator(test_generator, steps=5)
print("%s: %.2f%%" % (model.metrics_names[1], scores[1] * 100))

# 6. Use the model
print("-- Predict --")
output = model.predict_generator(test_generator, steps=5)
np.set_printoptions(formatter={'float': lambda x: "{0:0.3f}".format(x)})
print(test_generator.class_indices)
print(output)
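# evaluate_generator and predict_generator are deprecated aliases in TF >= 2.1;
# the unified methods accept generators directly (a sketch of the equivalents):
# scores = model.evaluate(test_generator, steps=5)
# output = model.predict(test_generator, steps=5)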
datagen.fit(x_train)
datagen.fit(x_val)

datagen = ImageDataGenerator(
    rescale=1. / 255)  # rescaling pixel values from [0,255] to [0,1]
datagen.fit(x_test)  # a no-op for a rescale-only generator: fit() only computes featurewise statistics

# Set a callback to stop training early, restoring the best weights seen so far
callbacks_early_stopping = [
    EarlyStopping(monitor='val_loss', patience=20, restore_best_weights=True)
]

history_of_model_1 = model_2.fit_generator(
    datagen.flow(x_train, y_train, batch_size=batch_size_chosen),
    steps_per_epoch=len(x_train) // batch_size_chosen,
    epochs=num_epochs,
    callbacks=callbacks_early_stopping,
    validation_data=datagen.flow(x_val, y_val, batch_size=batch_size_chosen),
    validation_steps=len(x_val) // batch_size_chosen)

#stop the timer
end_t = timer()
chrono = end_t - start_t
print("Elapsed time = {} seconds".format(chrono))
# --------------------------------------------------------------------------- #
# ---- save the model and the weights ----
model_2.save(full_name + model_name + 'model.h5')
model_2.save_weights(full_name + model_name + 'weights.h5')
print('Model saved\n')

# Get the dictionary containing each metric and the loss for each epoch
# model.load_weights('model_weights/MobileNetV2.h5')

model.compile(optimizer='adam',
              loss='binary_crossentropy',
              metrics=[BinaryAccuracy()])

model.summary()

print("Start time:", datetime.now())
print()

history = model.fit_generator(
    train_data_gen,
    steps_per_epoch=total_train // batch_size,
    epochs=epochs,
    validation_data=test_data_gen,
    validation_steps=total_val // batch_size
)

print()
print("End time:", datetime.now())

model.save('model_weights/mobilenet.h5')

acc = history.history['binary_accuracy']
val_acc = history.history['val_binary_accuracy']

loss = history.history['loss']
val_loss = history.history['val_loss']
Example no. 11
# datagen = ImageDataGenerator(rescale=1./255)
datagen = ImageDataGenerator(rescale=1. / 255,
                             rotation_range=15,
                             width_shift_range=.1,
                             height_shift_range=.1,
                             horizontal_flip=True)

# Training params
epochs = 10
batch_size = 32

# Training with data augmentation
history = model.fit_generator(datagen.flow(train_images,
                                           train_labels,
                                           batch_size=batch_size),
                              steps_per_epoch=len(train_images) // batch_size,
                              epochs=epochs,
                              validation_data=(validation_images,
                                               validation_labels))

# Training
# model.fit(train_images,
#           train_labels,
#           epochs=epochs,
#           batch_size=batch_size)

test_loss, test_acc = model.evaluate(test_images, test_labels, verbose=1)
print('\nTest accuracy:', test_acc)

# Plotting training and testing over time
plot_history(history)
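# plot_history is not defined in this snippet; a minimal sketch of such a
# helper, assuming the standard 'accuracy'/'loss' history keys:
import matplotlib.pyplot as plt

def plot_history(history):
    for metric in ('accuracy', 'loss'):
        plt.figure()
        plt.plot(history.history[metric], label='training ' + metric)
        plt.plot(history.history['val_' + metric], label='validation ' + metric)
        plt.xlabel('epoch')
        plt.legend()
    plt.show()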
Example no. 12
from tensorflow.keras.preprocessing.image import ImageDataGenerator 
classificador = Sequential()
classificador.add(Conv2D(32, (3, 3), input_shape = (64, 64, 3), activation = 'relu'))
classificador.add(MaxPooling2D(pool_size = (2, 2)))

classificador.add(Conv2D(32, (3, 3), activation = 'relu'))
classificador.add(MaxPooling2D(pool_size = (2, 2)))

classificador.add(Flatten())

classificador.add(Dense(units = 4, activation = 'relu'))
classificador.add(Dense(units = 4, activation = 'relu'))
classificador.add(Dense(units = 1, activation = 'sigmoid'))
classificador.compile(optimizer = 'adam', loss = 'binary_crossentropy', metrics = ['accuracy'])

gerador_treinamento = ImageDataGenerator(rescale = 1./255, rotation_range=7, 
                                         horizontal_flip = True, shear_range=0.2,
                                         height_shift_range=0.07, zoom_range=0.2)
gerador_teste = ImageDataGenerator(rescale = 1./255)

base_treinamento = gerador_treinamento.flow_from_directory('dataset_personagens/training_set',
                                                 target_size = (64, 64),
                                                 batch_size = 10,
                                                 class_mode = 'binary')
base_teste = gerador_teste.flow_from_directory('dataset_personagens/test_set',
                                            target_size = (64, 64),
                                            batch_size = 10,
                                            class_mode = 'binary')

classificador.fit_generator(base_treinamento, steps_per_epoch = 196, epochs = 100,
                         validation_data = base_teste, validation_steps = 73)
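# The hard-coded steps (196 and 73) can be derived from the generators:
# len() of a flow_from_directory iterator is its number of batches per epoch
# (a sketch of the equivalent call):
# classificador.fit_generator(base_treinamento,
#                             steps_per_epoch=len(base_treinamento),
#                             epochs=100,
#                             validation_data=base_teste,
#                             validation_steps=len(base_teste))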
Example no. 13
    batch_size=192,
    class_mode='categorical',
    shuffle=True)

print(train_generator.n)
print(train_generator.batch_size)
print(253 // 32)

classifier.compile(optimizer='Adam',
                   loss='categorical_crossentropy',  # matches class_mode='categorical' above
                   metrics=['accuracy'])

step_size_train = train_generator.n // train_generator.batch_size

r = classifier.fit_generator(generator=train_generator,
                             steps_per_epoch=step_size_train,
                             epochs=25)

import matplotlib.pyplot as plt
print(r.history.keys())

# loss
plt.plot(r.history['loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.show()

# accuracy

plt.plot(r.history['accuracy'])
Example no. 14
yTrain = yScaler.transform(yTrain)
yTest = yScaler.transform(yTest)

trainDataGen = TimeseriesGenerator(xTrain, yTrain, length=xLen, batch_size=100)

testDataGen = TimeseriesGenerator(xTest, yTest, length=xLen, batch_size=100)

# Fully connected network
modelD = Sequential()
modelD.add(Dense(150, input_shape=(xLen, 5), activation="linear"))
modelD.add(Flatten())
modelD.add(Dense(1, activation="linear"))

modelD.compile(loss="mse", optimizer=Adam(lr=1e-4))

min_price = np.min(np.array(data.iloc[:, 1]))
max_price = np.max(np.array(data.iloc[:, 1]))

history = modelD.fit_generator(trainDataGen,
                               epochs=100,
                               verbose=1,
                               validation_data=testDataGen)

# note: the model is trained with MSE, so scaling by the price range gives only
# a rough error estimate in original units, not a true mean absolute error
plt.plot([(max_price - min_price) * loss for loss in history.history['loss']],
         label='Mean absolute error on the training set')
plt.plot([(max_price - min_price) * val
          for val in history.history['val_loss']],
         label='Mean absolute error on the validation set')
plt.ylabel('Mean error')
plt.legend()
plt.show()
# For practical reasons, we will generate the model only once. For future
# runs of the script, we will load it from memory. Notice that we will need to
# change this if we decide to implement something like
# cross validation, for instance.
#
# In the future, I would like to look into [Git Large File Storage](https://git-lfs.github.com/)
# for this type of thing.

# %%
n_epochs = 50

model_scratch_name = 'model_scratch_epochs=' + str(
    n_epochs) + '_vprop={0:.2f}'.format(val_prop)
if not (PATH_MODELS / (model_scratch_name + '.h5')).exists():
    history_scratch_ = model_scratch.fit_generator(
        generator=training_generator,
        epochs=n_epochs,
        validation_data=validation_generator)
    model_scratch.save(PATH_MODELS / (model_scratch_name + '.h5'))

    # Save history.
    history_scratch = history_scratch_.history
    pickle.dump(
        history_scratch,
        open((PATH_MODELS / ('history_' + model_scratch_name + '.p')), "wb"))

else:
    # Load model.
    model_scratch = tf.keras.models.load_model(
        (PATH_MODELS / (model_scratch_name + '.h5')))

    # Load history.
    history_scratch = pickle.load(
        open((PATH_MODELS / ('history_' + model_scratch_name + '.p')), "rb"))
Example no. 16
model.add(
    Conv2D(32, kernel_size=(3, 3), activation='relu', input_shape=input_shape))
model.add(Conv2D(64, (3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(128, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(num_classes, activation='softmax'))

model.compile(loss='categorical_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])

datagen = ImageDataGenerator(rotation_range=40,
                             width_shift_range=0.2,
                             height_shift_range=0.2,
                             shear_range=0.2,
                             zoom_range=0.2,
                             fill_mode='nearest')

model.fit_generator(datagen.flow(x_train, y_train, batch_size=batch_size),
                    steps_per_epoch=x_train.shape[0] // batch_size,
                    validation_data=(x_test, y_test),
                    epochs=epochs,
                    verbose=2)

score = model.evaluate(x_test, y_test, verbose=2)
print('Test loss:', score[0])
print('Test accuracy:', score[1])

model.save('model.h5')
Example no. 17
model.add(
    Lambda(lambda x: x / 255. - 0.5,
           input_shape=(row, col, ch),
           output_shape=(row, col, ch)))
model.add(Cropping2D(cropping=((75, 25), (0, 0))))
model.add(Conv2D(24, kernel_size=(5, 5), strides=(2, 2), activation='relu'))
model.add(Conv2D(36, kernel_size=(5, 5), strides=(2, 2), activation='relu'))
model.add(Dropout(0.3))
model.add(Conv2D(48, kernel_size=(3, 3), strides=(1, 1), activation='relu'))
model.add(MaxPooling2D())
model.add(Conv2D(64, kernel_size=(3, 3), strides=(1, 1), activation='relu'))
model.add(MaxPooling2D())
model.add(Dropout(0.3))

model.add(Flatten())
model.add(Dense(100))
model.add(Dense(50))
model.add(Dense(10))
model.add(Dense(1))

model.compile(loss='mse', optimizer='adam')
model.fit_generator(train_generator,
                    steps_per_epoch=math.ceil(len(train_samples) / batch_size),
                    validation_data=validation_generator,
                    validation_steps=math.ceil(
                        len(validation_samples) / batch_size),
                    epochs=5,
                    verbose=1)

model.save("./model.h5")
Example no. 18
model.add(Conv2D(64, (3, 3)))
model.add(Dropout(0.2))
model.add(Conv2D(32, (3, 3)))
model.add(Dropout(0.2))
model.add(Conv2D(16, (3, 3)))
model.add(Dropout(0.2))
model.add(Flatten())
model.add(Dense(32, activation='relu'))
model.add(Dense(1, activation='sigmoid'))

from tensorflow.keras.callbacks import EarlyStopping, ReduceLROnPlateau
es = EarlyStopping(monitor='loss', patience=10, mode='auto')
reduce_lr = ReduceLROnPlateau(monitor='val_loss', patience=5, factor=0.5, verbose=1)
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['acc'])
history = model.fit_generator(xy_train,
                              steps_per_epoch=32,
                              epochs=200,
                              validation_data=xy_test,
                              validation_steps=4,
                              callbacks=[es, reduce_lr])
# history = model.fit(x_train, y_train, epochs=200, validation_data=(x_test, y_test), callbacks=[es,reduce_lr])

# fit_generator -> the generator yields x and y together
# steps_per_epoch -> number of samples / batch_size
loss, acc = model.evaluate(xy_test)
print("loss : ", loss)
print("acc : ", acc)

# loss :  0.6928289532661438
# acc :  0.5155529975891113

# acc = history.history['acc']
# val_acc = history.history['val_acc']
# loss = history.history['loss']
model.summary()

model.compile(loss="binary_crossentropy",
              optimizer="rmsprop",
              metrics=["accuracy"])

# model.fit(
#     train_generator,
#     steps_per_epoch = nb_train_samples // batch_size,
#     epochs=epochs,
#     validation_data=validation_generator,
#     validation_steps=nb_validation_samples // batch_size)

model.fit_generator(train_generator,
                    steps_per_epoch=nb_train_samples // batch_size,
                    epochs=epochs,
                    validation_data=validation_generator,
                    validation_steps=nb_validation_samples // batch_size)

# model.compile(loss="binary_crossentropy", optimizer="rmsprop", metrics=["accuracy"])

# model.save_weights("first_try.h5")
# model.save("fisrt_try_model.h5")

model.save("first_try.h5")

img_pred = image.load_img("spital.jpg", target_size=(150, 150))
img_pred = image.img_to_array(img_pred)  # rescale by 1/255 here if the training generator rescaled
img_pred = np.expand_dims(img_pred, axis=0)

result = model.predict(img_pred)


model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['acc'])




model.summary()




res = model.fit_generator(train_transform,
                          steps_per_epoch=7616 // 128,
                          validation_steps=2292 // 128,
                          epochs=30,
                          validation_data=test_transform)




model.save('Mechanical-Parts-1.h5')




#import numpy library
import numpy as np
#import load_model method to load our saved model
from tensorflow.keras.models import load_model
Example no. 21
model.add(Dense(1, activation='sigmoid'))

from tensorflow.keras.optimizers import Adam

# use the configured Adam instance; the original passed the string 'adam',
# which silently ignored the lr=0.001 setting
optimizer = Adam(lr=0.001)
model.compile(loss='binary_crossentropy', optimizer=optimizer, metrics=['acc'])
from tensorflow.keras.callbacks import EarlyStopping, ReduceLROnPlateau, ModelCheckpoint

es = EarlyStopping(monitor='val_loss', patience=30)
lr = ReduceLROnPlateau(monitor='val_loss', patience=15, factor=0.5, verbose=1)
filepath = 'c:/data/modelcheckpoint/keras67_1_checkpoint3.hdf5'
cp = ModelCheckpoint(filepath, save_best_only=True, monitor='val_loss')
history = model.fit_generator(
    xy_train,
    steps_per_epoch=xy_train.samples // xy_train.batch_size,
    epochs=500,
    validation_data=xy_val,
    validation_steps=xy_val.samples // xy_val.batch_size,
    callbacks=[es, cp, lr])
from tensorflow.keras.models import load_model
from sklearn.metrics import r2_score

loss, acc = model.evaluate_generator(xy_val)

print("loss :", loss)
print("acc :", acc)

acc = history.history['acc']
val_acc = history.history['val_acc']
loss = history.history['loss']
val_loss = history.history['val_loss']
Example no. 22
test_datagen = ImageDataGenerator(rescale = 1./255)

training_set = train_datagen.flow_from_directory('dataset/training_set',
                                                 target_size = (128, 128),
                                                 batch_size = 32,
                                                 class_mode = 'binary')

test_set = test_datagen.flow_from_directory('dataset/test_set',
                                            target_size = (128, 128),
                                            batch_size = 32,
                                            class_mode = 'binary')

classifier.fit_generator(training_set,
                         steps_per_epoch = 8000,  # note: Keras counts steps in batches, not images
                         epochs = 25,
                         validation_data = test_set,
                         validation_steps = 2000)  # likewise batches per epoch
    
# Part 3 - Making new predictions

import numpy as np
from tensorflow.keras.preprocessing import image
test_image = image.load_img('dataset/single_prediction/cat_or_dog_1.jpg', target_size = (128, 128))
test_image = image.img_to_array(test_image) / 255.0  # match the generators' 1./255 rescaling
test_image = np.expand_dims(test_image, axis = 0)
result = classifier.predict(test_image)
training_set.class_indices
if result[0][0] >= 0.5:  # sigmoid output is a probability; threshold at 0.5
    prediction = 'dog'
else:
                  padding="same",
                  input_shape=(300, 300, 3),
                  activation='relu'))
cnn.add(MaxPooling2D((2, 2)))
cnn.add(
    Convolution2D(128, (3, 3),
                  padding="same",
                  input_shape=(300, 300, 3),
                  activation='relu'))
cnn.add(MaxPooling2D((2, 2)))
cnn.add(Flatten())
cnn.add(Dropout(0.5))
cnn.add(Dense(512, activation='relu'))
cnn.add(Dense(16, activation='softmax'))

cnn.compile(loss='categorical_crossentropy',
            optimizer=optimizers.RMSprop(lr=1e-4),
            metrics=['accuracy'])

cnn.fit_generator(entrenamiento_generador,
                  steps_per_epoch=pasos,
                  epochs=epocas,
                  validation_data=validacion_generador,
                  validation_steps=validation_steps)

target_dir = './modelo/'
if not os.path.exists(target_dir):
    os.mkdir(target_dir)
cnn.save('./modelo/modelo.h5')
cnn.save_weights('./modelo/pesos.h5')
Example no. 24
train_generator = train_datagen.flow_from_directory(
    SOURCE_DIR,
    batch_size=25,
    target_size=(20, 20),
    classes=labels,
    class_mode='categorical',
    subset='training')  # set as training data

validation_generator = train_datagen.flow_from_directory(
    SOURCE_DIR,  # same directory as training data
    batch_size=25,
    classes=labels,
    target_size=(20, 20),
    subset='validation')  # set as validation data

H = model.fit_generator(train_generator,
                        validation_data=validation_generator,
                        epochs=EPOCHS)

#PLOTTING THE TRAINING ACCURACY AND LOSS GRAPHS

# N = EPOCHS
# plt.style.use("ggplot")
# plt.figure()
# plt.plot(np.arange(0, N), H.history["loss"], label="train_loss")
# plt.plot(np.arange(0, N), H.history["val_loss"], label="val_loss")
# plt.plot(np.arange(0, N), H.history["accuracy"], label="train_acc")
# plt.plot(np.arange(0, N), H.history["val_accuracy"], label="val_acc")
# plt.title("Training Loss and Accuracy on Fresh vs Rotten Dataset")
# plt.xlabel("Epoch #")
# plt.ylabel("Loss/Accuracy")
# plt.legend(loc="lower left")
Example no. 25
callback_reduce_lr = ReduceLROnPlateau(monitor='val_loss',
                                       factor=0.1,
                                       min_lr=1e-8,
                                       patience=5,
                                       verbose=1)

# setup list to hold all callback info
callbacks = [callback_early_stopping,
             callback_checkpoint,
             callback_tensorboard,
             callback_reduce_lr]

# Run the model for 100 epochs, 100 steps per epoch
model.fit_generator(generator=generator,
                    epochs=100,
                    steps_per_epoch=100,
                    validation_data=validation_data,
                    callbacks=callbacks)

# load best model
try:
    model.load_weights(path_checkpoint)
except Exception as error:
    print("Error trying to load checkpoint.")
    print(error)

# path to save the model
path = r'C:/Users/TomBrody/Desktop/School/767 ML/SC Bot/NN/model//'

# path plus model and time
model.save(path+'CuDNNLSTM-{}'.format(str(int(time.time())))+'.h5')
Example no. 26
    Flatten(),
    Dense(256, activation='relu'),
    Dense(1, activation='sigmoid')
])

# Compile the model
model.compile(optimizer='adam',
              loss='binary_crossentropy',
              metrics=['accuracy', 'AUC'])

# Display model architecture
print(model.summary())

# Fit the data using generators and display metrics; steps per epoch are
# inferred from len() of the generators, since Keras iterators implement __len__
history = model.fit_generator(train_data_gen,
                              epochs=epochs,
                              validation_data=test_data_gen)

# Store fit history for plotting
acc = history.history['accuracy']
val_acc = history.history['val_accuracy']
loss = history.history['loss']
val_loss = history.history['val_loss']
epochs_range = range(epochs)

# Plot the training/test accuracy and loss
plt.figure(figsize=(8, 8))
plt.subplot(1, 2, 1)
plt.plot(epochs_range, acc, label='Training Accuracy')
plt.plot(epochs_range, val_acc, label='Test Accuracy')
plt.legend(loc='lower right')
Example no. 27
# Building Image Generator
train_datagen = ImageDataGenerator(rescale=1/255)
train_generator = train_datagen.flow_from_directory('Data/Training/',
                                                    target_size=(300, 300),
                                                    batch_size=128,
                                                    class_mode='binary'
                                                    )

#%%
# Training
log_dir = "logs\\fit\\" + datetime.now().strftime("%Y%m%d-%H%M%S")
tensorboard_callback = TensorBoard(log_dir, histogram_freq=1, profile_batch=0)

callbacks = [tensorboard_callback]

history = model.fit_generator(train_generator, steps_per_epoch=8,
                              epochs=15, verbose=1, callbacks=callbacks)

#%%
# Prediction
path = ''

img = image.load_img(path, target_size=(300, 300))
x = image.img_to_array(img)
x = np.expand_dims(x, axis=0)

images = np.vstack([x])
classes = model.predict(images, batch_size=10)
print(classes[0])

if classes[0]>0.5:
    print(fn + " is a human")
Example no. 28
    verbose=1,
    save_best_only=True)

# Early stopping
# early_stopping = EarlyStopping(monitor='val_acc', verbose=1, patience=5)

xception_model_reducer = ReduceLROnPlateau(monitor='val_loss',
                                           factor=0.5,
                                           patience=3)

hists = []

hist = xception_model.fit_generator(
    train_generator_xception,
    steps_per_epoch=train_samples // batch_size_small,
    epochs=1,
    verbose=1,
    callbacks=[xception_model_reducer, xception_model_checkpointer],
    validation_data=validation_generator_xception,
    validation_steps=validation_samples // batch_size_small)

hists.append(hist)

# continue training for ten more epochs with the same callbacks
hist = xception_model.fit_generator(
    train_generator_xception,
    steps_per_epoch=train_samples // batch_size_small,
    epochs=10,
    verbose=1,
    callbacks=[xception_model_reducer, xception_model_checkpointer],
    validation_data=validation_generator_xception,
    validation_steps=validation_samples // batch_size_small)

hists.append(hist)
Example no. 29
model.add(Dense(512, activation='relu'))

model.add(Dropout(0.2))
model.add(Dense(5))  # outputs logits; from_logits=True below applies softmax inside the loss
# %%
model.compile(
    optimizer='adam',
    loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
    metrics=['accuracy'])
# %%
epochs = 80

history = model.fit_generator(
    train_data_gen4,
    steps_per_epoch=int(np.ceil(train_data_gen4.n / float(batch_size))),
    epochs=epochs,
    validation_data=val_data_gen,
    validation_steps=int(np.ceil(val_data_gen.n / float(batch_size))),  # was train_data_gen4.n, a copy-paste slip
    verbose=2)
# %%
acc = history.history['accuracy']
val_acc = history.history['val_accuracy']

loss = history.history['loss']
val_loss = history.history['val_loss']

epochs_range = range(epochs)

plt.figure(figsize=(8, 8))
plt.subplot(1, 2, 1)
plt.plot(epochs_range, acc, label='Training Accuracy')
Example no. 30
        zoom_range=0.0,
        channel_shift_range=0.0,
        fill_mode="nearest",
        cval=0.0,
        horizontal_flip=True,
        vertical_flip=False,
        rescale=None,
        preprocessing_function=None,
        data_format=None,
        validation_split=0.0,
    )
    datagen.fit(x_train)
    model.fit_generator(
        datagen.flow(x_train, y_train, batch_size=batch_size),
        epochs=hvd_adapt_epochs(epochs),
        validation_data=(x_test, y_test),
        workers=4,
        callbacks=hvd_adapt_callbacks([], True),
        verbose=(1 if (hvd.rank() == 0) else 0),
    )
if not os.path.isdir(save_dir):
    os.makedirs(save_dir)
model_path = os.path.join(save_dir, model_name)
if hvd.rank() == 0:
    model.save(model_path)
    print("Saved trained model at %s " % model_path)
if hvd.rank() == 0:
    scores = model.evaluate(x_test, y_test, verbose=1)
    print("Test loss:", scores[0])
    print("Test accuracy:", scores[1])