Example #1
def train_net(steps, epochs):
    # Get images
    X = []
    for filename in os.listdir('./color_images/Train/'):
        X.append(img_to_array(load_img('./color_images/Train/' + filename)))
        # print(filename)
    X = np.array(X, dtype=float)
    # Set up training and test data
    split = int(0.95 * len(X))
    Xtrain = X[:split]
    Xtrain = 1.0 / 255 * Xtrain
    # Design the neural network
    model = Sequential()
    model.add(InputLayer(input_shape=(None, None, 1)))
    model.add(Conv2D(8, (3, 3), activation='relu', padding='same', strides=2))
    model.add(Conv2D(8, (3, 3), activation='relu', padding='same'))
    model.add(Conv2D(16, (3, 3), activation='relu', padding='same'))
    model.add(Conv2D(16, (3, 3), activation='relu', padding='same', strides=2))
    model.add(Conv2D(32, (3, 3), activation='relu', padding='same'))
    model.add(Conv2D(32, (3, 3), activation='relu', padding='same', strides=2))
    model.add(UpSampling2D((2, 2)))
    model.add(Conv2D(32, (3, 3), activation='relu', padding='same'))
    model.add(UpSampling2D((2, 2)))
    model.add(Conv2D(16, (3, 3), activation='relu', padding='same'))
    model.add(UpSampling2D((2, 2)))
    model.add(Conv2D(2, (3, 3), activation='tanh', padding='same'))
    # Finish model
    model.compile(optimizer='rmsprop', loss='mse')
    # Image transformer
    datagen = ImageDataGenerator(
        shear_range=0.2,
        zoom_range=0.2,
        rotation_range=20,
        horizontal_flip=True)
    # Generate training data
    batch_size = 50

    def image_a_b_gen(batch_size):
        for batch in datagen.flow(Xtrain, batch_size=batch_size):
            lab_batch = rgb2lab(batch)
            X_batch = lab_batch[:, :, :, 0]
            Y_batch = lab_batch[:, :, :, 1:] / 128
            yield (X_batch.reshape(X_batch.shape + (1,)), Y_batch)

    # Train model (the TensorBoard callback must be kept and passed to fit)
    tensorboard = TensorBoard(log_dir='/output')
    model.fit_generator(image_a_b_gen(batch_size), callbacks=[tensorboard],
                        steps_per_epoch=steps, epochs=epochs)
    # Test images: convert the held-out RGB images to Lab once, then split channels
    lab_test = rgb2lab(1.0 / 255 * X[split:])
    Xtest = lab_test[:, :, :, 0]
    Xtest = Xtest.reshape(Xtest.shape + (1,))
    Ytest = lab_test[:, :, :, 1:] / 128
    print(model.evaluate(Xtest, Ytest, batch_size=batch_size))
    model.save('./result/network.h5')
    del model
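A minimal usage sketch (not part of the original snippet): the trained net maps an L channel to a/b channels scaled by 1/128, so a grayscale image can be colorized by stitching the prediction back into Lab space. rgb2lab/lab2rgb come from skimage.color, and the image height and width are assumed divisible by 8 so the three stride-2 / upsample-2 stages line up.

import numpy as np
from skimage.color import rgb2lab, lab2rgb

def colorize(model, rgb_image):
    # rgb_image: float array in [0, 1] with shape (H, W, 3)
    lab = rgb2lab(rgb_image)
    L = lab[:, :, 0][np.newaxis, :, :, np.newaxis]  # -> (1, H, W, 1)
    ab = model.predict(L)[0] * 128                  # undo the /128 target scaling
    result = np.zeros(rgb_image.shape)
    result[:, :, 0] = lab[:, :, 0]
    result[:, :, 1:] = ab
    return lab2rgb(result)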
Example #2
def long_short_term_memory(data, settings):
    """Creates a Long short-term memory model (LSTM) and predictions.

    Args:
        data: pandas.DataFrame.
        settings: Dictionary object containing settings parameters.
    Returns:
        A dictionary containing the LSTM model and predictions.
    """

    #  INSTANTIATE MODEL
    model = Sequential()

    #  TRAIN DATA GENERATOR
    train_generator = create_generator(data['train'],
                                       settings['morph'],
                                       shuffle=True)

    #  ADDING LAYERS TO MODEL
    add_lstm_layers(model, data, settings, train_generator[0][0].shape)

    #  COMPILE THE MODEL
    model.compile(loss=settings['loss'], optimizer=settings['optimizer'])

    #  TRAIN USING TRAIN DATA
    model.fit_generator(train_generator,
                        steps_per_epoch=len(train_generator),
                        epochs=settings['epochs'],
                        verbose=0)

    #  TEST DATA GENERATOR
    test_generator = create_generator(data['test'],
                                      settings['morph'],
                                      shuffle=False)

    #  PREDICT USING TEST DATA
    predictions = model.predict(test_generator)

    # denormalized_predictions = ""

    return {'model': model, 'predictions': predictions}
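The snippet leans on helpers it does not define. A hedged sketch of what create_generator might look like, assuming keras' TimeseriesGenerator and a 'morph' dict that carries the windowing settings (both are assumptions, not from the source):

from keras.preprocessing.sequence import TimeseriesGenerator

def create_generator(dataset, morph, shuffle=False):
    # Slide a lookback window over the series; each window predicts the next value.
    return TimeseriesGenerator(dataset, dataset,
                               length=morph['window'],
                               batch_size=morph['batch_size'],
                               shuffle=shuffle)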
Example #3
def runTrainingClassification(uuid,
                              datasetDir,
                              validDir,
                              classNum,
                              dropoutValue=0.2,
                              batch_size=128,
                              nb_epoch=20,
                              step_size_train=10,
                              alphaVal=0.75,
                              depthMul=1):

    imageGen = ImageDataGenerator(rotation_range=30,
                                  width_shift_range=0.35,
                                  height_shift_range=0.35,
                                  zoom_range=0.35,
                                  shear_range=0.35,
                                  vertical_flip=False,
                                  horizontal_flip=False,
                                  brightness_range=[0.65, 1.35],
                                  rescale=1. / 255)

    trainSet = imageGen.flow_from_directory(datasetDir,
                                            target_size=(224, 224),
                                            color_mode='rgb',
                                            batch_size=batch_size,
                                            class_mode='categorical',
                                            shuffle=True)
    validSet = imageGen.flow_from_directory(validDir,
                                            target_size=(224, 224),
                                            color_mode='rgb',
                                            batch_size=32,
                                            class_mode='categorical',
                                            shuffle=True)

    class EarlyStoppingAtMinLoss(tf.keras.callbacks.Callback):
        def __init__(self, patience=3):
            super(EarlyStoppingAtMinLoss, self).__init__()
            self.patience = patience
            self.best_weights = None

        def on_train_begin(self, logs=None):
            self.wait = 0
            self.stopped_epoch = 0
            self.best = np.Inf
            self.last_acc = 0
            self.atleastepoc = 0

        def on_epoch_end(self, epoch, logs=None):
            current = logs.get('val_loss')
            val_acc = logs.get('val_acc')
            self.atleastepoc = self.atleastepoc + 1
            if (np.less(current, self.best) or self.last_acc < 0.95
                    or self.atleastepoc < 25):
                self.best = current
                self.wait = 0
                self.last_acc = val_acc
                self.best_weights = self.model.get_weights()
            else:
                self.wait += 1
                if self.wait >= self.patience:
                    self.stopped_epoch = epoch
                    self.model.stop_training = True
                    print(
                        '\nRestoring model weights from the end of the best epoch.'
                    )
                    self.model.set_weights(self.best_weights)

        def on_train_end(self, logs=None):
            if self.stopped_epoch > 0:
                print('Epoch %05d: early stopping' % (self.stopped_epoch + 1))

    base_model = tf.keras.applications.MobileNet(input_shape=(224, 224, 3),
                                                 alpha=alphaVal,
                                                 depth_multiplier=depthMul,
                                                 dropout=dropoutValue,
                                                 pooling='avg',
                                                 include_top=False,
                                                 weights="imagenet",
                                                 classes=classNum)

    mbnetModel = Sequential([
        base_model,
        Dropout(dropoutValue, name='dropout'),
        Dense(classNum, activation='softmax')
    ])

    if classNum == 2:
        mbnetModel.compile(loss='binary_crossentropy',
                           optimizer=RAdam(),
                           metrics=['accuracy'])
    else:
        mbnetModel.compile(
            loss='categorical_crossentropy',  # or loss_softmax_cross_entropy_with_logits_v2
            optimizer=RAdam(),
            metrics=['accuracy'])

    history = History()

    try:
        mbnetModel.fit_generator(generator=trainSet,
                                 steps_per_epoch=step_size_train,
                                 callbacks=[EarlyStoppingAtMinLoss(), history],
                                 epochs=50,
                                 validation_data=validSet)
    except Exception as e:
        return (-14, f'Unexpected Error Found During Training, {e}')

    mbnetModel.save(f'{localSSDLoc}trained_h5_file/{uuid}_mbnet10.h5')

    converter = tf.lite.TFLiteConverter.from_keras_model_file(
        f'{localSSDLoc}trained_h5_file/{uuid}_mbnet10.h5',
        custom_objects={
            'RAdam':
            RAdam,
            'loss_softmax_cross_entropy_with_logits_v2':
            loss_softmax_cross_entropy_with_logits_v2
        })
    tflite_model = converter.convert()
    with open(f'{localSSDLoc}trained_tflite_file/{uuid}_mbnet10_quant.tflite',
              'wb') as f:
        f.write(tflite_model)

    subprocess.run([
        f'{nncaseLoc}/ncc',
        f'{localSSDLoc}trained_tflite_file/{uuid}_mbnet10_quant.tflite',
        f'{localSSDLoc}trained_kmodel_file/{uuid}_mbnet10_quant.kmodel', '-i',
        'tflite', '-o', 'k210model', '--dataset', validDir
    ])

    if os.path.isfile(
            f'{localSSDLoc}trained_kmodel_file/{uuid}_mbnet10_quant.kmodel'):
        return (
            0, f'{localSSDLoc}trained_kmodel_file/{uuid}_mbnet10_quant.kmodel',
            history, validSet, mbnetModel)
    else:
        return (-16,
                'Unexpected Error Found During generating Kendryte k210model.')
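A hypothetical caller for the routine above (the uuid and directory paths are made-up placeholders): on success it returns a 5-tuple starting with 0, on failure a 2-tuple with a negative code.

status, *artifacts = runTrainingClassification(uuid='demo-0001',
                                               datasetDir='./dataset/train',
                                               validDir='./dataset/valid',
                                               classNum=2)
if status == 0:
    kmodel_path, history, validSet, mbnetModel = artifacts
    print('k210 model written to', kmodel_path)
else:
    print('training failed:', status, artifacts[0])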
Example #4
    check_point = ModelCheckpoint(file_name,
                                  monitor='val_my_metric',
                                  verbose=0,
                                  save_best_only=True,
                                  save_weights_only=False,
                                  mode='auto',
                                  period=1)
    tensor_board = TensorBoard(log_dir='logs/' + file_name[:-3] + '/',
                               histogram_freq=0,
                               write_graph=True,
                               write_images=False)

    result = mlp.fit_generator(generator=generator(steps_per_epoch),
                               steps_per_epoch=steps_per_epoch,
                               epochs=training_epochs,
                               shuffle=shuffle_samples,
                               validation_data=(valid_x, valid_y),
                               verbose=2,
                               callbacks=[check_point, tensor_board])

    print(
        "*************************Finish the softmax output layer training*****************************"
    )
    # saver.save(sess, 'ckpt/sae.ckpt', global_step=epoch)

    # pred = mlp.predict(h
    # print(np.mean(np.abs(pred-train_y[-DATA_A_DAY:])))
    mae = validate(mlp)
    predict(mlp)

    end = datetime.datetime.now()
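Example #4 calls a generator() helper it never defines. A hypothetical sketch, assuming in-memory training arrays train_x / train_y and a fixed batch size (all three are assumptions):

def generator(steps_per_epoch, batch_size=256):
    # Endless generator, as fit_generator expects: cycle over the
    # training arrays one batch per step.
    while True:
        for step in range(steps_per_epoch):
            start = step * batch_size
            yield train_x[start:start + batch_size], train_y[start:start + batch_size]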
Example #5
model.compile(optimizer='adam',
              loss='categorical_crossentropy',
              metrics=['accuracy'])

"""# Train the model"""

import time

# Total number of steps (batches of samples) to yield from the generator before declaring one epoch finished
steps = train_generator.n//train_generator.batch_size

start = time.time()
try:
    history = model.fit_generator(train_generator, epochs=20, verbose=1,
                                  steps_per_epoch=steps,
                                  validation_data=test_generator,
                                  validation_steps=test_generator.n // test_generator.batch_size)
except KeyboardInterrupt:
    pass
end = time.time()
enc_time = end-start

print('Execution time:')
print(str(enc_time))
print()

"""# Save the model"""

filename = os.path.join(to_save, 'test2.h5')
model.save(filename)
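A follow-up sketch (not part of the snippet) showing how the saved file could be reloaded for inference; load_model is the standard Keras counterpart of model.save:

from keras.models import load_model

restored = load_model(filename)
loss, acc = restored.evaluate_generator(
    test_generator, steps=test_generator.n // test_generator.batch_size)
print('Restored model accuracy:', acc)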
Example #6
    model.add(SeparableConv1D(10, kernel_size_second_layer, activation="relu"))
    model.add(AveragePooling1D())
    model.add(SeparableConv1D(3, kernel_size_third_layer, activation="relu"))
    model.add(GlobalAveragePooling1D())
    model.add(Dense(1, activation="sigmoid"))
    model.compile(optimizer=Adam(),
                  loss="binary_crossentropy",
                  metrics=["accuracy"])

    model.build()
    model.summary()
    history = model.fit_generator(
        train_generator,
        epochs=1,
        validation_data=validation_generator,
        class_weight={
            0: 0.5,  # false
            1: 0.5  # true
        },
        callbacks=[EarlyStopping(monitor="val_loss", patience=5)])

    def plot_history(history):
        loss = history.history["loss"]
        val_loss = history.history["val_loss"]
        epochs = range(1, len(loss) + 1)
        plt.figure()
        plt.plot(epochs, loss, "bo", label="Training loss")
        plt.plot(epochs, val_loss, "b", label="Validation loss")
        plt.title("Training and validation loss")
        plt.legend()
        # plt.show()
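plot_history is defined but never invoked in this excerpt; presumably the surrounding code (an assumption) calls it on the returned history, e.g.:

    plot_history(history)
    plt.show()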
Example #7
datagen = ImageDataGenerator(featurewise_std_normalization=False,
                             samplewise_std_normalization=False,
                             zca_whitening=False,
                             rotation_range=15,
                             zoom_range=0.1,
                             width_shift_range=0.2,
                             height_shift_range=0.2,
                             horizontal_flip=False,
                             vertical_flip=False)
datagen.fit(X_train)

# Fit the model
history = model.fit_generator(datagen.flow(X_train,
                                           y_train,
                                           batch_size=batch_size),
                              epochs=epochs,
                              validation_data=(X_val, y_val),
                              verbose=2,
                              steps_per_epoch=X_train.shape[0] // batch_size,
                              callbacks=[learning_rate_reduction])

# Plot the loss and accuracy curves for both training and validation sets
fig, ax = plt.subplots(2, 1)
ax[0].plot(history.history['loss'], color='b', label="Training loss")
ax[0].plot(history.history['val_loss'], color='r', label="Validation loss")
legend = ax[0].legend(loc='best', shadow=True)

ax[1].plot(history.history['acc'], color='b', label="Training accuracy")
ax[1].plot(history.history['val_acc'], color='r', label="Validation accuracy")
Example #8
test_set = test_datagen.flow_from_directory('dataset/test_set',
                                            target_size=(28, 28),
                                            batch_size=16,
                                            class_mode='categorical')

# Utilize callback to store the weights of the best model
checkpointer = ModelCheckpoint(filepath="best_weights.hdf5",
                               monitor='val_accuracy',
                               verbose=1,
                               save_best_only=True)

# Now it's time to train the model, here we include the callback to our checkpointer
history = classifier.fit_generator(training_set,
                                   steps_per_epoch=100,
                                   epochs=20,
                                   callbacks=[checkpointer],
                                   validation_data=test_set,
                                   validation_steps=50)

# Load our classifier with the weights of the best model
# classifier.load_weights('best_weights.hdf5')
# classifier.save('shapes_cnn.h5')

# Displaying curves of loss and accuracy during training
acc = history.history['accuracy']
val_acc = history.history['val_accuracy']
loss = history.history['loss']
val_loss = history.history['val_loss']

epochs = range(1, len(acc) + 1)
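The snippet gathers the curves but stops before plotting them; a short completion in the same matplotlib style as the other examples (the figure layout is an assumption):

import matplotlib.pyplot as plt

plt.plot(epochs, acc, 'b', label='Training accuracy')
plt.plot(epochs, val_acc, 'r', label='Validation accuracy')
plt.plot(epochs, loss, 'b--', label='Training loss')
plt.plot(epochs, val_loss, 'r--', label='Validation loss')
plt.title('Training and validation accuracy / loss')
plt.legend()
plt.show()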
Example #9
# Freezes the weights and other trainable parameters in each layer.
# They will not be updated when we pass in our images of cats and dogs.
for layer in model.layers:
    layer.trainable = False

# add a layer of 2 outputs
model.add(Dense(2, activation='softmax'))

# print model
model.summary()

# Same compile as in original model
model.compile(optimizer=Adam(learning_rate=.0001), loss='categorical_crossentropy', metrics=['accuracy'])

# Learning on 40 images -> 4 steps and 10 images per step
model.fit_generator(generator=train_batches, steps_per_epoch=4,
    validation_data=valid_batches, validation_steps=4, epochs=5, verbose=2)

test_imgs, test_labels = next(test_batches)
test_labels = test_labels[:,0]

# Taking one batch so will be 10 images
predictions = model.predict_generator(generator=test_batches, steps=1, verbose=0)
print(np.round(predictions[:,0]))

cm = confusion_matrix(y_true=test_labels, y_pred=np.round(predictions[:,0]))
print('Confusion matrix')
print(cm)
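A tiny follow-up (not in the original) that turns the confusion matrix into a batch accuracy figure:

import numpy as np

accuracy = np.trace(cm) / cm.sum()
print('Accuracy on this batch:', accuracy)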


"""
Example #10
class Classifier:
    def __init__(self, model_path, class_labels_json):
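        # class_labels_json holds a file path here; it is replaced below by
        # the parsed labels (or None) once read_class_labels_if_exists() runs.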
        self.class_labels = class_labels_json
        self.height = 150
        self.width = 150
        self.model_path = model_path
        self.model = Sequential()
        self.class_labels = self.read_class_labels_if_exists()

    def read_class_labels_if_exists(self):
        if exists(self.class_labels):
            with open(self.class_labels, 'r') as file:
                return json.load(file)
        else:
            return None

    def create(self):
        model = self.model
        # 150 x 150 inputs with 3 channels (an RGB image)
        model.add(Conv2D(32, (3, 3),
                         input_shape=(self.width, self.height, 3)))
        model.add(Activation('relu'))
        model.add(MaxPooling2D(pool_size=(2, 2)))

        model.add(Conv2D(32, (3, 3)))
        model.add(Activation('relu'))
        model.add(MaxPooling2D(pool_size=(2, 2)))

        model.add(Conv2D(64, (3, 3)))
        model.add(Activation('relu'))
        model.add(MaxPooling2D(pool_size=(2, 2)))

        # this converts our 3D feature maps to 1D feature vectors
        model.add(Flatten())
        model.add(Dense(64))
        model.add(Activation('relu'))
        model.add(Dropout(0.5))
        model.add(Dense(1))
        model.add(Activation('sigmoid'))

        self.model = self.compile(model)

    def compile(self, model):
        model.compile(loss='binary_crossentropy',
                      optimizer='rmsprop',
                      metrics=['accuracy'])
        return model

    def fit(self, train, test, model_path):
        self.model.fit_generator(
            train,
            # steps per epoch = image count in the train dir / batch size
            steps_per_epoch=3000 // 16,
            epochs=50,
            validation_data=test,
            # validation steps = image count in the test dir / batch size
            validation_steps=1000 // 16)

        self.model.save(model_path)

    def predict(self, path):
        if exists(self.model_path):
            model = load_model(self.model_path)
            X = imgprocess.load_img(path,
                                    target_size=(self.width, self.height))
            X = imgprocess.img_to_array(X)
            X = np.expand_dims(X, axis=0)
            return self.interpret_class(X, model)

    def interpret_class(self, x, model):
        # predict_proba = model.predict_proba(x)
        # if predict_proba[0][0] < 0.5:
        #     return "unknown"

        classes = model.predict_classes(x)

        if self.class_labels is None:
            return classes
        # Map the predicted class index back to its label name.
        return [k for k in self.class_labels
                if self.class_labels[k] == classes[0][0]][0]
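A hypothetical usage of the Classifier above (the paths and batch size are assumptions):

from keras.preprocessing.image import ImageDataGenerator

clf = Classifier('model.h5', 'class_labels.json')
clf.create()

datagen = ImageDataGenerator(rescale=1. / 255)
train = datagen.flow_from_directory('data/train', target_size=(150, 150),
                                    batch_size=16, class_mode='binary')
test = datagen.flow_from_directory('data/test', target_size=(150, 150),
                                   batch_size=16, class_mode='binary')
clf.fit(train, test, 'model.h5')
print(clf.predict('data/test/cats/cat.1000.jpg'))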
Example #11
scaler = MinMaxScaler()
scaler.fit(train)
train = scaler.transform(train)
test = scaler.transform(test)

n_input = 12
n_features = 1
generator = TimeseriesGenerator(train, train, length=n_input, batch_size=6)

model = Sequential()
model.add(LSTM(200, activation='relu', input_shape=(n_input, n_features)))
model.add(Dropout(0.15))
model.add(Dense(1))
model.compile(optimizer='adam', loss='mse')

model.fit_generator(generator, epochs=50)
pred_list = []
batch = train[-n_input:].reshape((1, n_input, n_features))
for i in range(n_input):
    pred_list.append(model.predict(batch)[0])
    batch = numpy.append(batch[:, 1:, :], [[pred_list[i]]], axis=1)

df_predict = pandas.DataFrame(scaler.inverse_transform(pred_list),
                              index=df[-n_input:].index,
                              columns=['Prediction'])

df_test = pandas.concat([df, df_predict], axis=1)

plt.plot(df_test.index, df_test['Passengers'])
plt.plot(df_test.index, df_test['Prediction'], color='r')
plt.show()
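A hedged evaluation sketch (assuming the scaled 'test' array holds the true values for the 12-step forecast horizon, as the train/test split above suggests):

import numpy
from sklearn.metrics import mean_squared_error

rmse = numpy.sqrt(mean_squared_error(
    scaler.inverse_transform(test[:n_input]),
    scaler.inverse_transform(pred_list)))
print('RMSE over the forecast horizon:', rmse)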
Example #12
model.compile(optimizer='adam',
              loss='categorical_crossentropy',
              metrics=['accuracy'])

"""# Train the model"""

import time

# Total number of steps (batches of samples) to yield from the generator before declaring one epoch finished
steps = train_generator.n//train_generator.batch_size

start = time.time()
try:
    history = model.fit_generator(train_generator, epochs=100, verbose=1,
                                  steps_per_epoch=steps,
                                  validation_data=test_generator,
                                  validation_steps=test_generator.n // test_generator.batch_size,
                                  callbacks=[keras.callbacks.EarlyStopping(
                                      monitor='val_loss', mode='min',
                                      patience=8, restore_best_weights=True)])
except KeyboardInterrupt:
    pass
end = time.time()
enc_time = end-start

print('Execution time:')
print(str(enc_time))
print()

"""# Save the model"""

filename = os.path.join(to_save, 'best1000.h5')
model.save(filename)