def train():
    x_train, x_test, y_train, y_test = load_saved_data()
    model = build_model()
    try:
        model.load_weights(filepath=PATH + '/weights.best.basic_cnn.hdf5')
    except Exception:
        print('No saved checkpoints')
    model.compile(loss='categorical_crossentropy', metrics=['accuracy'], optimizer='adam')
    model.summary()

    score = model.evaluate(x_test, y_test, verbose=1)
    accuracy = 100 * score[1]

    print(f'Pre-training accuracy: {accuracy:.4f}%')

    num_epochs = 72
    num_batch_size = 256

    checkpointer = ModelCheckpoint(filepath=PATH + '/weights.best.basic_cnn.hdf5',
                                   verbose=1, save_best_only=True)
    start = datetime.now()

    model.fit(x_train, y_train, batch_size=num_batch_size, epochs=num_epochs, validation_data=(x_test, y_test),
              callbacks=[checkpointer], verbose=1)

    duration = datetime.now() - start
    print("Training completed in time: ", duration)
    def train(model,
              X,
              y,
              name,
              epochs=100,
              validation_split=0.3,
              batch_size=556):
        """
        Train the model
        :param model: compiled Keras model
        :param X: dataset
        :param y: dataset labels
        :param name: name of the model
        :param epochs: how many times to iterate over the data
        :param validation_split: fraction of the data held out for validation
        :param batch_size: number of samples per gradient update
        :return: None
        """

        # Add a timestamp to the name so each run always gets a unique name
        name = f"{name}_{int(time.time())}"
        # Set up the TensorBoard callback to track accuracy and loss
        tensor_board = TensorBoard(log_dir=f'logs/{name}')
        # Set up ModelCheckpoint to save the model with the best validation accuracy
        checkpoint = ModelCheckpoint(f'models/{name}',
                                     monitor='val_accuracy',
                                     save_best_only=True,
                                     mode='max')
        # Train the Model
        model.fit(X,
                  y,
                  validation_split=validation_split,
                  epochs=epochs,
                  batch_size=batch_size,
                  shuffle=True,
                  callbacks=[tensor_board, checkpoint])
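# Hypothetical usage sketch of the train() helper above (not part of the
# original example): every name below other than train() is an assumption made
# for illustration, and the helper is assumed to be available at module level.
import numpy as np
from tensorflow.keras.layers import Dense
from tensorflow.keras.models import Sequential

demo_X = np.random.rand(512, 20).astype('float32')
demo_y = np.random.randint(0, 2, size=(512, 1)).astype('float32')

demo_model = Sequential([Dense(16, activation='relu', input_shape=(20,)),
                         Dense(1, activation='sigmoid')])
demo_model.compile(optimizer='adam', loss='binary_crossentropy',
                   metrics=['accuracy'])

train(demo_model, demo_X, demo_y, name='demo_run', epochs=3, batch_size=64)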
Example #3
def create_model():
    checkpoint = ModelCheckpoint('sdr_model.h5',
                                 monitor='accuracy',
                                 verbose=1,
                                 save_best_only=True,
                                 mode='max')
    callbacks_list = [checkpoint]

    gray_data = np.load("npy_data/gray_dataset.npy")
    color_data = np.load("npy_data/color_dataset.npy")
    # img_pixel_dataset = np.load("npy_data/img_pixel_dataset.npy")
    label = np.load("npy_data/label.npy")

    # dataset = pre_processing.npy_dataset_concatenate(gray_data, color_data)
    dataset = pre_processing.npy_dataset_concatenate(gray_data, color_data)
    # corr_matrix = np.corrcoef(dataset)
    # print(corr_matrix)
    le = preprocessing.LabelEncoder()
    label = le.fit_transform(label)

    x_train, x_test, y_train, y_test = train_test_split(dataset,
                                                        label,
                                                        test_size=0.20,
                                                        shuffle=True)

    model = Sequential()
    model.add(Dense(14, input_dim=14, activation=None))
    model.add(Dense(128, activation='tanh'))
    model.add(Dense(256, activation='sigmoid'))
    model.add(Dense(3, activation='softmax'))
    model.compile(optimizer='adam',
                  loss='sparse_categorical_crossentropy',
                  metrics=['accuracy'])

    model.fit(x_train,
              y_train,
              epochs=150,
              verbose=0,
              batch_size=20,
              shuffle=True,
              callbacks=callbacks_list)

    # Sequential.predict_classes was removed in newer Keras; take the argmax of
    # the softmax outputs instead.
    pred_y_test = np.argmax(model.predict(x_test), axis=1)

    acc_model = accuracy_score(y_test, pred_y_test)
    print("Prediction Acc model:", acc_model)
    print("Org. Labels:", y_test[:30])
    print("Pred Labels:", (pred_y_test[:30]))
    # c_report = classification_report(y_test, pred_y_test, zero_division=0)
    # print(c_report)
    print("\n\n")
Example #4
def get_callbacks():
    callbacks = [
        ModelCheckpoint(filepath='tagger.model.best.hdf5',
                        verbose=1, save_best_only=True),
        EarlyStopping(
            # Stop training when `val_loss` is no longer improving
            monitor='val_loss',
            # "no longer improving" being defined as "an improvement smaller than 1e-3"
            min_delta=1e-3,
            # "no longer improving" being further defined as "for at least 4 epochs"
            patience=4,
            verbose=1),
    ]
    return callbacks
    def ModelCheckpointCallBack(filepath, monitor='val_loss', verbose=0, save_best_only=False, save_weights_only=False, mode='auto', save_freq='epoch',
                                **kwargs):
        """ModelCheckpoint Callback
            エポックごとにモデルを保存します

        Args:
            filepath (str): モデルファイルを保存するパス
            monitor (str, optional): 監視する数量. Defaults to 'val_loss'.
            verbose (int, optional): 詳細モード、0または1. Defaults to 0.
            save_best_only (bool, optional): Trueの場合監視数量に応じた最新の最良モデルは上書きされません. Defaults to False.
            save_weights_only (bool, optional): Trueの場合、モデルのウェイトのみmodel.save_weights(filepath)が保存されます(model.save(filepath)). Defaults to False.
            mode (str, optional): {auto、min、max}. Defaults to 'auto'.
            save_freq (str, optional): 'epoch'または整数. Defaults to 'epoch'.
        """
        return ModelCheckpoint(filepath=filepath, monitor=monitor, verbose=verbose, save_best_only=save_best_only, save_weights_only=save_weights_only,
                               mode=mode, save_freq=save_freq, **kwargs)
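# A brief hypothetical usage of the wrapper above (not from the original):
# assuming it is importable, the returned callback is passed to fit() like any
# other ModelCheckpoint; model, x_train and y_train are placeholder names.
best_ckpt = ModelCheckpointCallBack('best_model.h5',
                                    monitor='val_loss',
                                    save_best_only=True,
                                    verbose=1)
# model.fit(x_train, y_train, validation_split=0.2, epochs=20,
#           callbacks=[best_ckpt])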
Example #6
def run():
    model = build_model()
    print(model.summary())

    model.compile(optimizer=RMSprop(learning_rate=1e-3),
                  loss='binary_crossentropy',
                  metrics=['accuracy'])

    early_stop = EarlyStopping(monitor='val_loss',
                               min_delta=0.01,
                               patience=5,
                               verbose=2)
    filepath = 'data/saved-model-epoch{epoch:02d}-val_loss{val_loss:.4f}.hdf5'

    checkpoint = ModelCheckpoint(filepath,
                                 monitor='val_loss',
                                 verbose=1,
                                 save_best_only=False,
                                 mode='min')

    train_data = np.load('data/processed/train_v2.npz')
    X_train, y_train = train_data['X'], train_data['y']
    model.fit(X_train,
              y_train,
              epochs=1,
              batch_size=256,
              validation_split=0.3,
              callbacks=[early_stop, checkpoint])

    print('Loading test data...')
    test_data = np.load('data/processed/test_v2.npz')
    X_test, y_test = test_data['X'], test_data['y']
    y_pred = model.predict_classes(X_test, batch_size=256, verbose=1)

    output_file_path = f'data/predictions-{datetime.now().strftime("%Y-%m-%d-%H-%M-%S")}.csv'
    print(f'Persisting predictions: {output_file_path}')
    df_pred = pd.DataFrame(y_pred)
    df_pred.to_csv(output_file_path)

    print(classification_report(y_test, y_pred))

    print('Done')
Example #7
    def __init__(self, state_size, action_size, gamma, epsilon, learning_rate, log_dir, batch_size):
        self.replay_buffer = {'state': [], 'action': [], 'reward': [], 'next_state': [], 'done': []}
        self.replay_buffer_size = 0

        self.state_size = state_size
        self.action_size = action_size

        self.learning_rate = learning_rate
        self.gamma = gamma
        self.epsilon = epsilon

        self.batch_size = batch_size
        self.exploration_min = 0.01
        self.exploration_decay = 0.995

        self.max_experiences = 10000

        self.filepath_best = "model_weights_{}_{}_{}_best.hdf5".format(str(gamma), str(epsilon), str(learning_rate))
        self.filepath_last = "model_weights_{}_{}_{}_last.hdf5".format(str(gamma), str(epsilon), str(learning_rate))

        self.tb_call_back = TensorBoard(log_dir=log_dir, histogram_freq=0, write_graph=True, write_images=False)
        self.checkpoint = ModelCheckpoint(
            self.filepath_best,
            monitor='loss',
            verbose=0,
            save_best_only=True,
            save_weights_only=True,
            save_freq=10,
            mode='min'
        )
        self.reduce_lr = ReduceLROnPlateau(
            monitor='loss',
            factor=0.1,
            patience=10,
            verbose=1,
            mode='auto',
            min_delta=0.0001,
            cooldown=0,
            min_lr=0
        )
        self.model = self._build_model()
    def train_model(self,
                    model,
                    epochs=25,
                    model_name="default",
                    use_checkpoint=False):
        if not self.train_init:
            print("Datasets not initialized")
            return

        history3 = History()
        callbacks = [
            history3,
            EarlyStopping(monitor='val_loss',
                          patience=3,
                          verbose=1,
                          min_delta=1e-4),
            ReduceLROnPlateau(monitor='val_loss',
                              factor=0.1,
                              patience=1,
                              cooldown=0,
                              min_lr=1e-7,
                              verbose=1),
            ModelCheckpoint(filepath='weights/' + model_name +
                            '_weights.best.hdf5',
                            verbose=1,
                            save_best_only=True,
                            save_weights_only=True,
                            mode='auto')
        ]

        if use_checkpoint:
            model.load_weights('weights/' + model_name + '_weights.best.hdf5')

        model.fit(self.train_ds,
                  epochs=epochs,
                  validation_data=self.create_dataset(self.X_val,
                                                      self.y_val_bin),
                  callbacks=callbacks)
Example #9
def train_model(model: models.Sequential, train_x: np.ndarray,
                train_y: np.ndarray):
    print("Training Model")
    checkpoint = ModelCheckpoint("models/auto_save.h5",
                                 monitor='accuracy',
                                 verbose=1,
                                 save_best_only=True,
                                 mode='auto',
                                 period=2)
    lr_scheduler = LearningRateScheduler(learning_rate_scheduler, verbose=1)

    history = model.fit([train_x, train_x],
                        train_y,
                        batch_size=256,
                        epochs=50,
                        verbose=1,
                        validation_split=TEST_SPLIT,
                        shuffle=True,
                        callbacks=[checkpoint, lr_scheduler])

    fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(10, 5))
    ax1.plot(history.history['accuracy'], label='accuracy')
    ax1.plot(history.history['val_accuracy'], label='val_accuracy')
    ax1.set_xlabel('Epoch')
    ax1.set_ylabel('Accuracy')
    ax1.legend(loc='lower right')

    ax2.plot(history.history['loss'], label='loss')
    ax2.plot(history.history['val_loss'], label='val_loss')
    ax2.set_xlabel('Epoch')
    ax2.set_ylabel('Loss')
    ax2.legend(loc='lower right')
    plt.savefig("models/training_progress.png")
    plt.show()

    model.save(MODEL_PATH)
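# Hedged follow-up (not in the original): reload the model saved above for
# inference; `models` is the same Keras models module used in the type hints.
reloaded = models.load_model(MODEL_PATH)
# The network takes two inputs (it was fit on [train_x, train_x]), so inference
# mirrors that, e.g.: reloaded.predict([new_x, new_x], batch_size=256)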
Example #10
model.add(tf.keras.layers.Dense(units=256, activation='relu'))
model.add(tf.keras.layers.BatchNormalization())
model.add(tf.keras.layers.Dense(units=256, activation='relu'))
model.add(tf.keras.layers.Dropout(0.4))
model.add(tf.keras.layers.Dense(units=256, activation='relu'))
model.add(tf.keras.layers.Dense(units=6))

model.summary()

# Keep only a single checkpoint, the best over test accuracy.
modelName = "CNNv4_MarcinOnly_DropOut04"
filepath = "checkpoints/chpt_CNNv4_MarcinOnly_DropOut04-{epoch:04d}-loss-{val_loss:.2f}-Metric-{val_mean_absolute_error:.3f}.h5"
checkpoint = ModelCheckpoint(
    filepath,
    monitor='val_mean_absolute_error',  #!!!
    verbose=1,
    save_best_only=True,
    mode='auto',
    period=1)

csv_fileName = "logs/Log_{}.csv".format(modelName)
logger = tf.keras.callbacks.CSVLogger(csv_fileName,
                                      separator=',',
                                      append=False)

model.compile(loss="mean_squared_error",
              optimizer="adam",
              metrics=["mean_absolute_error"])

model.fit(x=np.array(train_Img),
          y=np.array(train_Lab),
Example #11
def trainmodelwithnpy(model, savepath):
    # root_for_covid='F:/Edge_download/data/dicom/new/dicom_archive_v2.tar/'
    # rootpath='F:/Edge_download/data/archive/chest_xray/chest_xray/'
    if not os.path.exists('./savedmodel/' + savepath):
        os.makedirs('./savedmodel/' + savepath)
    root_for_covid = 'D:/dataset/Covid/'
    rootpath = 'D:/dataset/Other/'
    generate_dataset = (lambda split: [
        root_for_covid + split, rootpath + split + 'PNEUMONIA/', rootpath +
        split + 'VIRUS/', rootpath + split + 'NORMAL/'
    ])
    # trainset path
    trainset_filelist = generate_dataset('train/')
    # valset path
    valset_filelist = generate_dataset('val/')
    # testset path
    testset_filelist = generate_dataset('test/')

    partition_train, labels_train = generate_partitionandlabel(
        trainset_filelist, 3000)
    training_generator = DataGenerator(partition_train,
                                       labels_train,
                                       batch_size=16,
                                       dim=(224, 224),
                                       n_channels=3,
                                       n_classes=4,
                                       shuffle=True,
                                       filelist=trainset_filelist,
                                       split="train")
    partition_val, labels_val = generate_partitionandlabel(
        valset_filelist, 3000)
    validation_generator = DataGenerator(partition_val,
                                         labels_val,
                                         batch_size=16,
                                         dim=(224, 224),
                                         n_channels=3,
                                         n_classes=4,
                                         shuffle=True,
                                         filelist=valset_filelist,
                                         split="val")
    earlyStopping = EarlyStopping(monitor='val_loss',
                                  min_delta=0.01,
                                  patience=15,
                                  verbose=0,
                                  mode='min')
    mcp_save = ModelCheckpoint('./savedmodel/' + savepath +
                               '/save_at_{epoch}.h5',
                               save_best_only=True,
                               monitor='val_loss',
                               mode='min')
    reduce_lr_loss = ReduceLROnPlateau(monitor='val_loss',
                                       factor=0.1,
                                       patience=7,
                                       verbose=1,
                                       min_delta=1e-4,
                                       mode='min')
    model.compile(
        optimizer=keras.optimizers.Adam(1e-2),
        loss="categorical_crossentropy",
        metrics=["accuracy"],
    )
    # Train model on dataset
    history = model.fit_generator(
        generator=training_generator,
        validation_data=validation_generator,
        steps_per_epoch=161,
        epochs=50,
        workers=8,
        max_queue_size=100,
        validation_steps=61,
        use_multiprocessing=False,
        callbacks=[earlyStopping, mcp_save, reduce_lr_loss])
    training_loss = history.history["loss"]
    train_acc = history.history["accuracy"]
    test_loss = history.history["val_loss"]
    test_acc = history.history["val_accuracy"]
    epoch_count = range(1, len(training_loss) + 1)
    plt.plot(epoch_count, training_loss, 'r--')
    plt.plot(epoch_count, test_loss, 'b--')
    plt.legend(["Training_loss", "Test_loss"])
    plt.xlabel("Epoch")
    plt.ylabel("loss")
    plt.show()
    # X,y=validation_generator.__getitem__(1)

    plt.plot(epoch_count, train_acc, 'r--')
    plt.plot(epoch_count, test_acc, 'b--')
    plt.legend(["train_acc", "test_acc"])
    plt.xlabel("Epoch")
    plt.ylabel("acc")
    plt.show()
Example #12
def model_callback_factory(exp_path: str, model_subfolder: str, *args,
                           **kwargs):
    filepath = join(exp_path, model_subfolder)
    filepath = subs_path_and_create_folder(filepath)
    return ModelCheckpoint(filepath=filepath, *args, **kwargs)
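# Hypothetical usage (not from the original project): additional keyword
# arguments are forwarded to ModelCheckpoint unchanged; the paths below are
# purely illustrative.
cp_callback = model_callback_factory('experiments/run_01',
                                     'checkpoints/weights.{epoch:02d}.hdf5',
                                     monitor='val_loss',
                                     save_best_only=True,
                                     verbose=1)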
Example #13
def trainmodel(model=prepare_dataset.make_model("VGG19",
                                                input_shape=(
                                                    224,
                                                    224,
                                                    3,
                                                ),
                                                num_classes=4),
               savepath="VGG19",
               trainsetpath='D:/dataset/Crop/train/',
               valsetpath='D:/dataset/Crop/val/'):
    if not os.path.exists('./savedmodel/localbranch/' + savepath):
        os.makedirs('./savedmodel/localbranch/' + savepath)
    train_datagen = ImageDataGenerator(rotation_range=10,
                                       width_shift_range=0.1,
                                       height_shift_range=0.1,
                                       rescale=1. / 255,
                                       shear_range=0.2,
                                       zoom_range=0.2,
                                       horizontal_flip=True)
    test_datagen = ImageDataGenerator(rescale=1. / 255)
    train_generator = train_datagen.flow_from_directory(
        trainsetpath,
        target_size=(224, 224),
        batch_size=16,
        class_mode='categorical')
    validation_generator = test_datagen.flow_from_directory(
        valsetpath,
        target_size=(224, 224),
        batch_size=16,
        class_mode='categorical')
    earlyStopping = EarlyStopping(monitor='val_loss',
                                  min_delta=0.01,
                                  patience=6,
                                  verbose=0,
                                  mode='min')
    mcp_save = ModelCheckpoint('./savedmodel/localbranch/' + savepath +
                               '/save_at_{epoch}.h5',
                               save_best_only=True,
                               monitor='val_loss',
                               mode='min')
    reduce_lr_loss = ReduceLROnPlateau(monitor='val_loss',
                                       factor=0.1,
                                       patience=4,
                                       verbose=1,
                                       min_delta=1e-4,
                                       mode='min')
    model.compile(
        optimizer=keras.optimizers.Adam(1e-4),
        loss="categorical_crossentropy",
        metrics=["accuracy"],
    )
    # Train model on dataset
    history = model.fit_generator(
        #     generator=training_generator,
        # validation_data=validation_generator,
        generator=train_generator,
        validation_data=validation_generator,
        steps_per_epoch=400,
        epochs=80,
        workers=8,
        max_queue_size=100,
        validation_steps=132,
        use_multiprocessing=False,
        callbacks=[earlyStopping, mcp_save, reduce_lr_loss])
    training_loss = history.history["loss"]
    train_acc = history.history["accuracy"]
    test_loss = history.history["val_loss"]
    test_acc = history.history["val_accuracy"]
    epoch_count = range(1, len(training_loss) + 1)
    plt.plot(epoch_count, training_loss, 'r--')
    plt.plot(epoch_count, test_loss, 'b--')
    plt.legend(["Training_loss", "Test_loss"])
    plt.xlabel("Epoch")
    plt.ylabel("loss")
    plt.savefig('./savedmodel/localbranch/' + savepath + '/loss.jpg')
    plt.show()
    plt.plot(epoch_count, train_acc, 'r--')
    plt.plot(epoch_count, test_acc, 'b--')
    plt.legend(["train_acc", "test_acc"])
    plt.xlabel("Epoch")
    plt.ylabel("acc")
    plt.savefig('./savedmodel/localbranch/' + savepath + '/acc.jpg')
    plt.show()
Example #14
    x = layer(x)
x = Dropout(0.2, name='dropout_2')(x)
output = layers[-1](x)

model = Model(inputs=inp, outputs=output)
model.summary()
model.compile(optimizer='Adam',
              loss='binary_crossentropy',
              metrics=['categorical_accuracy'])

callbacks = []
filesToSave = 'weights.{epoch:03d}-{val_categorical_accuracy:.4f}.hdf5'
checkpoint = ModelCheckpoint(path_to_history + filesToSave,
                             monitor='val_categorical_accuracy',
                             verbose=0,
                             save_weights_only=True,
                             save_best_only=True,
                             mode='max',
                             period=1)
callbacks.append(checkpoint)

history = model.fit(x_train,
                    y_train,
                    batch_size=200,
                    epochs=10,
                    verbose=1,
                    validation_data=(x_test, y_test),
                    callbacks=callbacks)
history = history.history

acc_history = history['categorical_accuracy']
Example #15
import tensorflow as tf
from tensorflow.keras.callbacks import ModelCheckpoint, CSVLogger
import DataManager as dm

filepath = "checkpoints/checkpointBlackAndWhite-{epoch:04d}-{val_loss:.2f}.hdf5"
checkpoint = ModelCheckpoint(filepath,
                             monitor='accuracy',
                             verbose=1,
                             save_best_only=True,
                             mode='auto')

csv_fileName = "logs/CSV_log_BlackAndWhite.csv"
logger = CSVLogger(csv_fileName, separator=',', append=False)

npImgArray, npLabelArray = dm.getImgAndLables()

model = tf.keras.models.load_model(
    "checkpoints/checkpointBlackAndWhite-0216.h5")

model.fit(npImgArray,
          npLabelArray,
          epochs=10,
          batch_size=5,
          validation_split=0.2,
          callbacks=[checkpoint, logger])

model.save("./savedModels/myModelBlackAndWhite.h5")
Example #16
    tf.keras.layers.Conv2D(filters=128,
                           kernel_size=3,
                           padding="same",
                           activation="relu"))
model.add(tf.keras.layers.Flatten())
model.add(tf.keras.layers.Dense(units=256, activation='relu'))
model.add(tf.keras.layers.Dense(units=6))

model.summary()

# Keep only a single checkpoint, the best over test accuracy.
modelName = "CNNv2_logcosh"
filepath = "checkpoints/chpt_CNNv2_RGB-{epoch:04d}-loss-{val_loss:.2f}-Metric-{val_mean_absolute_error:.1f}.h5"
checkpoint = ModelCheckpoint(filepath,
                             monitor='val_mean_absolute_error',
                             verbose=1,
                             mode='auto',
                             period=1)

csv_fileName = "logs/CSV_log_RGB_{}.csv".format(modelName)
logger = tf.keras.callbacks.CSVLogger(csv_fileName,
                                      separator=',',
                                      append=False)

model.compile(loss="mean_squared_error",
              optimizer="adam",
              metrics=["mean_absolute_error"])

model.fit(x=np.array(train_Img),
          y=np.array(train_Lab),
          epochs=100,
Example #17
# Dataset filepath
data_raw_lr_filenames = get_filenames(data_path + '_BICUBIC')
data_raw_hr_filenames = get_filenames(data_path + '_CROPPED')
batch_generator_train = SRCNNGenerator(data_raw_lr_filenames, data_raw_hr_filenames, batch_size=batch_size)

# calculates bicubic psnr before training prints after every epoch
bicubic_callback = BicubicPSNR(train_generator=batch_generator_train)

# Checkpoint part
checkpoint_path = "training/cp-{epoch:04d}.ckpt"
checkpoint_dir = os.path.dirname(checkpoint_path)

# Initialize saver
cp_callback = ModelCheckpoint(
    filepath=checkpoint_path,
    verbose=1,
    save_weights_only=True,
    period=1)

# Return model and initial_epoch
srcnn_model, initial_epoch = LoadWeight(
    filepath=checkpoint_dir,
    model=srcnn_model
).load_weight()

# callback for shuffling filenames
shuffle_callback = LambdaCallback(on_epoch_end=lambda epoch, logs=None: batch_generator_train.shuffle_names())

# Fit function
srcnn_model.fit_generator(generator=batch_generator_train,
                          # steps_per_epoch=int(len(data_raw_hr_filenames) / batch_size),
Example #18
File: train.py Project: Slownite/Prequel
    model.add(Dropout(dropout_value))
    model.add(BatchNormalization())
    model.add(Dropout(dropout_value))
    model.add(Dense(vocab_size))
    model.add(Activation(activation_function))
    model.compile(loss=loss_function, optimizer=optimizer)
    return model


if __name__ == "__main__":
    import time

    filepath = "training_weight/weights-save-{epoch:04d}-{loss:.4f}-bigger.hdf5"
    checkpoint = ModelCheckpoint(filepath,
                                 monitor='loss',
                                 verbose=1,
                                 save_best_only=True,
                                 mode='min')
    NAME = "prequel_generator_version_1-{}".format(int(time.time()))
    # callbacks
    callbacks_list = [checkpoint]
    notes = load_data()
    network_input, network_output, vocab_size, pitchnames = to_integer_base(
        notes, 100)
    model = create_model()
    epochs = 200
    model.fit(network_input,
              network_output,
              epochs=epochs,
              batch_size=90,
              callbacks=callbacks_list)
Example #19
def train_fusion_model(model, name, threshold):
    if not os.path.exists('./savedmodel/globalbranch/' + name + '/' +
                          threshold):
        os.makedirs('./savedmodel/globalbranch/' + name + '/' + threshold)
    train_datagen = ImageDataGenerator(
        # rotation_range=10,
        # width_shift_range=0.1,
        # height_shift_range=0.1,
        rescale=1. / 255
        # shear_range=0.2,
        # zoom_range=0.2,
        # horizontal_flip=True
    )
    test_datagen = ImageDataGenerator(rescale=1. / 255)

    class ImageWithNames(DirectoryIterator):
        def __init__(self, *args, **kwargs):
            super().__init__(*args, **kwargs)
            self.filenames_np = np.array(self.filepaths)
            # self.class_mode = None  # so that we only get the images back

        def _get_batches_of_transformed_samples(self, index_array):
            return (super()._get_batches_of_transformed_samples(index_array),
                    self.filenames_np[index_array])

    def generate_generator_multiple(generator, dir1, dir2):
        # random.seed(1)
        train_generator = ImageWithNames(dir1,
                                         generator,
                                         target_size=(224, 224),
                                         batch_size=8,
                                         class_mode='categorical',
                                         seed=7)
        #     generator.flow_from_directory(
        #     dir1,
        #     target_size=(224, 224),
        #     batch_size=8,
        #     class_mode='categorical',
        #     seed = 7
        # )
        train1_generator = ImageWithNames(dir2,
                                          generator,
                                          target_size=(224, 224),
                                          batch_size=8,
                                          class_mode='categorical',
                                          seed=7)
        #     generator.flow_from_directory(
        #     dir2,
        #     target_size=(224, 224),
        #     batch_size=8,
        #     class_mode='categorical',
        #     seed=7
        # )
        while True:
            X1i, namelist1 = train_generator.next()
            # print(namelist1)
            X2i, namelist2 = train1_generator.next()
            # print(X1i[1],X2i[1])
            # print(namelist2)
            # X1i=train_generator._get_batches_of_transformed_samples(np.array([0,5,6,11,33,45,77,32]))
            # X2i=train1_generator._get_batches_of_transformed_samples(np.array([0,5,6,11,33,45,77,32]))
            # X1i = train_generator.next()
            # X2i = train1_generator.next()
            yield [X1i[0],
                   X2i[0]], X1i[1]  # Yield both images and their mutual label

    train_generator = generate_generator_multiple(
        generator=train_datagen,
        dir1='D:/dataset/Segmentation/train',
        dir2='D:/dataset/Crop/' + name + '/' + threshold + '/train')

    validation_generator = generate_generator_multiple(
        generator=test_datagen,
        dir1='D:/dataset/Segmentation/val',
        dir2='D:/dataset/Crop/' + name + '/' + threshold + '/val')
    earlyStopping = EarlyStopping(monitor='val_loss',
                                  min_delta=0.01,
                                  patience=6,
                                  verbose=0,
                                  mode='min')
    mcp_save = ModelCheckpoint('./savedmodel/globalbranch/' + name + '/' +
                               threshold + '/save_at_{epoch}.h5',
                               save_best_only=True,
                               monitor='val_loss',
                               mode='min')
    reduce_lr_loss = ReduceLROnPlateau(monitor='val_loss',
                                       factor=0.1,
                                       patience=4,
                                       verbose=1,
                                       min_delta=1e-4,
                                       mode='min')
    model.compile(
        optimizer=keras.optimizers.Adam(1e-2),
        loss="categorical_crossentropy",
        metrics=["accuracy"],
    )
    # Train model on dataset
    history = model.fit_generator(
        #     generator=training_generator,
        # validation_data=validation_generator,
        generator=train_generator,
        validation_data=validation_generator,
        steps_per_epoch=400,
        epochs=80,
        workers=1,
        max_queue_size=100,
        validation_steps=132,
        use_multiprocessing=False,
        callbacks=[earlyStopping, mcp_save, reduce_lr_loss])
    training_loss = history.history["loss"]
    train_acc = history.history["accuracy"]
    test_loss = history.history["val_loss"]
    test_acc = history.history["val_accuracy"]
    epoch_count = range(1, len(training_loss) + 1)
    plt.plot(epoch_count, training_loss, 'r--')
    plt.plot(epoch_count, test_loss, 'b--')
    plt.legend(["Training_loss", "Test_loss"])
    plt.xlabel("Epoch")
    plt.ylabel("loss")
    plt.savefig('./savedmodel/globalbranch/' + name + '/' + threshold +
                '/loss.jpg')
    plt.show()
    plt.plot(epoch_count, train_acc, 'r--')
    plt.plot(epoch_count, test_acc, 'b--')
    plt.legend(["train_acc", "test_acc"])
    plt.xlabel("Epoch")
    plt.ylabel("acc")
    plt.savefig('./savedmodel/globalbranch/' + name + '/' + threshold +
                '/acc.jpg')
    plt.show()
Example #20
#     else:
#         df_train = df_train.append(lst[0:len(lst)], ignore_index=True)

print("creating labels...")
labels_strings = df_train['cuisine'].unique()
target_dict = {n: i for i, n in enumerate(labels_strings)}
outputs = df_train['cuisine'].map(target_dict)

print("reading test data...")
df_test = pd.read_csv("./data/cooking_test_v2.csv", sep=',')

print("defining model...")

es = EarlyStopping(monitor='val_loss', mode='min', verbose=1, patience=5)
mc = ModelCheckpoint('./models/best_model.h5',
                     monitor='val_loss',
                     mode='min',
                     save_best_only=True)

model = tf.keras.models.Sequential([
    tf.keras.layers.InputLayer(input_shape=(len(df_train.columns) - 2, )),
    # tf.keras.layers.Dense(10, activation=tf.keras.layers.LeakyReLU(alpha=0.1)),
    # tf.keras.layers.Dense(10, activation=tf.keras.layers.LeakyReLU(alpha=0.1)),
    tf.keras.layers.Dense(400, activation='tanh'),
    tf.keras.layers.Dense(40, activation='tanh'),
    tf.keras.layers.Dense(len(df_train['cuisine'].unique()),
                          activation='softmax')
])

# saved_model = load_model('best_model.h5')
print("compiling model...")
model.compile(
Example #21
def train_model_batch_generator(image_dir=None,
                                label_dir=None,
                                model_out_name="model.h5",
                                **args):

    bg = batch_generator(
        image_dir,
        label_dir,
        batch_size=BATCH_SIZE,
        validation_batch_size=VALIDATION_BATCH_SIZE,
        training_split=0.8,
        output_size=OUTPUT_SIZE,  # (H, W)
        scale_size=SCALE_SIZE,  # (H, W)
        include_only_crops_with_mask=True,
        augment=True)

    model = models.get_model(BATCH_SIZE,
                             width=OUTPUT_SIZE[1],
                             height=OUTPUT_SIZE[0])

    # frozen_layer = model.get_layer('block1_conv0')

    checkpoint_name = NAME + '.h5'

    # class LearningRateTracker(keras.callbacks.Callback):
    #     def on_epoch_end(self, epoch, logs={}):
    #         optimizer = self.model.optimizer
    #         lr = K.eval(optimizer.lr * (1. / (1. + optimizer.decay * optimizer.iterations)))
    #         print('\nLR: {:.6f}\n'.format(lr))

    reduce_lr = ReduceLROnPlateau(monitor='val_loss',
                                  factor=0.1,
                                  patience=20,
                                  min_lr=0.0001)

    def unfreeze(model, epoch):
        if epoch == 20:
            model.layers[2].trainable = True
            optimizer = Adam(learning_rate=1e-4)
            model.compile(optimizer=optimizer,
                          loss='binary_crossentropy',
                          metrics=['accuracy'])

    unfreeze_layers = keras.callbacks.LambdaCallback(
        on_epoch_end=lambda epoch, logs: unfreeze(model, epoch))

    class LRTensorBoard(TensorBoard):
        ''' add the learning rate to tensorboard '''
        def __init__(self,
                     log_dir,
                     histogram_freq=1,
                     write_grads=True,
                     write_images=True
                     ):  # add other arguments to __init__ if you need
            super().__init__(log_dir=log_dir,
                             histogram_freq=1,
                             write_grads=True,
                             write_images=True)

        def on_epoch_end(self, epoch, logs=None):
            logs.update({'lr': K.eval(self.model.optimizer.lr)})
            super().on_epoch_end(epoch, logs)

    tensorboard = TensorBoard(log_dir="logs\\{}".format(NAME),
                              histogram_freq=1,
                              write_grads=True,
                              write_images=False)
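    # Hedged note (not in the original): the LRTensorBoard subclass defined
    # above could be swapped in here so the learning rate is logged as well,
    # e.g. tensorboard = LRTensorBoard(log_dir="logs\\{}".format(NAME))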

    callbacks = [
        tensorboard,
        reduce_lr,
        #unfreeze_layers,
        # LearningRateTracker(),
        EarlyStopping(monitor='val_loss', patience=PATIENCE, verbose=0),
        ModelCheckpoint(checkpoint_name,
                        monitor='val_loss',
                        save_best_only=True,
                        verbose=0),
    ]

    X_test, Y_test = bg.validation_batch()

    history = model.fit_generator(
        bg.training_batch(),
        validation_data=(X_test, Y_test),
        epochs=EPOCHS,
        steps_per_epoch=bg.steps_per_epoch,
        validation_steps=bg.validation_steps,
        verbose=1,
        shuffle=False,
        # class_weight=CLASS_WEIGHT,
        callbacks=callbacks)

    # Save the model locally
    model.save(model_out_name)