Example #1
# Add a trailing channel axis so the masks match the model's 4-D input
train_labels = train_labels[:, :, :, np.newaxis]
valid_labels = valid_labels[:, :, :, np.newaxis]

# Convert to float32
train_images = train_images.astype('float32') 
train_labels = train_labels.astype('float32')
valid_images = valid_images.astype('float32')
valid_labels = valid_labels.astype('float32')

# Normalize images and masks to [0, 1]
train_images /= 255
train_labels /= 255
valid_images /= 255
valid_labels /= 255

# Build the model
model = model_unet.unet(IMAGE_SIZE=IMAGE_SIZE)

# Callback setup
model_checkpoint = ModelCheckpoint(save_model_path, monitor='loss', verbose=1,
                                   save_best_only=True, mode='auto', period=1)
early_stopping = EarlyStopping(monitor='loss', patience=50, verbose=1)
tensorboard = TensorBoard(log_dir=weights_save_path, histogram_freq=1)
reduce_lr = ReduceLROnPlateau(monitor='loss', factor=0.2, patience=5, min_lr=0.001)

callback_lists = [model_checkpoint, early_stopping, tensorboard, reduce_lr]

# Training
model.fit(train_images,
          train_labels,
          batch_size=batch_size,
          epochs=epochs,
          validation_data=(valid_images, valid_labels),
          callbacks=callback_lists)
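
As a follow-up, a minimal sketch that reloads the best checkpoint and scores it on the validation set (assuming `save_model_path` is the checkpoint path used above):

# Hedged sketch: evaluate the best saved weights on the validation data.
model.load_weights(save_model_path)
scores = model.evaluate(valid_images, valid_labels, batch_size=batch_size, verbose=1)
print('validation scores:', scores)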
Example #2
def training(path_save_spectrogram, weights_path, name_model,
             training_from_scratch, epochs, batch_size):
    """ This function will read noisy voice and clean voice spectrograms created by data_creation mode,
    and train a Unet model on this dataset for epochs and batch_size specified. It saves best models to disk regularly
    If training_from_scratch is set to True it will train from scratch, if set to False, it will train
    from weights (name_model) provided in weights_path
    """
    # Load the noisy-voice and clean-voice spectrograms created by data_creation mode
    X_in = np.load(path_save_spectrogram + 'noisy_voice_amp_db' + ".npy")
    X_ou = np.load(path_save_spectrogram + 'voice_amp_db' + ".npy")
    # The training target is the noise itself: noisy voice minus clean voice
    X_ou = X_in - X_ou

    #Check distribution
    print(stats.describe(X_in.reshape(-1, 1)))
    print(stats.describe(X_ou.reshape(-1, 1)))

    # Scale between -1 and 1
    X_in = scaled_in(X_in)
    X_ou = scaled_ou(X_ou)

    #Check shape of spectrograms
    print(X_in.shape)
    print(X_ou.shape)
    #Check new distribution
    print(stats.describe(X_in.reshape(-1, 1)))
    print(stats.describe(X_ou.reshape(-1, 1)))

    # Reshape for training: add a trailing channel axis
    X_in = X_in.reshape(X_in.shape[0], X_in.shape[1], X_in.shape[2], 1)
    X_ou = X_ou.reshape(X_ou.shape[0], X_ou.shape[1], X_ou.shape[2], 1)

    X_train, X_test, y_train, y_test = train_test_split(X_in,
                                                        X_ou,
                                                        test_size=0.10,
                                                        random_state=42)

    # Train from scratch or resume from pre-trained weights
    if training_from_scratch:
        generator_nn = unet()
    else:
        generator_nn = unet(pretrained_weights=weights_path + name_model +
                            '.h5')

    #Save best models to disk during training
    checkpoint = ModelCheckpoint(weights_path + '/weigths_HUBER_N2C.h5',
                                 verbose=1,
                                 monitor='val_loss',
                                 save_best_only=True,
                                 mode='auto')

    generator_nn.summary()
    #Training
    history = generator_nn.fit(X_train,
                               y_train,
                               epochs=epochs,
                               batch_size=batch_size,
                               shuffle=True,
                               callbacks=[checkpoint],
                               verbose=1,
                               validation_data=(X_test, y_test))

    #Plot training and validation loss (log scale)
    loss = history.history['loss']
    val_loss = history.history['val_loss']
    epochs = range(1, len(loss) + 1)

    plt.plot(epochs, loss, label='Training loss')
    plt.plot(epochs, val_loss, label='Validation loss')
    plt.yscale('log')
    plt.title('Training and validation loss')
    plt.legend()
    plt.show()
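
A minimal usage sketch for the function above; the paths and model name are illustrative assumptions, not values from the source:

# Hypothetical invocation; adjust the paths to your own data layout.
training(path_save_spectrogram='./spectrograms/',
         weights_path='./weights/',
         name_model='model_unet',
         training_from_scratch=True,
         epochs=10,
         batch_size=20)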
Example #3
def model_prediction(image_path, label_path, IMAGE_SIZE, model_list,
                     pre_image_dir):
    '''
    1. Load the model
    2. Read the image
    3. Predict
    4. Save the results
    '''
    # Store the evaluation results
    result = {}
    result['data'] = []
    result['image'] = image_path
    result['IMAGE_SIZE'] = str(IMAGE_SIZE)

    # Load the image and label
    print('{}: Load predict image...'.format(datetime.now().strftime('%c')))
    x_pre, y_label = load_image(image_path, label_path, IMAGE_SIZE)

    # Expand to 4-D to match the model's expected input
    x_pre_4d = x_pre[np.newaxis, :, :, :]
    y_label_4d = y_label[np.newaxis, :, :, :]

    for i in range(len(model_list)):

        # Extract the model name from the path
        model_name = os.path.splitext(
            model_list[i].split("\\")[-1])[0]  # for Windows: '\\', Linux: '/'
        print(
            '{}: =============================== {}: {} ==============================='
            .format(datetime.now().strftime('%c'), i, model_name))

        # Load the model weights
        #model = load_model(save_model_path)
        model = model_unet.unet(IMAGE_SIZE=IMAGE_SIZE)
        model.load_weights(model_list[i])

        # Predict and post-process the prediction
        y_pre = model.predict(x_pre_4d)
        y_pre = np.squeeze(y_pre, axis=0)
        y_pre = np.round(y_pre)

        # Evaluate the model
        scores = model.evaluate(x_pre_4d, y_label_4d, verbose=1)
        print('{}: loss: {:.5f}'.format(datetime.now().strftime('%c'),
                                        scores[0]))
        print('{}: acc: {:.5f}'.format(datetime.now().strftime('%c'),
                                       scores[1]))
        precision, recall, f1 = calculation(y_label, y_pre, IMAGE_SIZE)
        # Save the evaluation results
        tmp = {
            "model_name": model_name,
            "Loss": str(round(scores[0], 5)),
            "Accuracy": str(round(scores[1], 5)),
            "Precision": str(round(precision, 5)),
            "Recall": str(round(recall, 5)),
            "F1": str(round(f1, 5))
        }

        result['data'].append(tmp)

        # Save the prediction image
        y_pre *= 255
        pre_image_name = model_name + '.png'
        pre_image_result = os.path.join(pre_image_dir, pre_image_name)  # result image path
        cv2.imwrite(pre_image_result, y_pre)
        result['pre_image'] = pre_image_result
        print('{}: Saved prediction result for model {}!'.format(
            datetime.now().strftime('%c'), model_name))

    # Save as JSON
    with open(os.path.join(pre_image_dir, 'evaluated_results.txt'),
              'w',
              encoding="utf-8") as f:
        f.write(json.dumps(result))
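
A minimal usage sketch, assuming the checkpoints live under a hypothetical ./weights/ directory:

import glob

# Hypothetical invocation: evaluate every saved checkpoint on one image.
model_list = glob.glob('./weights/*.h5')
model_prediction(image_path='./data/test.png',
                 label_path='./data/test_label.png',
                 IMAGE_SIZE=256,
                 model_list=model_list,
                 pre_image_dir='./results')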
Example #4
def training(path_save_spectrogram, weights_path, name_model,
             training_from_scratch, epochs, batch_size):
    """ This function will read noisy voice and clean voice spectrograms created by data_creation mode,
    and train a Unet model on this dataset for epochs and batch_size specified. It saves best models to disk regularly.
    If training_from_scratch is set to True it will train from scratch, if set to False, it will train
    from weights (name_model) provided in weights_path
    """
    # Load the noisy-voice and clean-voice spectrograms created by data_creation mode
    X_in = np.load(path_save_spectrogram + 'noisy_voice_amp_db' + ".npy")
    X_ou = np.load(path_save_spectrogram + 'voice_amp_db' + ".npy")
    # The training target is the noise itself: noisy voice minus clean voice
    X_ou = X_in - X_ou

    #Check distribution
    print(stats.describe(X_in.reshape(-1, 1)))
    print(stats.describe(X_ou.reshape(-1, 1)))

    # Scale between -1 and 1
    X_in = scaled_in(X_in)
    X_ou = scaled_ou(X_ou)

    #Check shape of spectrograms
    print(X_in.shape)
    print(X_ou.shape)
    #Check new distribution
    print(stats.describe(X_in.reshape(-1, 1)))
    print(stats.describe(X_ou.reshape(-1, 1)))

    # Reshape for training: add a trailing channel axis
    X_in = X_in.reshape(X_in.shape[0], X_in.shape[1], X_in.shape[2], 1)
    X_ou = X_ou.reshape(X_ou.shape[0], X_ou.shape[1], X_ou.shape[2], 1)

    X_train, X_test, y_train, y_test = train_test_split(X_in,
                                                        X_ou,
                                                        test_size=0.10,
                                                        random_state=42)

    # If training from scratch
    if training_from_scratch:
        print("\nTraining from scratch.\n")
        generator_nn = unet()
    #If training from pre-trained weights
    else:
        pretrained_weights = "{}/{}.h5".format(weights_path, name_model)
        print("\nTraining from pre-trained weights: {}\n".format(
            pretrained_weights))
        generator_nn = unet(pretrained_weights=pretrained_weights)

    # Save the model each epoch, just in case
    weights_name_each = "model_and_weights-{epoch:02d}.h5"

    checkpoint_each = ModelCheckpoint(weights_path + weights_name_each,
                                      monitor='val_loss',
                                      verbose=0,
                                      save_best_only=False,
                                      save_weights_only=False,
                                      mode='auto',
                                      period=1)

    #Save best models to disk during training
    weights_name_best = "model_and_weights-{epoch:02d}-{val_loss:.2f}.h5"
    checkpoint_best = ModelCheckpoint(weights_path + weights_name_best,
                                      verbose=1,
                                      monitor='val_loss',
                                      save_weights_only=False,
                                      save_best_only=True,
                                      mode='auto')

    # TensorBoard callback
    log_dir = "logs/fit/" + datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
    tensorboard_callback = TensorBoard(
        log_dir=log_dir,
        histogram_freq=1,
        write_images=True,
    )

    generator_nn.summary()
    #Training
    history = generator_nn.fit(
        X_train,
        y_train,
        epochs=epochs,
        batch_size=batch_size,
        shuffle=True,
        callbacks=[checkpoint_each, checkpoint_best, tensorboard_callback],
        verbose=1,
        validation_data=(X_test, y_test))

    #Plot training and validation loss (log scale)
    loss = history.history['loss']
    val_loss = history.history['val_loss']
    epochs = range(1, len(loss) + 1)

    plt.plot(epochs, loss, label='Training loss')
    plt.plot(epochs, val_loss, label='Validation loss')
    plt.yscale('log')
    plt.title('Training and validation loss')
    plt.legend()
    plt.show()
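
For reference, `scaled_in` and `scaled_ou` are helpers imported from the project's data tools; a plausible sketch of the fixed affine scaling they perform on the dB-scale spectrograms (the constants below are illustrative assumptions):

# Hypothetical scalers: map amplitude-in-dB spectrograms roughly into [-1, 1].
# The offsets and divisors are assumptions chosen for illustration.
def scaled_in(matrix_spec):
    return (matrix_spec + 46) / 50

def scaled_ou(matrix_spec):
    return (matrix_spec - 6) / 82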
Example #5
                                     val_path=val_path,
                                     image_folder='images',
                                     mask_folder='groundtruth',
                                     target_size=(400, 400),
                                     seed=1)

print("Build model and training...")

# Build models
print("Build the standard Seg-Net model ...")
model_segnet_1 = segnet(n_filter=32, activation='relu', loss=dice_loss)

print("Build the standard U-Net model ...")
model_unet_1 = unet(n_filter=32,
                    activation='relu',
                    dropout=True,
                    dropout_rate=0.2,
                    loss=dice_loss)

print("Build the U-Net model with dilated block...")
model_dunet_1 = unet_dilated(n_filter=32,
                             activation='relu',
                             dropout=True,
                             dropout_rate=0.2,
                             loss=dice_loss)

print("Build the U-Net model with attention gate...")
model_unet_Attention_1 = unet_Attention(n_filter=32,
                                        dropout=True,
                                        dropout_rate=0.2,
                                        activation='relu',
                                        loss=dice_loss)
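
`dice_loss` is imported from elsewhere in the project; a common Keras-backend formulation is sketched below (the smoothing constant is an assumption):

from keras import backend as K

# Hedged sketch of a Dice loss: 1 - Dice coefficient, smoothed for stability.
def dice_loss(y_true, y_pred, smooth=1.0):
    y_true_f = K.flatten(y_true)
    y_pred_f = K.flatten(y_pred)
    intersection = K.sum(y_true_f * y_pred_f)
    dice = (2.0 * intersection + smooth) / (K.sum(y_true_f) + K.sum(y_pred_f) + smooth)
    return 1.0 - dice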
Example #6
def training(data_noise_dir, data_voice_dir, spectrogram_dir, weights_dir,
             model_name, training_from_scratch, epochs, batch_size):

    # Train over 10 data shards: shard 0 trains from scratch, later shards
    # resume from the weights saved by the previous iteration.
    for i in range(10):
        training_from_scratch = (i == 0)
        x_in = np.load(
            spectrogram_dir + str(i) +
            f'{data_voice_dir}_{data_noise_dir}_noisy_voice_amp_db' + ".npy")
        x_out = np.load(spectrogram_dir + str(i) +
                        f'{data_voice_dir}_voice_amp_db' + ".npy")

        x_out = x_in - x_out

        print(stats.describe(x_in.reshape(-1, 1)))
        print(stats.describe(x_out.reshape(-1, 1)))

        x_in = scaled_in(x_in)
        x_out = scaled_out(x_out)

        print(x_in.shape)
        print(x_out.shape)

        print(stats.describe(x_in.reshape(-1, 1)))
        print(stats.describe(x_out.reshape(-1, 1)))

        # Add a trailing channel axis for training
        x_in = x_in.reshape(x_in.shape[0], x_in.shape[1], x_in.shape[2], 1)
        x_out = x_out.reshape(x_out.shape[0], x_out.shape[1], x_out.shape[2],
                              1)

        x_train, x_test, y_train, y_test = train_test_split(x_in,
                                                            x_out,
                                                            test_size=0.10,
                                                            random_state=42)

        # Train from scratch on the first shard, otherwise resume
        if training_from_scratch:
            generator_nn = unet()
        else:
            generator_nn = unet(pretrained_weights=weights_dir + model_name +
                                '.h5')

        checkpoint = ModelCheckpoint(weights_dir + model_name + '.h5',
                                     verbose=1,
                                     monitor='val_loss',
                                     save_best_only=True,
                                     mode='auto')

        generator_nn.summary()
        history = generator_nn.fit(x_train,
                                   y_train,
                                   epochs=epochs,
                                   batch_size=batch_size,
                                   shuffle=True,
                                   callbacks=[checkpoint],
                                   verbose=1,
                                   validation_data=(x_test, y_test))
        model_json = generator_nn.to_json()
        with open(f"{weights_dir + model_name}.json", "w") as json_file:
            json_file.write(model_json)
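
Because the architecture is serialized with `to_json` above, the model can be rebuilt later without the original model code; a minimal reload sketch using the same paths as the snippet:

from keras.models import model_from_json

# Rebuild the architecture from JSON, then restore the checkpointed weights.
with open(f"{weights_dir + model_name}.json") as json_file:
    restored_nn = model_from_json(json_file.read())
restored_nn.load_weights(weights_dir + model_name + '.h5')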
Example #7
from model_unet import unet
from keras_datagenh5 import DataGeneratorH5
import keras
from keras.optimizers import Adam

model = unet(rows=480, cols=640, channels=1)

bsize = 5
steps = 3500 // bsize  # integer number of batches per epoch
valsteps = 500 // bsize
num_epochs = 20

train_generator = DataGeneratorH5('train_warped.h5',
                                  'train_notwarped.h5',
                                  batch_size=bsize,
                                  rescale=1 / 255)
val_generator = DataGeneratorH5('test_warped.h5',
                                'test_notwarped.h5',
                                batch_size=bsize,
                                rescale=1 / 255)

callbacks = [
    keras.callbacks.TensorBoard(log_dir="log",
                                histogram_freq=0,
                                write_graph=True,
                                write_images=False),
    keras.callbacks.ModelCheckpoint('log/model_20epochs.h5',
                                    verbose=0,
                                    save_weights_only=True)
]
adam = Adam(lr=0.00005)
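
The snippet is cut off after the optimizer; a hedged sketch of how these pieces typically plug together in this version of Keras (the loss choice is an assumption):

# Hypothetical continuation: compile with the Adam optimizer and train
# from the HDF5 generators defined above.
model.compile(optimizer=adam, loss='mean_squared_error')
model.fit_generator(train_generator,
                    steps_per_epoch=steps,
                    epochs=num_epochs,
                    validation_data=val_generator,
                    validation_steps=valsteps,
                    callbacks=callbacks)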