Example #1
def model(train_gen, valid_gen):

    n_classes = 196
    steps_per_epoch = 204
    validation_steps = 51
    channels = 3
    output = "label_bbox"

    # Tensorboard
    root_logdir = os.path.join(os.curdir, 'logs')
    run_id = time.strftime("run_%Y_%m_%d-%H_%M_%S")
    run_logdir = os.path.join(root_logdir, run_id)
    tensorboard = TensorBoard(run_logdir)

    # Early Stopping
    early_stopping = EarlyStopping(patience=15, restore_best_weights=True)

    # Model Checkpoints
    checkpoint = ModelCheckpoint(
        filepath='checkpoints/epoch.{epoch:02d}_val_loss.{val_loss:.6f}.h5',
        verbose=1, save_best_only=True)

    #Reduce LR on Plateau
    reduce_lr = ReduceLROnPlateau(monitor='val_loss',
                                  factor=0.3,
                                  patience=5,
                                  min_lr=1e-4,
                                  verbose=1)

    callbacks = [tensorboard, early_stopping, checkpoint, reduce_lr]

    DefaultConv2D = partial(
        Conv2D,
        kernel_size=3,
        kernel_initializer="he_normal",
        kernel_regularizer=l2({{uniform(1e-5, 1)}}),
        # activation="relu",
        padding="same")

    input = Input(shape=(224, 224, channels))

    # =============================================================================
    #     conv_1a = DefaultConv2D(filters=64,
    #                             kernel_size=5,
    #                             strides=2)(input)
    #     norm_1a = BatchNormalization()(conv_1a)
    #     relu_1a = Activation(activation="relu")(norm_1a)
    #
    #     max_pool_1 = MaxPooling2D(pool_size=2)(relu_1a)
    #
    #     conv_1b = DefaultConv2D(filters=64)(max_pool_1)
    #     norm_1b = BatchNormalization()(conv_1b)
    #     relu_1b = Activation(activation="relu")(norm_1b)
    #     conv_1c = DefaultConv2D(filters=64)(relu_1b)
    #     norm_1c = BatchNormalization()(conv_1c)
    #     relu_1c = Activation(activation="relu")(norm_1c)
    #
    #     max_pool_2 = MaxPooling2D(pool_size=2)(relu_1c)
    #
    #     conv_2a = DefaultConv2D(filters=128)(max_pool_2)
    #     norm_2a = BatchNormalization()(conv_2a)
    #     relu_2a = Activation(activation="relu")(norm_2a)
    #     conv_2b = DefaultConv2D(filters=128)(relu_2a)
    #     norm_2b = BatchNormalization()(conv_2b)
    #     relu_2b = Activation(activation="relu")(norm_2b)
    #
    #     max_pool_3 = MaxPooling2D(pool_size=2)(relu_2b)
    #
    #     flatten = Flatten()(max_pool_3)
    # =============================================================================

    choice_activation = {{choice(['relu', 'leaky_relu'])}}

    conv = DefaultConv2D(filters=64, strides=2)(input)
    norm = BatchNormalization()(conv)
    if choice_activation == 'relu':
        act = Activation(activation="relu")(norm)
    else:
        act = LeakyReLU(0.2)(norm)
    conv = DefaultConv2D(filters=64)(act)
    norm = BatchNormalization()(conv)
    if choice_activation == 'relu':
        act = Activation(activation="relu")(norm)
    else:
        act = LeakyReLU(0.2)(norm)

    choice_pool = {{choice(['max', 'avg'])}}
    if choice_pool == 'max':
        x = MaxPooling2D(pool_size=2)(act)
    else:
        x = AveragePooling2D(pool_size=2)(act)

    for filters in [64] * 3:
        x = Residual(filters, activation=choice_activation)(x)

    res_block_depth_1 = {{choice([2, 3])}}
    x = Residual(128, strides=2, activation=choice_activation)(x)
    for filters in [128] * res_block_depth_1:
        x = Residual(filters, activation=choice_activation)(x)

    res_block_depth_2 = {{choice([4, 5])}}
    x = Residual(256, strides=2, activation=choice_activation)(x)
    for filters in [256] * res_block_depth_2:
        x = Residual(filters, activation=choice_activation)(x)

    x = Residual(512, strides=2, activation=choice_activation)(x)
    for filters in [512] * 2:
        x = Residual(filters, activation=choice_activation)(x)

    x = GlobalAvgPool2D()(x)

    drop = Dropout({{uniform(0, .5)}})(x)
    dense = Dense(512)(drop)
    norm = BatchNormalization()(dense)
    if choice_activation == 'relu':
        act = Activation(activation="relu")(norm)
    else:
        act = LeakyReLU(0.2)(norm)

    # If we choose 'yes', add an additional dense layer
    choice_dense = {{choice(['yes', 'no'])}}
    if choice_dense == 'yes':
        drop = Dropout({{uniform(0, .5)}})(act)
        dense = Dense(256)(drop)
        norm = BatchNormalization()(dense)
        if choice_activation == 'relu':
            act = Activation(activation="relu")(norm)
        else:
            act = LeakyReLU(0.2)(norm)

    drop = Dropout({{uniform(0, .5)}})(act)
    class_output = Dense(n_classes, activation="softmax", name="labels")(drop)
    bbox_output = Dense(units=4, name="bbox")(drop)

    # Optimizer
    sgd = SGD(lr={{uniform(.0001, .3)}})
    adam = Adam(lr={{uniform(.0001, .3)}})

    choice_opt = {{choice(['adam', 'sgd'])}}
    if choice_opt == 'adam':
        optimizer = adam
    else:
        optimizer = sgd

    model = None
    if output == "bbox":
        model = Model(inputs=input, outputs=bbox_output)
        model.compile(loss="msle", optimizer=optimizer, metrics=["accuracy"])

    elif output == "label":
        model = Model(inputs=input, outputs=class_output)
        model.compile(loss="categorical_crossentropy",
                      optimizer=optimizer,
                      metrics=["accuracy"])
    else:
        model = Model(inputs=input, outputs=[class_output, bbox_output])
        model.compile(loss=["categorical_crossentropy", "msle"],
                      loss_weights=[.8, .2],
                      optimizer=optimizer,
                      metrics=["accuracy"])
    model.fit(train_gen,
              epochs=400,
              steps_per_epoch=steps_per_epoch,
              validation_data=valid_gen,
              validation_steps=validation_steps,
              callbacks=callbacks,
              verbose=1)

    # =============================================================================
    #     #print(model.metrics_names)
    #     # ['loss', 'labels_loss', 'bbox_loss', 'labels_acc', 'bbox_acc']
    #     validation_loss = np.amin(result.history['loss'])
    #     print('Best validation loss of epoch:', validation_loss)
    # =============================================================================

    results = model.evaluate_generator(valid_gen,
                                       steps=validation_steps,
                                       verbose=0)
    validation_loss = results[0]
    print('val_labels_acc', results[3])
    print('val_bbox_acc', results[4])
    print('Validation Loss:', validation_loss)

    return {'loss': validation_loss, 'status': STATUS_OK, 'model': model}
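
# The double-brace expressions above ({{choice(...)}}, {{uniform(...)}}) are hyperas
# search-space templates, and the dict returned with STATUS_OK is what hyperopt minimizes.
# A minimal sketch of how such a template function is usually driven; the data() helper
# and its make_generators() factory are assumptions, not part of the original example:
from hyperopt import Trials, tpe
from hyperas import optim

def data():
    # assumed helper: must return exactly the arguments model() expects
    train_gen, valid_gen = make_generators()  # hypothetical generator factory
    return train_gen, valid_gen

best_run, best_model = optim.minimize(model=model,
                                      data=data,
                                      algo=tpe.suggest,
                                      max_evals=20,
                                      trials=Trials())
print('Best hyperparameters found:', best_run)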
Example #2
regressor.add(Dropout(0.0))

regressor.add(LSTM(units=25, return_sequences=True))
regressor.add(Dropout(0.0))

regressor.add(LSTM(units=10))
regressor.add(Dropout(0.0))

regressor.add(Dense(units=1, activation='sigmoid'))

regressor.compile(optimizer='RMSprop',
                  loss='mean_squared_error',
                  metrics=['mean_squared_error'])
#
es = EarlyStopping(monitor='loss', min_delta=1e-6, patience=10, verbose=1)
rlr = ReduceLROnPlateau(monitor='loss', factor=0.01, patience=5, verbose=1)
mcp = ModelCheckpoint(filepath='pesos.h5',
                      monitor='loss',
                      save_best_only=True,
                      verbose=1)
regressor.fit(previsores,
              preco_real,
              epochs=100,
              batch_size=32,
              callbacks=[es, rlr, mcp])
#
preco_real_teste = base_teste.iloc[:, 3:4].values
frames = [base_teste, base_treinamento]
base_completa = pd.concat(frames)
#
entradas = base_completa[len(base_completa) - len(base_teste) - 90:].values
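
# A hedged sketch of how the 90-step window prepared above is typically turned into
# test sequences for the fitted regressor; `normalizador` (the MinMaxScaler fitted on
# the training data) comes from the elided part of this example and is an assumption here:
import numpy as np

entradas = entradas.reshape(-1, 1)
entradas = normalizador.transform(entradas)  # assumed scaler from the training step

X_teste = []
for i in range(90, len(entradas)):
    X_teste.append(entradas[i - 90:i, 0])  # sliding window of the previous 90 prices
X_teste = np.array(X_teste)
X_teste = np.reshape(X_teste, (X_teste.shape[0], X_teste.shape[1], 1))

previsoes = regressor.predict(X_teste)
previsoes = normalizador.inverse_transform(previsoes)  # back to the original price scale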
Example #3
    
    class_num = 50
    batch_size = 16
    
    x_train, y_train, x_val, y_val, x_test, y_test = utils.split_dataset()
    
    train_sequence = utils.Train_Sequence(x_train, y_train, batch_size)
    val_sequence = utils.Val_Sequence(x_val, y_val, batch_size)
    #test_sequence = utils.Val_Sequence(x_test, y_test, batch_size)
    
    model = build_model(class_num)    
    
    lr_scheduler = LearningRateScheduler(lr_schedule)

    lr_reducer = ReduceLROnPlateau(factor=np.sqrt(0.1),
                                   cooldown=0,
                                   patience=3,
                                   min_lr=1e-6)
                                                      
    earlystop = EarlyStopping(monitor='val_loss', min_delta=0.0001, patience=5, verbose=1, mode='auto')

    model.compile(loss='categorical_crossentropy', 
                  optimizer=optimizers.SGD(lr=lr_schedule(0), momentum=0.6, nesterov=True), 
                  metrics=["accuracy"])
              
    model.summary()

    callbacks = [lr_scheduler, lr_reducer]
    
    history = model.fit_generator(train_sequence,
                                  steps_per_epoch=int(np.ceil(x_train.shape[0] / batch_size)),
                                  epochs=150,
# autoencoder = encoder + decoder
# instantiate autoencoder model
autoencoder = Model(inputs, decoder(encoder(inputs)), name='autoencoder')
autoencoder.summary()

# prepare model saving directory.
save_dir = os.path.join(os.getcwd(), 'saved_models')
model_name = 'colorized_ae_model.{epoch:03d}.h5'
if not os.path.isdir(save_dir):
    os.makedirs(save_dir)
filepath = os.path.join(save_dir, model_name)

# reduce learning rate by sqrt(0.1) if the loss does not improve in 5 epochs
lr_reducer = ReduceLROnPlateau(factor=np.sqrt(0.1),
                               cooldown=0,
                               patience=5,
                               verbose=1,
                               min_lr=0.5e-6)

# save weights for future use (e.g. reload parameters w/o training)
checkpoint = ModelCheckpoint(filepath=filepath,
                             monitor='val_loss',
                             verbose=1,
                             save_best_only=True)

# Mean Square Error (MSE) loss function, Adam optimizer
autoencoder.compile(loss='mse', optimizer='adam')

# called every epoch
callbacks = [lr_reducer, checkpoint]
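
# A hedged sketch of how the callbacks above are typically passed to training for this
# colorization autoencoder; the grayscale/color array names (x_train_gray, x_train,
# x_test_gray, x_test) are assumptions from the elided data-preparation part:
autoencoder.fit(x_train_gray,                          # grayscale inputs (assumed name)
                x_train,                               # color targets (assumed name)
                validation_data=(x_test_gray, x_test),
                epochs=30,
                batch_size=32,
                callbacks=callbacks)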
Example #5
)

model.summary()
# model.trainable = False

model.save('C:/nmb/nmb_data/h5/5s/speech_vgg/speechvgg_sgd_1.h5')

# compile, train
op = SGD(lr=1e-3)
batch_size = 16

es = EarlyStopping(monitor='val_loss',
                   patience=20,
                   restore_best_weights=True,
                   verbose=1)
lr = ReduceLROnPlateau(monitor='val_loss', factor=0.5, patience=10, verbose=1)
path = 'C:/nmb/nmb_data/h5/5s/speech_vgg/speechvgg_sgd_1.h5'
mc = ModelCheckpoint(path, monitor='val_loss', verbose=1, save_best_only=True)
tb = TensorBoard(log_dir='C:/study/graph/' +
                 start_now.strftime("%Y%m%d-%H%M%S") + "/",
                 histogram_freq=0,
                 write_graph=True,
                 write_images=True)

model.compile(optimizer=op,
              loss="sparse_categorical_crossentropy",
              metrics=['acc'])
history = model.fit(x_train,
                    y_train,
                    epochs=1000,
                    batch_size=batch_size,
Example #6
                                    graph_scale=1)
train_generator.get_config()

model = StackedDenseNet(train_generator, n_stacks=2, growth_rate=32, pretrained=True)

#model = DeepLabCut(train_generator, backbone="resnet50")
#model = DeepLabCut(train_generator, backbone="mobilenetv2", alpha=0.35) # Increase alpha to improve accuracy
#model = DeepLabCut(train_generator, backbone="densenet121")

#model = LEAP(train_generator)
#model = StackedHourglass(train_generator)

model.get_config()

logger = Logger(validation_batch_size=10)
reduce_lr = ReduceLROnPlateau(monitor="val_loss", factor=0.2, verbose=1, patience=20)

model_checkpoint = ModelCheckpoint('/n/home10/abahl/engert_storage_armin/maxwell_paper/deepposekit_training/my_best_model.h5',
    monitor="val_loss",
    # monitor="loss" # use if validation_split=0
    verbose=1,
    save_best_only=True,
)

early_stop = EarlyStopping(
    monitor="val_loss",
    # monitor="loss" # use if validation_split=0
    min_delta=0.001,
    patience=100,
    verbose=1
)
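
# A hedged sketch of how these deepposekit callbacks are typically wired into training;
# the epoch count, batch sizes and worker count below are assumptions:
model.fit(
    batch_size=16,
    validation_batch_size=10,
    callbacks=[logger, reduce_lr, model_checkpoint, early_stop],
    epochs=200,
    n_workers=8,
    steps_per_epoch=None,
)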
Example #7
    def __init__(self,
                 stop_patience=10,
                 lr_factor=0.5,
                 lr_patience=1,
                 lr_epsilon=0.001,
                 lr_cooldown=4,
                 lr_minimum=1e-5,
                 outputDir='',
                 debug=1):

        self.nl_begin = newline_callbacks_begin(outputDir)
        self.nl_end = newline_callbacks_end()

        self.stopping = EarlyStopping(monitor='val_loss',
                                      patience=stop_patience,
                                      verbose=debug,
                                      mode='min')

        self.reduce_lr = ReduceLROnPlateau(monitor='val_loss',
                                           factor=lr_factor,
                                           patience=lr_patience,
                                           mode='min',
                                           verbose=debug,
                                           epsilon=lr_epsilon,
                                           cooldown=lr_cooldown,
                                           min_lr=lr_minimum)

        self.modelbestcheck = ModelCheckpoint(outputDir +
                                              "/KERAS_check_best_model.h5",
                                              monitor='val_loss',
                                              verbose=debug,
                                              save_best_only=True)

        self.modelbestcheckweights = ModelCheckpoint(
            outputDir + "/KERAS_check_best_model_weights.h5",
            monitor='val_loss',
            verbose=debug,
            save_best_only=True,
            save_weights_only=True)

        self.modelcheckperiod = ModelCheckpoint(
            outputDir + "/KERAS_check_model_epoch{epoch:02d}.h5",
            verbose=debug,
            period=10)

        self.modelcheck = ModelCheckpoint(outputDir +
                                          "/KERAS_check_model_last.h5",
                                          verbose=debug)

        self.modelcheckweights = ModelCheckpoint(
            outputDir + "/KERAS_check_model_last_weights.h5",
            verbose=debug,
            save_weights_only=True)

        self.tb = TensorBoard(log_dir=outputDir + '/logs')

        self.history = History()
        self.timer = Losstimer()

        # self.callbacks=[
        #     self.nl_begin,
        #     self.modelbestcheck,self.modelbestcheckweights, self.modelcheck,self.modelcheckweights,self.modelcheckperiod,
        #     self.reduce_lr, self.stopping, self.nl_end, self.tb, self.history,
        #     self.timer
        # ]
        self.callbacks = [
            self.nl_begin, self.modelbestcheck, self.modelbestcheckweights,
            self.reduce_lr, self.stopping, self.nl_end, self.history,
            self.timer
        ]
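
# A hedged usage sketch, assuming the __init__ above belongs to a callback-bundle class;
# the class name all_callbacks, the model and the training arrays are assumptions:
cbs = all_callbacks(stop_patience=15, outputDir='training_out')
model.fit(x_train, y_train,
          validation_split=0.1,
          epochs=100,
          batch_size=256,
          callbacks=cbs.callbacks)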
Example #8
import autokeras as ak
import tensorflow as tf
from sklearn.datasets import load_breast_cancer
from sklearn.model_selection import train_test_split
from tensorflow.keras.callbacks import EarlyStopping, ReduceLROnPlateau
from tensorflow.keras.models import load_model

x_train, x_test, y_train, y_test = train_test_split(load_breast_cancer().data, load_breast_cancer().target, train_size = 0.8, random_state = 77)
print(x_train.shape)
print(x_test.shape)
print(y_train.shape)
print(y_test.shape)

model = ak.StructuredDataRegressor(overwrite = True,
                                   max_trials = 1,
                                   loss = 'mse',
                                   metrics = ['acc'])

es = EarlyStopping(monitor = 'val_loss', mode = 'min', patience=6)
lr = ReduceLROnPlateau(monitor = 'val_loss', patience=3, factor = 0.5, verbose = 2)
model.fit(x_train, y_train, epochs = 1, callbacks = [es, lr], validation_split = 0.2)

# SAVE Best Model
# model = model.export_model()
best_model = model.tuner.get_best_model()
best_model.save('C:/data/h5/best_cancer.h5')

# LOAD Best Model
best_model = load_model('C:/data/h5/best_cancer.h5')
results = best_model.evaluate(x_test, y_test)
print('results: ', results)
best_model.summary()
def main(_argv):
    # use the GPU
    physical_devices = tf.config.experimental.list_physical_devices('GPU')
    for physical_device in physical_devices:
        tf.config.experimental.set_memory_growth(physical_device, True)

    if FLAGS.tiny:  # tiny model
        model = YoloV3Tiny(FLAGS.size,
                           training=True,
                           classes=FLAGS.num_classes)
        anchors = yolo_tiny_anchors  # anchor boxes
        anchor_masks = yolo_tiny_anchor_masks  # indices of the corresponding anchor boxes

    if FLAGS.dataset:  # load the training dataset
        train_dataset = dataset.load_tfrecord_dataset(FLAGS.dataset,
                                                      FLAGS.classes,
                                                      FLAGS.size)
    # shuffle the order of the dataset (very common during training); keeps a buffer of the first buffer_size items
    train_dataset = train_dataset.shuffle(buffer_size=512)
    # batch the data: take batch_size rows in order; the last batch may be smaller
    train_dataset = train_dataset.batch(FLAGS.batch_size)
    # training data in (x, y) format
    train_dataset = train_dataset.map(lambda x, y: (
        dataset.transform_images(x, FLAGS.size),
        dataset.transform_targets(y, anchors, anchor_masks, FLAGS.size)))
    # prefetch buffer_size items ahead of time
    train_dataset = train_dataset.prefetch(
        buffer_size=tf.data.experimental.AUTOTUNE)

    if FLAGS.val_dataset:  # load the validation dataset
        val_dataset = dataset.load_tfrecord_dataset(FLAGS.val_dataset,
                                                    FLAGS.classes, FLAGS.size)
    # batch the data: take batch_size rows in order; the last batch may be smaller
    val_dataset = val_dataset.batch(FLAGS.batch_size)
    # validation data in (x, y) format
    val_dataset = val_dataset.map(lambda x, y: (
        dataset.transform_images(x, FLAGS.size),
        dataset.transform_targets(y, anchors, anchor_masks, FLAGS.size)))

    # optimizer
    optimizer = tf.keras.optimizers.Adam(lr=FLAGS.learning_rate)
    # loss function
    loss = [
        YoloLoss(anchors[mask], classes=FLAGS.num_classes)
        for mask in anchor_masks
    ]

    # compile the model
    model.compile(optimizer=optimizer, loss=loss, run_eagerly=False)
    # list of callbacks
    callbacks = [
        ReduceLROnPlateau(verbose=1),
        EarlyStopping(patience=3, verbose=1),
        ModelCheckpoint('checkpoints/yolov3_tiny_train_{epoch}.tf',
                        verbose=1,
                        save_weights_only=True),
        TensorBoard(log_dir='logs')
    ]
    # train the model
    history = model.fit(train_dataset,
                        epochs=FLAGS.epochs,
                        callbacks=callbacks,
                        validation_data=val_dataset)
Example #10
model.add(MaxPool2D(pool_size=(2, 2), strides=(2, 2)))
#model.add(Dropout(0.25))

model.add(Flatten())
model.add(Dense(128, activation="relu"))
model.add(Dropout(0.5))
model.add(Dense(10, activation="softmax"))

model.compile(loss='categorical_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])

optimizer = tf.keras.optimizers.Adam()
learning_rate_reduction = ReduceLROnPlateau(monitor='val_loss',
                                            patience=2,
                                            verbose=1,
                                            factor=0.2,
                                            min_lr=0.00001)
ckpt = ModelCheckpoint('cnn_model_adam.h5',
                       verbose=1,
                       save_weights_only=True,
                       save_best_only=True)
epochs = 50
batch_size = 86

model.compile(optimizer=optimizer,
              loss="categorical_crossentropy",
              metrics=["accuracy"])

train_datagen = ImageDataGenerator(featurewise_center=False,
                                   featurewise_std_normalization=False,
Example #11
Ghr_0 = G_0 * hr
Ghr = np.reshape(Ghr_0, (total_num, -1))

train_dataset = np.concatenate(
    (np.real(Ghr), np.imag(Ghr), np.real(hd), np.imag(hd)), axis=-1)
scaler = preprocessing.StandardScaler().fit(train_dataset)
train_dataset = scaler.transform(train_dataset)

best_model_path = './models/bigscale/best_%d_and_%d.h5' % (M, N)
checkpointer = ModelCheckpoint(
    best_model_path, verbose=1, save_best_only=True,
    save_weights_only=True)  #True for Lambda layer usage
reduce_lr = ReduceLROnPlateau(monitor='val_loss',
                              factor=0.333,
                              patience=10,
                              verbose=1,
                              mode='auto',
                              min_delta=0.0001,
                              min_lr=0.00001)
early_stopping = EarlyStopping(monitor='val_loss',
                               min_delta=0.0001,
                               patience=20)

model = beamformer_LIS(M, N, lr)
model.fit(train_dataset,origin_dataset,epochs=epochs,batch_size=batch_size,verbose=1,shuffle=True,\
                       validation_split=0.2,callbacks=[checkpointer,early_stopping,reduce_lr])

#%% testing
test_num = 10000

d0_test = np.random.uniform(0, 8, test_num)
Example #12
def get_callbacks():
    # return [EarlyStopping(patience=20), ReduceLROnPlateau()]
    return [ReduceLROnPlateau()]
model.add(Dropout(0.3))

model.add(Flatten())

model.add(Dense(128, activation='relu'))
model.add(BatchNormalization())
model.add(Dropout(0.3))
model.add(Dense(64, activation='relu'))
model.add(BatchNormalization())
model.add(Dropout(0.3))

model.add(Dense(10, activation='softmax'))

from tensorflow.keras.callbacks import EarlyStopping, ReduceLROnPlateau
early_stopping = EarlyStopping(monitor='val_loss', patience=10, mode='min')
reduce_lr = ReduceLROnPlateau(monitor='val_loss', patience=10)

model.compile(loss='categorical_crossentropy',
              optimizer='adam',
              metrics=['acc'])
model.fit(x_train,
          y_train,
          validation_split=0.2,
          batch_size=20,
          epochs=100,
          callbacks=[reduce_lr, early_stopping])

loss = model.evaluate(x_test, y_test)
print('loss, acc : ', loss)

# loss, acc :  [0.5301111340522766, 0.8363000154495239]
Example #14
from tensorflow.keras.layers import Embedding, Dense, LSTM, Conv2D, Flatten, BatchNormalization, Dropout
model = Sequential()
#model.add(Embedding(input_dim = 10000, output_dim = 230, input_length = 100))
model.add(LSTM(128, input_shape = (100, 1)))
model.add(Dense(32, activation='relu'))
model.add(BatchNormalization())
model.add(Dense(64, activation='relu'))
model.add(BatchNormalization())
model.add(Dense(32, activation='relu'))
model.add(Dense(1, activation='sigmoid'))
model.summary()

#3. compile, train
from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint, ReduceLROnPlateau
es = EarlyStopping(monitor = 'val_loss', patience = 30, verbose= 1, mode = 'auto')
rl = ReduceLROnPlateau(monitor='val_loss', patience = 20, factor = 0.3, verbose = 1, mode = 'auto')
model.compile(optimizer = 'adam', loss = 'binary_crossentropy', metrics = ['acc'])
history = model.fit(x_train, y_train, epochs = 200, batch_size = 128, validation_data = (x_val, y_val), verbose = 1 ,callbacks = [es, rl])


#4. evaluate, predict
loss, acc = model.evaluate(x_test, y_test)
print("loss : ", loss)
print("acc : ", acc)

# loss :  1.9248653650283813
# acc :  0.8248800039291382

# visualization
epochs = range(1, len(history.history['acc']) + 1)
plt.plot(epochs, history.history['loss'])
Example #15
    verbose=1,
    mode='min',
    restore_best_weights=True
)

mcp_save = ModelCheckpoint(
    os.path.join(MODEL_DIR, 'fold_' + str(FOLD) + '_epoch_{epoch:02d}'),
    save_best_only=True,
    monitor='val_accuracy',
    mode='max',
    verbose=1
)
reduce_lr_loss = ReduceLROnPlateau(
    monitor='val_loss',
    factor=0.1,
    patience=7,
    verbose=1,
    epsilon=1e-4,
    mode='min'
)
# To show balanced accuracy
metrics = Metrics((testX, testY), model)

# compile model
print('Compiling model...')
opt = Adam(lr=LR, decay=LR / EPOCHS)
loss = (
    tf.keras.losses.CategoricalCrossentropy() if not LOG_SOFTMAX else (
        lambda labels, targets: tf.reduce_mean(
            tf.reduce_sum(
                -1 * tf.math.multiply(tf.cast(labels, tf.float32), targets),
                axis=1
Example #16
        Y_val = Y[val, ...]

        X_unused = X[unused_samples, ...]
        Y_unused = Y[unused_samples, ...]

        model = get_model()

        model_name = "model.h5"

        check = ModelCheckpoint(filepath=model_name,
                                monitor="val_acc",
                                save_best_only=True,
                                verbose=1,
                                save_weights_only=True)
        reduce = ReduceLROnPlateau(monitor="val_acc",
                                   patience=30,
                                   verbose=1,
                                   min_lr=1e-7)

        early = EarlyStopping(patience=40, monitor="val_acc")

        model.fit(X_used,
                  Y_used,
                  epochs=120,
                  verbose=1,
                  batch_size=32,
                  validation_data=(X_val, Y_val),
                  callbacks=[check, reduce, early])

        model.load_weights(model_name)

        pred_test = model.predict(X_test, batch_size=1024)
Example #17

parameters = param()

model = KerasClassifier(build_fn=dnn_model, verbose=1)

search = RandomizedSearchCV(model, parameters, cv=3)
# search = GridSearchCV(model, parameters, cv=3)

filepath = '../data/modelcheckpoint/k62_iris_{epoch:02d}-{val_loss:.4f}.hdf5'
es = EarlyStopping(monitor='val_loss', patience=10, mode='auto')
cp = ModelCheckpoint(filepath=filepath,
                     monitor='val_loss',
                     save_best_only=True,
                     mode='auto')
lr = ReduceLROnPlateau(monitor='val_loss', patience=3, factor=0.5, mode='auto')
search.fit(x_train,
           y_train,
           validation_split=0.2,
           verbose=1,
           epochs=100,
           callbacks=[es, lr])  # cp

print(search.best_params_)
print(search.best_estimator_)
print(search.best_score_)  # differs from the score below.
acc = search.score(x_test, y_test)
print('final acc :', acc)

# {'optimizer': 'nadam', 'node': 256, 'drop': 0.2, 'batch_size': 20, 'activation': 'selu'}
# search.best_score_ : 0.9435579180717468
model.add(GlobalMaxPooling1D())
model.add(Dropout(0.5))

model.add(Flatten())
model.add(Dropout(0.5))

model.add(Dense(10, activation='softmax'))
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])

epochs = 100
batch_size = 32

history = model.fit(train_padded, training_labels, shuffle=True ,
                    epochs=epochs, batch_size=batch_size, 
                    validation_split=0.2,
                    callbacks=[ReduceLROnPlateau(monitor='val_loss', factor=0.2, patience=10, min_lr=0.0001), 
                               EarlyStopping(monitor='val_loss', mode='min', patience=10, verbose=1),
                               EarlyStopping(monitor='val_accuracy', mode='max', patience=10, verbose=1)])

plt.title('Loss')
plt.plot(history.history['loss'], label='train')
plt.plot(history.history['val_loss'], label='test')
plt.legend()
plt.show();

plt.title('Accuracy')
plt.plot(history.history['accuracy'], label='train')
plt.plot(history.history['val_accuracy'], label='test')
plt.legend()
plt.show();
train_generator = train_datagen.flow_from_directory(trainingdir,
                                                    target_size=(150, 150),
                                                    batch_size=20,
                                                    class_mode='binary')

validation_generator = validation_datagen.flow_from_directory(
    validation_dir, target_size=(150, 150), batch_size=20, class_mode='binary')
from keras.callbacks import ModelCheckpoint, EarlyStopping
# Callbacks
checkpoint = ModelCheckpoint(filepath='best_weights.hdf5',
                             save_best_only=True,
                             save_weights_only=True)
lr_reduce = ReduceLROnPlateau(monitor='val_loss',
                              factor=0.3,
                              patience=2,
                              verbose=2,
                              mode='min')
early_stop = EarlyStopping(monitor='val_loss',
                           min_delta=0.1,
                           patience=1,
                           mode='min')

history = model.fit_generator(train_generator,
                              epochs=20,
                              steps_per_epoch=5126 // 32,
                              validation_steps=624 // 32,
                              validation_data=validation_generator,
                              verbose=1,
                              callbacks=[checkpoint, lr_reduce, early_stop])
"""**fitting the model2**"""
Example #20
def train_survival(base_path: str,
                   excel_path: str,
                   create_csv: bool,
                   training_model_folder: str,
                   epochs: int,
                   mask: bool,
                   mixed_data: bool,
                   dict_struct_data: dict = None,
                   mixed_data_info: list = None) -> dict:
    """
    Retrieves the data (niftis/survival data) and trains the neural network for survival analysis with the given parameters

    Args:
        base_path (str): path to the directory containing the nifti files (CT and PT scans)
        excel_path (str): path to the excel file containing the survival information and structured data
        create_csv (bool): True to save the retrieved data to a csv file.
        training_model_folder (str): path to the directory where the training information will be saved
        epochs (int): number of training epochs
        mask (bool): True if the mask is part of the input data (combined with the PT and CT scans).
        mixed_data (bool): True if structured data is added to the image data. Defaults to False
        dict_struct_data (dict): dictionary mapping each criterion to its column number in the excel file. Defaults to None.
        mixed_data_info (list): list of the different criteria that can be used. Defaults to None.

    Returns:
        (dict): a dictionary containing the losses and metrics computed during training
    """

    now = datetime.now().strftime("%Y%m%d-%H:%M:%S")
    mixed_data_columns = []  #automatic filling
    for i in mixed_data_info:
        mixed_data_columns.append(dict_struct_data[i])

    logdir = os.path.join(
        training_model_folder,
        'logs')  #path for logs of the trained model (tensorboard)
    if not os.path.exists(logdir):
        os.makedirs(logdir)
    ##########################   PARAMETERS INPUT DATA   ##########################

    reduced_size = 0.5  # needed if the batch size is too big for the GPU (halves the image size => reduced_size == 0.5); standard size is (256,128,128) when reduced_size == 0
    val_size = 0.20  # 0.2 => 20% of the data is used for validation
    last_layer = "softmax"  # last layer of the neural network

    ##########################   IMAGE PROCESSING PARAMETERS   ##########################

    modalities = ('pet_img', 'ct_img')  # neural network inputs: CT and PET images
    target_direction = (1, 0, 0, 0, 1, 0, 0, 0, 1)
    target_size = (128, 128, 256)
    target_spacing = (4.0, 4.0, 4.0)
    if reduced_size != 0:
        target_size = tuple(round(x * reduced_size) for x in target_size)
        target_spacing = tuple(round(x / reduced_size) for x in target_spacing)
    ##########################   DATA MANAGER  ##########################

    DM = DataManagerSurvival(base_path,
                             excel_path,
                             mask,
                             mixed_data,
                             mixed_data_columns,
                             pod24=False)
    DM.get_data_survival(create_csv)
    train_dataset, val_dataset = DM.dataset_survival(modalities, mask,
                                                     target_size,
                                                     target_spacing,
                                                     target_direction)
    batch_size_train = DM.batch_size_train
    num_train_data = DM.num_train_data
    batch_size_val = DM.batch_size_val
    num_val_data = DM.num_val_data
    time_horizon = DM.time_horizon  #number of neurons on the last layer
    censure_val = DM.censure_val

    ##########################   MODEL LOSS METRICS OPTIMIZER  ##########################

    alpha = 0.6  #cross_entropy loss factor
    beta = 0.4  # ranking loss factor

    # definition of loss, optimizer and metrics
    optimizer = tfa.optimizers.AdamW(learning_rate=1e-4, weight_decay=1e-4)
    loss = get_loss_survival(time_horizon_dim=time_horizon,
                             alpha=alpha,
                             beta=beta)
    c_index = metric_cindex(time_horizon_dim=time_horizon)
    metrics = [c_index]
    #loss = get_loss_cox
    #metrics = [concordance_index_censored_cox]

    #c_index_weighted= metric_cindex_weighted(time_horizon_dim=time_horizon,batch_size=batch_size, y_val=y_val)
    #td_c_index = metric_td_c_index(time_horizon_dim=time_horizon, batch_size=batch_size)

    ##########################   CALLBACKS PARAMETERS  ##########################
    patience = 10
    ReduceLROnPlateau1 = False
    EarlyStopping1 = False
    ModelCheckpoint1 = True
    TensorBoard1 = True

    callbacks = []
    if ReduceLROnPlateau1 == True:
        # reduces learning rate if no improvement are seen
        learning_rate_reduction = ReduceLROnPlateau(monitor='val_loss',
                                                    patience=patience,
                                                    verbose=1,
                                                    factor=0.5,
                                                    min_lr=0.0000001)
        callbacks.append(learning_rate_reduction)

    if EarlyStopping1 == True:
        # stop training if no improvements are seen
        early_stop = EarlyStopping(monitor="val_loss",
                                   mode="min",
                                   patience=int(patience // 2),
                                   restore_best_weights=True)
        callbacks.append(early_stop)

    if ModelCheckpoint1 == True:
        # saves model weights to file
        # 'model_weights.{epoch:02d}-{val_loss:.2f}.hdf5'
        checkpoint = ModelCheckpoint(
            os.path.join(training_model_folder, 'model_weights.h5'),
            monitor='val_loss',  #td_c_index ?
            verbose=1,
            save_best_only=True,
            mode='min',  # max
            save_weights_only=False)
        callbacks.append(checkpoint)

    if TensorBoard1 == True:
        tensorboard_callback = TensorBoard(log_dir=logdir,
                                           histogram_freq=0,
                                           update_freq='epoch',
                                           write_graph=True,
                                           write_images=True)
        callbacks.append(tensorboard_callback)

    ##########################   DEFINITION MODEL  ##########################

    dim_mlp = len(
        mixed_data_columns)  # input dimension of the structured-data branch
    in_channels = len(modalities)
    if mask:
        in_channels += 1
    out_channels = 1
    channels_last = True
    keep_prob = 1
    keep_prob_last_layer = 0.8
    kernel_size = (5, 5, 5)
    num_channels = 12

    num_levels = 4
    num_convolutions = (1, 2, 3)
    num_levels = len(num_convolutions)
    bottom_convolutions = 1
    activation = "relu"
    image_shape = (256, 128, 128)
    if reduced_size != 0:
        image_shape = tuple(round(x * reduced_size) for x in image_shape)

    if mixed_data:
        architecture = 'vnet_survival_mixed_data'
    else:
        architecture = 'vnet_survival'

    model = VnetSurvival(image_shape,
                         in_channels,
                         out_channels,
                         time_horizon,
                         channels_last,
                         keep_prob,
                         keep_prob_last_layer,
                         kernel_size,
                         num_channels,
                         num_levels,
                         num_convolutions,
                         bottom_convolutions,
                         activation,
                         last_layer,
                         activation,
                         mixed_data,
                         dim_mixed_data_input=dim_mlp).create_network()

    model.compile(loss=loss, optimizer=optimizer, metrics=metrics)

    print(model.summary())

    model_json = model.to_json()
    with open(
            os.path.join(
                training_model_folder,
                'architecture_{}_model_{}.json'.format(architecture, now)),
            "w") as json_file:
        json_file.write(model_json)

    ##########################   TRAINING MODEL  ##########################

    history = model.fit(
        train_dataset,
        steps_per_epoch=int(num_train_data / batch_size_train),
        validation_data=val_dataset,
        validation_steps=int(num_val_data / batch_size_val),
        epochs=epochs,
        callbacks=callbacks,  # initial_epoch=0,
        verbose=1)

    ###########################     RESULTS       ##########################
    loss_and_metrics = {}
    censure_val = []
    print(history.history.keys())
    print('Loss:')
    index_loss_val = np.argmin(history.history['val_loss'])

    for i in history.history.keys():
        loss_and_metrics[i] = []
        if history.history[i][index_loss_val] != -1:
            loss_and_metrics[i].append(history.history[i][index_loss_val])
        print(i + " : " + str(history.history[i][index_loss_val]))

    x_axis = np.arange(0, len(history.history['loss']))

    fig, (ax1, ax2) = plt.subplots(1, 2)
    plt.ylabel('loss')
    plt.xlabel('epoch')
    plt.legend(['validation', 'training'])
    fig.suptitle('Loss and accuracy plot')
    ax1.plot(x_axis, history.history['val_loss'], x_axis,
             history.history['loss'])
    ax2.plot(x_axis, history.history['val_cindex'], x_axis,
             history.history['cindex'])
    #plot when loss and metrics are based on cox
    #ax1.plot(x_axis, history.history['val_loss'],x_axis, history.history['loss'])
    #ax2.plot(x_axis, history.history['val_concordance_index_censored_cox'], x_axis, history.history['concordance_index_censored_cox'])
    #plt.plot(x_axis, history.history['val_loss'],x_axis, history.history['loss'])
    #plt.title('Loss and accuracy model no '+str(fold_no))
    #fig.legend(['val_loss', 'loss', 'val_accuracy', 'accuracy'])
    for i in history.history.keys():
        if history.history[i][index_loss_val] != -1:
            loss_and_metrics[i].append(history.history[i][index_loss_val])
        print(i + " : " + str(history.history[i][index_loss_val]))

    create_info_file(loss_and_metrics, training_model_folder, mask, mixed_data,
                     mixed_data_info, batch_size_train, batch_size_val, epochs,
                     reduced_size, alpha, beta, censure_val, num_convolutions)
    plt.savefig(training_model_folder + '/loss.png')
    plt.show()
    return loss_and_metrics
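
# A hedged invocation sketch for the function above; the paths and the structured-data
# column mapping below are placeholder assumptions:
if __name__ == '__main__':
    dict_struct_data = {'age': 2, 'sex': 3}  # hypothetical excel column mapping
    loss_and_metrics = train_survival(base_path='/data/niftis',
                                      excel_path='/data/survival.xlsx',
                                      create_csv=True,
                                      training_model_folder='/data/trained_models/run1',
                                      epochs=100,
                                      mask=False,
                                      mixed_data=True,
                                      dict_struct_data=dict_struct_data,
                                      mixed_data_info=['age', 'sex'])
    print(loss_and_metrics)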
Example #21
start_time = datetime.now().strftime('%Y%m%d_%H%M%S')
os.makedirs('models', exist_ok=True)

model.fit(x=x_train,
          y=y_train,
          batch_size=256,
          epochs=10,
          callbacks=[
              ModelCheckpoint('./models/%s.h5' % (start_time),
                              monitor='val_acc',
                              verbose=1,
                              save_best_only=True,
                              mode='auto'),
              ReduceLROnPlateau(monitor='val_acc',
                                factor=0.2,
                                patience=5,
                                verbose=1,
                                mode='auto')
          ],
          validation_data=(x_val, y_val),
          use_multiprocessing=True,
          workers=16)

i = 0

for y in range(h):
    for x in range(w):
        print('%2d' % x_val[i][y, x], end='')
    print()

y_pred = model.predict(np.expand_dims(x_val[i], axis=0)).squeeze()
Example #22
def cross_validation_survival(K: int,
                              base_path: str,
                              excel_path: str,
                              create_csv: bool,
                              training_model_folder: str,
                              epochs: int,
                              mask: bool,
                              mixed_data: bool,
                              dict_struct_data: dict = None,
                              mixed_data_info: list = None) -> dict:
    """
    Retrieves the data (niftis/survival data) and trains the neural network for survival analysis with the given parameters, using K-fold
    cross-validation: a resampling procedure used to evaluate machine learning models on a limited data sample.

    Args:
        K (int): number of groups (folds) that the data sample is split into.
        base_path (str): path to the directory containing the nifti files (CT and PT scans)
        excel_path (str): path to the excel file containing the survival information and structured data
        create_csv (bool): True to save the retrieved data to a csv file.
        training_model_folder (str): path to the directory where the training information will be saved
        epochs (int): number of training epochs
        mask (bool): True if the mask is part of the input data (combined with the PT and CT scans).
        mixed_data (bool): True if structured data is added to the image data. Defaults to False
        dict_struct_data (dict): dictionary mapping each criterion to its column number in the excel file. Defaults to None.
        mixed_data_info (list): list of the different criteria that can be used. Defaults to None.

    Returns:
        (dict): a dictionary containing the losses and metrics computed during training
    """
    mixed_data_columns = []  #automatic filling
    for i in mixed_data_info:
        mixed_data_columns.append(dict_struct_data[i])

    now = datetime.now().strftime("%Y%m%d-%H:%M:%S")
    logdir = os.path.join(
        training_model_folder,
        'logs')  #path for logs of the trained model (tensorboard)
    if not os.path.exists(logdir):
        os.makedirs(logdir)

    reduced_size = 0.5  # needed if the batch size is too big for the GPU (halves the image size => reduced_size == 0.5); standard size is (256,128,128) when reduced_size == 0

    ##########################   IMAGE PROCESSING PARAMETERS   ##########################

    modalities = ('pet_img', 'ct_img')  # neural network inputs: CT and PET images
    target_direction = (1, 0, 0, 0, 1, 0, 0, 0, 1)
    target_size = (128, 128, 256)
    target_spacing = (4.0, 4.0, 4.0)
    if reduced_size != 0:
        target_size = tuple(round(x * reduced_size) for x in target_size)
        target_spacing = tuple(round(x / reduced_size) for x in target_spacing)
    ##########################   DATA MANAGER  ##########################

    DM = DataManagerSurvival(base_path,
                             excel_path,
                             mask,
                             mixed_data,
                             mixed_data_columns,
                             pod24=False)
    DM.get_data_survival(create_csv)

    if mixed_data:
        struct_data = DM.struct_data
    else:
        struct_data = None
    x = DM.nifti
    y = DM.time

    loss_and_metrics = {}
    kfold = KFold(n_splits=K, shuffle=True)
    fold_no = 1

    time_horizon = math.ceil(max(abs(y)) *
                             1.2)  #number of neurons on the output layer
    last_layer = "softmax"
    patience = 10
    ReduceLROnPlateau1 = True
    EarlyStopping1 = False
    ModelCheckpoint1 = True
    TensorBoard1 = True
    patience_ES = 30
    censure_val = []

    for train, val in kfold.split(x, y, struct_data):

        print('----------------------------------------------------')
        print(f'Training for fold {fold_no} ...')

        alpha = 0.4  #cross_entropy loss factor
        beta = 0.6  # ranking loss factor
        optimizer = tf.keras.optimizers.RMSprop(lr=0.0001)
        #optimizer = tfa.optimizers.AdamW(learning_rate=1e-4, weight_decay=1e-4)
        loss = get_loss_survival(time_horizon_dim=time_horizon,
                                 alpha=alpha,
                                 beta=beta)
        c_index = metric_cindex(time_horizon_dim=time_horizon)
        metrics = [c_index]
        #loss = get_loss_cox
        #metrics = [concordance_index_censored_cox]

        print('----------------------------------------------------')
        print(f'Training for fold {fold_no} ...')

        logdir_fold = logdir + '_fold_no_' + str(fold_no)
        if not os.path.exists(logdir_fold):
            os.makedirs(logdir_fold)

        callbacks = []
        if ReduceLROnPlateau1 == True:
            # reduces learning rate if no improvement are seen
            learning_rate_reduction = ReduceLROnPlateau(monitor='loss',
                                                        patience=patience,
                                                        verbose=1,
                                                        factor=0.5,
                                                        min_lr=0.0000001)
            callbacks.append(learning_rate_reduction)

        if EarlyStopping1 == True:
            # stop training if no improvements are seen
            early_stop = EarlyStopping(monitor="val_loss",
                                       mode="min",
                                       patience=int(patience_ES),
                                       restore_best_weights=True)
            callbacks.append(early_stop)

        if ModelCheckpoint1 == True:
            # saves model weights to file
            weights_file = 'model_weights_' + str(fold_no) + '.h5'
            checkpoint = ModelCheckpoint(
                os.path.join(training_model_folder, weights_file),
                monitor='val_loss',
                verbose=1,
                save_best_only=True,
                mode='min',  # max
                save_weights_only=False)
            callbacks.append(checkpoint)

        if TensorBoard1 == True:
            tensorboard_callback = TensorBoard(log_dir=logdir_fold,
                                               histogram_freq=0,
                                               update_freq='epoch',
                                               write_graph=True,
                                               write_images=True)
            callbacks.append(tensorboard_callback)

        #c_index_weighted= metric_cindex_weighted(time_horizon_dim=time_horizon,batch_size=batch_size, y_val=y_val)
        #td_c_index = metric_td_c_index(time_horizon_dim=time_horizon, batch_size=batch_size)

        #IMAGE PROCESSING
        #train_transforms = get_transform('train', modalities,target_size, target_spacing, target_direction, None, data_augmentation = True,  mask_survival= mask)
        #val_transforms = get_transform('val', modalities, target_size, target_spacing, target_direction, None,  data_augmentation = False, mask_survival= mask)
        dict_data = {}
        dict_data['x_train'] = [x[element] for element in train]
        dict_data['x_val'] = [x[element] for element in val]
        dict_data['y_train'] = [y[element] for element in train]
        dict_data['y_val'] = [y[element] for element in val]

        batch_size_train = len(dict_data['y_train'])
        batch_size_val = len(dict_data['y_val'])
        #censure_val.append(np.sum(np.where(np.array(y_val)<0, 1, 0))/len(y_val)*100)
        #print("Number of non-censored patients, fold no ", fold_no, " training: ", np.sum(np.where(np.array(y_train)<0, 0, 1)))
        #print("Censoring, fold no ", fold_no, " training:", np.sum(np.where(np.array(y_train)<0, 1, 0))/len(y_train), "%")
        #print("Censoring, fold no ", fold_no, " validation:", np.sum(np.where(np.array(y_val)<0, 0, 1)))
        #print("Censoring, fold no ", fold_no, " validation:", np.sum(np.where(np.array(y_val)<0, 1, 0))/len(y_val), "%")
        if mixed_data:
            dict_data['train_struct_data'] = [
                struct_data[element] for element in train
            ]
            dict_data['val_struct_data'] = [
                struct_data[element] for element in val
            ]

        train_dataset, val_dataset = DM.cross_val_dataset(
            dict_data, modalities, mask, target_size, target_spacing,
            target_direction, None)
        print(val_dataset)

        in_channels = len(modalities)
        if mask:
            in_channels += 1
        out_channels = 1
        channels_last = True
        keep_prob = 1
        keep_prob_last_layer = 0.8
        kernel_size = (5, 5, 5)
        num_channels = 12

        num_levels = 4
        num_convolutions = (1, 2, 1)
        num_levels = len(num_convolutions)
        bottom_convolutions = 1
        activation = "relu"
        image_shape = (256, 128, 128)
        if reduced_size != 0:
            image_shape = tuple(round(x * reduced_size) for x in image_shape)

        if mixed_data:
            dim_mlp = len(mixed_data_columns)  # input dimension of the structured-data branch
        else:
            dim_mlp = 0

        if mixed_data:
            architecture = 'vnet_survival_mixed_data'
        else:
            architecture = 'vnet_survival_cross'

        model = VnetSurvival(image_shape,
                             in_channels,
                             out_channels,
                             time_horizon,
                             channels_last,
                             keep_prob,
                             keep_prob_last_layer,
                             kernel_size,
                             num_channels,
                             num_levels,
                             num_convolutions,
                             bottom_convolutions,
                             activation,
                             last_layer,
                             activation,
                             mixed_data,
                             dim_mixed_data_input=dim_mlp).create_network()

        model.compile(loss=loss, optimizer=optimizer, metrics=metrics)

        print(model.summary())

        model_json = model.to_json()
        with open(
                os.path.join(
                    training_model_folder,
                    'architecture_{}_model_{}.json'.format(architecture, now)),
                "w") as json_file:
            json_file.write(model_json)

        history = model.fit(
            train_dataset,
            steps_per_epoch=int(len(dict_data['y_train']) / batch_size_train),
            validation_data=val_dataset,
            validation_steps=int(len(dict_data['y_val']) / batch_size_val),
            epochs=epochs,
            callbacks=callbacks,  # initial_epoch=0,
            verbose=1)

        print(history.history.keys())
        print(f'Loss for fold {fold_no}:')
        index_loss_val = np.argmin(history.history['val_loss'])

        if fold_no == 1:
            for i in history.history.keys():
                loss_and_metrics[i] = []

        for i in history.history.keys():
            if history.history[i][index_loss_val] != -1:
                loss_and_metrics[i].append(history.history[i][index_loss_val])
            print(i + " : " + str(history.history[i][index_loss_val]))
        x_axis = np.arange(0, len(history.history['loss']))

        fig, (ax1, ax2) = plt.subplots(1, 2)
        plt.ylabel('loss')
        plt.xlabel('epoch')
        plt.legend(['validation', 'training'])
        fig.suptitle('Loss and accuracy plot')
        ax1.plot(x_axis, history.history['val_loss'], x_axis,
                 history.history['loss'])
        ax2.plot(x_axis, history.history['val_cindex'], x_axis,
                 history.history['cindex'])
        #ax1.plot(x_axis, history.history['val_loss'],x_axis, history.history['loss'])
        #ax2.plot(x_axis, history.history['val_concordance_index_censored_cox'], x_axis, history.history['concordance_index_censored_cox'])
        #plt.plot(x_axis, history.history['val_loss'],x_axis, history.history['loss'])
        #plt.title('Loss and accuracy model no '+str(fold_no))
        #fig.legend(['val_loss', 'loss', 'val_accuracy', 'accuracy'])

        plt.savefig(training_model_folder + '/loss_' + str(fold_no) + '.png')
        plt.show()
        fold_no += 1

    print('-----------------------------------------------------------------')
    print(f'Loss and metrics per fold')
    for i in loss_and_metrics.keys():
        print(
            '------------------------------------------------------------------'
        )
        for j in range(len(loss_and_metrics[i])):
            print(f'> {i}: {loss_and_metrics[i][j]}')
        print(
            '------------------------------------------------------------------'
        )

    print('Loss and metrics averaged over folds (mean ± std)')
    print('------------------------------------------------------------------')
    for i in loss_and_metrics.keys():
        print(
            f'> {i}: {np.mean(loss_and_metrics[i])}(+- {np.std(loss_and_metrics[i])})'
        )
    print('------------------------------------------------------------------')
    for i in censure_val:
        print("censoring, validation fold : ", i)
    create_info_file(loss_and_metrics, training_model_folder, mask, mixed_data,
                     mixed_data_info, batch_size_train, batch_size_val, epochs,
                     reduced_size, alpha, beta, censure_val, num_convolutions)

    return loss_and_metrics
Example #23
def main(_argv):
    physical_devices = tf.config.experimental.list_physical_devices('GPU')
    for physical_device in physical_devices:
        tf.config.experimental.set_memory_growth(physical_device, True)

    if FLAGS.tiny:
        model = YoloV3Tiny(FLAGS.size, training=True,
                           classes=FLAGS.num_classes)
        anchors = yolo_tiny_anchors
        anchor_masks = yolo_tiny_anchor_masks
    else:
        model = YoloV3(FLAGS.size, training=True, classes=FLAGS.num_classes)
        anchors = yolo_anchors
        anchor_masks = yolo_anchor_masks

    train_dataset = dataset.load_fake_dataset()
    if FLAGS.dataset:
        train_dataset = dataset.load_tfrecord_dataset(
            FLAGS.dataset, FLAGS.classes, FLAGS.size)
    train_dataset = train_dataset.shuffle(buffer_size=512)
    train_dataset = train_dataset.batch(FLAGS.batch_size)
    train_dataset = train_dataset.map(lambda x, y: (
        dataset.transform_images(x, FLAGS.size),
        dataset.transform_targets(y, anchors, anchor_masks, FLAGS.size)))
    train_dataset = train_dataset.prefetch(
        buffer_size=tf.data.experimental.AUTOTUNE)

    val_dataset = dataset.load_fake_dataset()
    if FLAGS.val_dataset:
        val_dataset = dataset.load_tfrecord_dataset(
            FLAGS.val_dataset, FLAGS.classes, FLAGS.size)
    val_dataset = val_dataset.batch(FLAGS.batch_size)
    val_dataset = val_dataset.map(lambda x, y: (
        dataset.transform_images(x, FLAGS.size),
        dataset.transform_targets(y, anchors, anchor_masks, FLAGS.size)))

    # Configure the model for transfer learning
    if FLAGS.transfer == 'none':
        pass  # Nothing to do
    elif FLAGS.transfer in ['darknet', 'no_output']:
        # Darknet transfer is a special case that works
        # with incompatible number of classes

        # reset top layers
        if FLAGS.tiny:
            model_pretrained = YoloV3Tiny(
                FLAGS.size, training=True, classes=FLAGS.weights_num_classes or FLAGS.num_classes)
        else:
            model_pretrained = YoloV3(
                FLAGS.size, training=True, classes=FLAGS.weights_num_classes or FLAGS.num_classes)
        model_pretrained.load_weights(FLAGS.weights)

        if FLAGS.transfer == 'darknet':
            model.get_layer('yolo_darknet').set_weights(
                model_pretrained.get_layer('yolo_darknet').get_weights())
            freeze_all(model.get_layer('yolo_darknet'))

        elif FLAGS.transfer == 'no_output':
            for l in model.layers:
                if not l.name.startswith('yolo_output'):
                    l.set_weights(model_pretrained.get_layer(
                        l.name).get_weights())
                    freeze_all(l)

    else:
        # All other transfer require matching classes
        print('yes')
        model.load_weights(FLAGS.weights)
        if FLAGS.transfer == 'fine_tune':
            # freeze darknet and fine tune other layers
            darknet = model.get_layer('yolo_darknet')
            freeze_all(darknet)
        elif FLAGS.transfer == 'frozen':
            # freeze everything
            freeze_all(model)

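    # Adam optimizer plus one YoloLoss per output scale, each built from the anchors
    # selected by its anchor mask.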
    optimizer = tf.keras.optimizers.Adam(learning_rate=FLAGS.learning_rate)
    loss = [YoloLoss(anchors[mask], classes=FLAGS.num_classes)
            for mask in anchor_masks]

    if FLAGS.mode == 'eager_tf':
        # Eager mode is great for debugging
        # Non eager graph mode is recommended for real training
        avg_loss = tf.keras.metrics.Mean('loss', dtype=tf.float32)
        avg_val_loss = tf.keras.metrics.Mean('val_loss', dtype=tf.float32)

        for epoch in range(1, FLAGS.epochs + 1):
            for batch, (images, labels) in enumerate(train_dataset):
                with tf.GradientTape() as tape:
                    outputs = model(images, training=True)
                    regularization_loss = tf.reduce_sum(model.losses)
                    pred_loss = []
                    for output, label, loss_fn in zip(outputs, labels, loss):
                        pred_loss.append(loss_fn(label, output))
                    total_loss = tf.reduce_sum(pred_loss) + regularization_loss

                grads = tape.gradient(total_loss, model.trainable_variables)
                optimizer.apply_gradients(
                    zip(grads, model.trainable_variables))

                logging.info("{}_train_{}, {}, {}".format(
                    epoch, batch, total_loss.numpy(),
                    list(map(lambda x: np.sum(x.numpy()), pred_loss))))
                avg_loss.update_state(total_loss)

            for batch, (images, labels) in enumerate(val_dataset):
                outputs = model(images)
                regularization_loss = tf.reduce_sum(model.losses)
                pred_loss = []
                for output, label, loss_fn in zip(outputs, labels, loss):
                    pred_loss.append(loss_fn(label, output))
                total_loss = tf.reduce_sum(pred_loss) + regularization_loss

                logging.info("{}_val_{}, {}, {}".format(
                    epoch, batch, total_loss.numpy(),
                    list(map(lambda x: np.sum(x.numpy()), pred_loss))))
                avg_val_loss.update_state(total_loss)

            logging.info("{}, train: {}, val: {}".format(
                epoch,
                avg_loss.result().numpy(),
                avg_val_loss.result().numpy()))

            avg_loss.reset_states()
            avg_val_loss.reset_states()
            model.save_weights(
                'checkpoints/yolov3_train_{}.tf'.format(epoch))
    else:
        model.compile(optimizer=optimizer, loss=loss,
                      run_eagerly=(FLAGS.mode == 'eager_fit'))

        callbacks = [
            ReduceLROnPlateau(verbose=1),
            EarlyStopping(patience=3, verbose=1),
            ModelCheckpoint('checkpoints/yolov3_train_{epoch}.tf',
                            verbose=1, save_weights_only=True),
            TensorBoard(log_dir='logs')
        ]

        history = model.fit(train_dataset,
                            epochs=FLAGS.epochs,
                            callbacks=callbacks,
                            validation_data=val_dataset)
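

# freeze_all is called above but not defined in this snippet. A minimal sketch of
# such a helper (an assumption, not necessarily the project's exact code):
# recursively mark a layer or sub-model and everything inside it as non-trainable.
import tensorflow as tf


def freeze_all(model, frozen=True):
    model.trainable = not frozen
    if isinstance(model, tf.keras.Model):
        for layer in model.layers:
            freeze_all(layer, frozen)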
Beispiel #24
0
def train(cfg, args):
    # Basic configurations for GPU and CPU
    os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
    os.environ["CUDA_VISIBLE_DEVICES"] = str(args.gpu)
    if args.gpu > -1:
        gpus = tf.config.list_physical_devices('GPU')
        for gpu in gpus:
            tf.config.experimental.set_memory_growth(gpu, True)

    # Reading data
    X, idx = read_genus_abu(args.input)
    Y = read_labels(args.labels, shuffle_idx=idx, dmax=get_dmax(args.labels))
    print('Reordering labels and samples...')
    IDs = sorted(
        list(set(X.index.to_list()).intersection(Y[0].index.to_list())))
    X = X.loc[IDs, :]
    Y = [y.loc[IDs, :] for y in Y]
    print('Total matched samples:', sum(X.index == Y[0].index))

    # Reading basic configurations from config file
    pretrain_ep = cfg.getint('train', 'pretrain_ep')
    pretrain_lr = cfg.getfloat('train', 'pretrain_lr')

    lr = cfg.getfloat('train', 'lr')
    epochs = cfg.getint('train', 'epochs')
    reduce_patience = cfg.getint('train', 'reduce_patience')
    stop_patience = cfg.getint('train', 'stop_patience')
    batch_size = cfg.getint('train', 'batch_size')
    lrreducer = ReduceLROnPlateau(monitor='val_loss',
                                  patience=reduce_patience,
                                  min_lr=1e-5,
                                  verbose=5,
                                  factor=0.1)
    stopper = EarlyStopping(monitor='val_loss',
                            patience=stop_patience,
                            verbose=5,
                            restore_best_weights=True)
    pretrain_callbacks = []
    callbacks = [lrreducer, stopper]
    if args.log:
        pretrain_logger = CSVLogger(filename=args.log)
        logger = CSVLogger(filename=args.log)
        pretrain_callbacks.append(pretrain_logger)
        callbacks.append(logger)
    phylogeny = pd.read_csv(find_pkg_resource('resources/phylogeny.csv'),
                            index_col=0)
    pretrain_opt = Adam(learning_rate=pretrain_lr)
    optimizer = Adam(learning_rate=lr)
    dropout_rate = args.dropout_rate

    # Calculate per-layer sample weights, assigning zero weight to samples with no labels
    # sample_weight = [zero_weight_unk(y=y,
    #                                  sample_weight=compute_sample_weight(
    #                                      class_weight='balanced',
    #                                      y=y.to_numpy().argmax(axis=1)))
    #                  for i, y in enumerate(Y_train)]

    # Build the model
    ontology = load_otlg(args.otlg)
    _, layer_units = parse_otlg(ontology)
    model = Model(phylogeny=phylogeny,
                  num_features=X.shape[1],
                  ontology=ontology,
                  dropout_rate=dropout_rate)

    # Feature encoding and standardization
    X = model.encoder.predict(X, batch_size=batch_size)
    X = X.reshape(X.shape[0], X.shape[1] * X.shape[2])
    print('N. NaN in input features:', np.isnan(X).sum())
    model.update_statistics(mean=X.mean(axis=0), std=X.std(axis=0))
    X = model.standardize(X)

    #------------------------------- SELECTIVE LEARNING-----------------------------------------------
    # Sample weight "zero" to mask unknown samples' contribution to loss
    sample_weight = [
        zero_weight_unk(y=y, sample_weight=np.ones(y.shape[0]))
        for i, y in enumerate(Y)
    ]
    Y = [y.drop(columns=['Unknown']) for y in Y]

    # Train EXPERT model
    print('Pre-training using Adam with lr={}...'.format(pretrain_lr))
    model.nn.compile(optimizer=pretrain_opt,
                     loss=BinaryCrossentropy(from_logits=True),
                     loss_weights=(np.array(layer_units) /
                                   sum(layer_units)).tolist(),
                     weighted_metrics=[BinaryAccuracy(name='acc')])
    model.nn.fit(X,
                 Y,
                 validation_split=args.val_split,
                 batch_size=batch_size,
                 epochs=pretrain_ep,
                 sample_weight=sample_weight,
                 callbacks=pretrain_callbacks)

    model.nn.summary()

    print('Training using Adam with lr={}...'.format(lr))
    model.nn.compile(optimizer=optimizer,
                     loss=BinaryCrossentropy(from_logits=True),
                     loss_weights=(np.array(layer_units) /
                                   sum(layer_units)).tolist(),
                     weighted_metrics=[BinaryAccuracy(name='acc')])
    model.nn.fit(X,
                 Y,
                 validation_split=args.val_split,
                 batch_size=batch_size,
                 initial_epoch=pretrain_ep,
                 epochs=epochs + pretrain_ep,
                 sample_weight=sample_weight,
                 callbacks=callbacks)

    # Save the EXPERT model
    model.save_blocks(args.output)
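

# zero_weight_unk is called above but not defined in this snippet. Based on the
# surrounding code (each label frame y carries an 'Unknown' column that is dropped
# right after the call), a plausible sketch -- zeroing out the weight of samples whose
# label is 'Unknown' so they do not contribute to the loss -- could look like this:
import numpy as np


def zero_weight_unk(y, sample_weight):
    sample_weight = np.asarray(sample_weight, dtype='float32').copy()
    sample_weight[y['Unknown'].to_numpy() > 0] = 0.0
    return sample_weight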
Beispiel #25
0
            warmup_steps = int(warmup_epoch * num_train / batch_size)
            # learning rate
            reduce_lr = WarmUpCosineDecayScheduler(
                learning_rate_base=learning_rate_base,
                total_steps=total_steps,
                warmup_learning_rate=1e-4,
                warmup_steps=warmup_steps,
                hold_base_rate_steps=num_train,
                min_learn_rate=1e-6)
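            # Assuming the usual warm-up-plus-cosine-decay behaviour: the learning rate
            # ramps linearly from warmup_learning_rate to learning_rate_base over
            # warmup_steps, is held at the base rate for hold_base_rate_steps, and then
            # follows a cosine decay down to min_learn_rate.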
            model.compile(optimizer=Adam(),
                          loss={
                              'yolo_loss': lambda y_true, y_pred: y_pred
                          })
        else:
            reduce_lr = ReduceLROnPlateau(monitor='val_loss',
                                          factor=0.5,
                                          patience=2,
                                          verbose=1)
            model.compile(optimizer=Adam(learning_rate_base),
                          loss={
                              'yolo_loss': lambda y_true, y_pred: y_pred
                          })

        print('Train on {} samples, val on {} samples, with batch size {}.'.
              format(num_train, num_val, batch_size))
        model.fit(
            data_generator(lines[:num_train],
                           batch_size,
                           input_shape,
                           anchors,
                           num_classes,
                           mosaic=mosaic),
from tensorflow.keras.losses import SparseCategoricalCrossentropy
from tensorflow.keras.metrics import SparseCategoricalAccuracy
from tensorflow.keras.optimizers import Adam
# %load_ext tensorboard
LEARNING_RATE = 1e-3
EARLY_STOPPING_CRITERIA = 3
NUM_EPOCHS = 50

# Compile
model.compile(optimizer=Adam(learning_rate=LEARNING_RATE),
              loss=SparseCategoricalCrossentropy(),
              metrics=[SparseCategoricalAccuracy()])

# Callbacks
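# ReduceLROnPlateau cuts the learning rate by a factor of 10 after a single epoch
# without val_loss improvement; EarlyStopping stops training after
# EARLY_STOPPING_CRITERIA such epochs.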
callbacks = [EarlyStopping(monitor='val_loss', patience=EARLY_STOPPING_CRITERIA, verbose=1, mode='min'),
             ReduceLROnPlateau(patience=1, factor=0.1, verbose=0),
             TensorBoard(log_dir='tensorboard', histogram_freq=1, update_freq='epoch')]

# Training
training_history = model.fit(x=X_train, y=y_train,
                             epochs=NUM_EPOCHS,
                             validation_data=(X_test, y_test), 
                             callbacks=callbacks,
                             batch_size=64,
                             verbose=1)



"""##Validation Data"""

def get_performance(y_true, y_pred, classes):
Beispiel #27
0
    return model


#next step
model = create_model(input_shape=(HEIGHT, WIDTH, COLORS), n_out=N_CLASSES)
model.summary()

metric_list = ["accuracy"]
optimizer = optimizers.Adam(learning_rate=WARMUP_LEARNING_RATE)
model.compile(optimizer=optimizer,
              loss="categorical_crossentropy",
              metrics=metric_list)

rlrop = ReduceLROnPlateau(monitor='val_loss',
                          mode='min',
                          patience=RLROP_PATIENCE,
                          factor=DECAY_DROP,
                          min_lr=1e-7,
                          verbose=1)

callback_list = [rlrop]

# warm up training phase, only two epochs
STEP_SIZE_TRAIN = train_generator.n // train_generator.batch_size
STEP_SIZE_VALID = val_generator.n // val_generator.batch_size

history_warmup = model.fit_generator(generator=train_generator,
                                     steps_per_epoch=STEP_SIZE_TRAIN,
                                     validation_data=val_generator,
                                     validation_steps=STEP_SIZE_VALID,
                                     epochs=WARMUP_EPOCHS,
                                     verbose=1).history
# compile the model (should be done after setting layers to non-trainable)
model.compile(
    optimizer=optimizer,
    loss="categorical_crossentropy",
    metrics=['accuracy'],
)

# train the model
# define callback function
early_stopping = EarlyStopping(
    monitor='val_loss',
    patience=10,
)
reduce_lr = ReduceLROnPlateau(
    monitor='val_loss',
    factor=0.1,
    patience=5,
)

model.fit_generator(train_generator,
                    steps_per_epoch=train_image_numbers // batch_size,
                    epochs=epochs,
                    validation_data=validation_generator,
                    validation_steps=validation_steps,
                    callbacks=[early_stopping, reduce_lr])

# test the model
test_metrics = model.evaluate_generator(
    test_generator,
    steps=1,
)
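

# The comment above notes that the model should be compiled after setting layers to
# non-trainable. A common follow-up once the warm-up phase is done -- shown here only
# as a hedged sketch that reuses the names defined earlier in this example (model,
# optimizers, metric_list), not as part of the original code -- is to unfreeze the
# frozen layers and recompile with a much lower learning rate for fine-tuning:
for layer in model.layers:
    layer.trainable = True

model.compile(optimizer=optimizers.Adam(learning_rate=1e-5),
              loss="categorical_crossentropy",
              metrics=metric_list)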
Beispiel #29
0
     print('\033[32;1mLoading Model\033[0m')
     model.load_weights(model_path)
 if training:
     trainX, validX, trainY_SOS, validY_SOS, trainY, validY, line_len, valid_line_len = train_data_preprocessing(
         inputX, word2idx, max_seq_len)
     print(
         f'\033[32;1mtrainX: {trainX.shape}, validX: {validX.shape}, trainY: {trainY.shape}, validY: {validY.shape}, trainY_SOS: {trainY_SOS.shape}, validY_SOS: {validY_SOS.shape}\033[0m'
     )
     checkpoint = ModelCheckpoint(model_path,
                                  'val_word_out_loss',
                                  verbose=1,
                                  save_best_only=True,
                                  save_weights_only=True)
     reduce_lr = ReduceLROnPlateau('val_word_out_loss',
                                   0.5,
                                   3,
                                   verbose=1,
                                   min_lr=1e-6)
     logger = CSVLogger(model_path + '.csv', append=True)
     tensorboard = TensorBoard(model_path[:model_path.rfind('.')] + '_logs',
                               histogram_freq=1,
                               batch_size=1024,
                               write_grads=True,
                               write_images=True,
                               update_freq=512)
     epochs = 10
     for epoch in range(epochs):
         print(f'\033[32;1mepoch: {epoch+1}/{epochs}\033[0m')
         ith, ith_str, word = generate_word(trainY, line_len)
         #print(' '.join([idx2word[i] for i in trainX[8347]]).strip(), ith[8347, 0], idx2word[word[8347, 0]])
         #print(' '.join([idx2word[i] for i in trainY[8347]]).strip())
Beispiel #30
0
def train_res_net(rotation_range_var=10,
                  zoom_range_var=0.1,
                  width_shift_range_var=0.1,
                  height_shift_range_var=0.1,
                  horizontal_flip_var=True,
                  vertical_flip_var=True,
                  batch_size_var=32,
                  activation_func_var='relu',
                  epoch_var=30,
                  steps_per_epoch_var=256,
                  validation_steps_var=256):

    train_dir = '../../dump/intel-image-classification/seg_train/seg_train/'
    test_dir = '../../dump/intel-image-classification/seg_test/seg_test/'

    data_gen = ImageDataGenerator(featurewise_center=False,
                                  samplewise_center=False,
                                  featurewise_std_normalization=False,
                                  samplewise_std_normalization=False,
                                  zca_whitening=False,
                                  rotation_range=rotation_range_var,
                                  zoom_range=zoom_range_var,
                                  width_shift_range=width_shift_range_var,
                                  height_shift_range=height_shift_range_var,
                                  horizontal_flip=horizontal_flip_var,
                                  vertical_flip=vertical_flip_var)

    train_gen = data_gen.flow_from_directory(train_dir,
                                             target_size=(150, 150),
                                             batch_size=batch_size_var,
                                             class_mode='categorical')

    test_gen = data_gen.flow_from_directory(test_dir,
                                            target_size=(150, 150),
                                            batch_size=batch_size_var,
                                            class_mode='categorical')

    es = EarlyStopping(monitor='val_loss', mode='min', verbose=1, patience=10)

    learning_rate_reduction = ReduceLROnPlateau(monitor='val_accuracy',
                                                patience=3,
                                                verbose=1,
                                                factor=0.25,
                                                min_lr=0.000001)

    optimizer = Adam(learning_rate=0.5e-4,
                     beta_1=0.9,
                     beta_2=0.999,
                     amsgrad=False)

    base_model = ResNet50(include_top=False,
                          weights='imagenet',
                          input_shape=(150, 150, 3),
                          pooling='avg')
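    # Keep the ImageNet-pretrained backbone frozen; only the new dense head added
    # below is trained during this run.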
    base_model.trainable = False

    x = Dense(512, activation=activation_func_var)(base_model.output)
    x = Dropout(0.5)(x)
    x = Dense(6, activation='softmax')(x)

    transfer_model = Model(base_model.input, x)
    transfer_model.compile(optimizer=optimizer,
                           loss='categorical_crossentropy',
                           metrics=['accuracy'])

    history = transfer_model.fit_generator(
        train_gen,
        steps_per_epoch=steps_per_epoch_var,
        validation_data=test_gen,
        validation_steps=validation_steps_var,
        epochs=epoch_var,
        verbose=1,
        callbacks=[es, learning_rate_reduction])

    transfer_model.save("res_model.model")

    def get_images(directory):
        # Map each class folder to its integer label; unknown folders are skipped
        # instead of silently reusing the label of the previous folder.
        class_labels = {'buildings': 0, 'forest': 1, 'glacier': 2,
                        'mountain': 3, 'sea': 4, 'street': 5}
        Images = []
        Labels = []

        for folder in os.listdir(directory):
            if folder not in class_labels:
                continue
            label = class_labels[folder]
            for image_file in os.listdir(directory + folder):
                image = cv2.imread(directory + folder + r'/' + image_file)
                image = cv2.resize(image, (150, 150))
                Images.append(image)
                Labels.append(label)

        return Images, Labels

    test_images, test_labels = get_images(test_dir)
    test_images = np.array(test_images)
    test_labels = np.array(test_labels)
    test_labels = to_categorical(test_labels)

    return transfer_model.evaluate(test_images, test_labels)
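

# A minimal usage sketch (not part of the original snippet): run the transfer-learning
# pipeline above with its default augmentation settings and print the resulting test
# loss and accuracy. The dataset paths hard-coded inside train_res_net are assumed to
# exist locally.
if __name__ == '__main__':
    test_loss, test_acc = train_res_net()
    print(f'Test loss: {test_loss:.4f}, test accuracy: {test_acc:.4f}')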