Example #1
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.3))
model.add(Conv2D(32, activation='relu', kernel_size=3, padding='same', kernel_initializer='TruncatedNormal'))
model.add(Conv2D(32, activation='relu', kernel_size=3, padding='same', kernel_initializer='TruncatedNormal'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Flatten())
model.add(Dense(256, activation='relu', kernel_constraint=max_norm(2.), kernel_initializer='TruncatedNormal'))
model.add(Dropout(0.5))
model.add(Dense(128, activation='relu', kernel_constraint=max_norm(2.), kernel_initializer='TruncatedNormal'))
model.add(Dropout(0.5))
model.add(Dense(1, activation='sigmoid', kernel_initializer='TruncatedNormal'))
model.compile(loss='binary_crossentropy', optimizer=Adam(lr=lr_init), metrics=['accuracy'])
model.summary()

# Train the model:
reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.2, patience=2, min_lr=1.e-6)
history = model.fit(X_train, y_train, batch_size=batch_size, epochs=epochs, validation_data=(X_valid, y_valid),
                    callbacks=[reduce_lr], verbose=1, shuffle=True)

# Evaluate on validation set
score = model.evaluate(X_valid, y_valid, verbose=1)
print('\nValidation loss / accuracy: %0.4f / %0.4f' % (score[0], score[1]))
y_pred = model.predict(X_valid)
fpr, tpr, _ = roc_curve(y_valid, y_pred)
roc_auc = auc(fpr, tpr)
print('Validation ROC AUC:', roc_auc)

# Evaluate on test set
score = model.evaluate(X_test, y_test, verbose=1)
print('\nTest loss / accuracy: %0.4f / %0.4f' % (score[0], score[1]))
y_pred = model.predict(X_test)
Example #2
# Retrieve the Neptune project
project = neptune.Session(PARAMS['api_token'])\
    .get_project('kunalcgi/sandbox')


# Callbacks
# Keep the experiment open while the model trains below
npt_exp = project.create_experiment(name='repair-replace-classification-exp-v2',
                                    params=PARAMS,
                                    description=PARAMS['description'],
                                    upload_source_files=[])
np_callback = NeptuneMonitor(npt_exp, 999999999999)
model_checkpointer = ModelCheckpoint(PARAMS['model_path'], verbose=1, monitor='val_accuracy',
                                     mode='max', save_best_only=True, save_weights_only=False)
weight_checkpointer = ModelCheckpoint(PARAMS['weight_path'], verbose=1, monitor='val_accuracy',
                                      mode='max', save_best_only=True, save_weights_only=True)
learning_rate_reduction = ReduceLROnPlateau(monitor='val_loss', patience=5, verbose=1,
                                            factor=0.2, min_lr=0.0000032, cooldown=20)
earlystopper = EarlyStopping(monitor='val_mean_absolute_error', patience=20, verbose=1, mode='min')
callbacks_list = [CSVLogger(PARAMS['log_file']), model_checkpointer, weight_checkpointer, np_callback, learning_rate_reduction, earlystopper]

# Balance dataset
class_weights = class_weight.compute_class_weight(class_weight='balanced',
                                                  classes=np.unique(train_generator.classes),
                                                  y=train_generator.classes)
# Keras expects class_weight as a dict mapping class index -> weight
class_weights = dict(enumerate(class_weights))

print('--------------------- Model training started ---------------------')
model.fit_generator(
        train_generator,
        steps_per_epoch=PARAMS['train_count'] // PARAMS['batch_size'],
        epochs=PARAMS['n_epochs'],
        validation_data=test_generator,
        validation_steps=PARAMS['val_count'] // PARAMS['batch_size'],
        callbacks=callbacks_list,
        class_weight=class_weights)
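
As a side note, a minimal self-contained sketch (with made-up labels) of what compute_class_weight returns and the dict form that Keras' class_weight argument expects:

import numpy as np
from sklearn.utils import class_weight

# Hypothetical labels: class 0 is far more frequent than class 1
labels = np.array([0, 0, 0, 0, 0, 1, 1])
weights = class_weight.compute_class_weight(class_weight='balanced',
                                            classes=np.unique(labels),
                                            y=labels)
# Map class index -> weight, which is the form Keras expects
class_weights = dict(enumerate(weights))
print(class_weights)  # {0: 0.7, 1: 1.75}
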
Example #3
datagen = ImageDataGenerator(
    featurewise_std_normalization=False,  # divide inputs by std of the dataset
    samplewise_std_normalization=False,  # divide each input by its std
    zca_whitening=False,  # apply ZCA whitening
    rotation_range=10,  # randomly rotate images in the range (degrees, 0 to 180)
    zoom_range=0.1,  # randomly zoom images
    width_shift_range=0.1,  # randomly shift images horizontally (fraction of total width)
    height_shift_range=0.1,  # randomly shift images vertically (fraction of total height)
    horizontal_flip=False,  # do not randomly flip images horizontally
    vertical_flip=False)  # do not randomly flip images vertically

# Set a learning rate annealer
learning_rate_reduction = ReduceLROnPlateau(monitor='val_accuracy',
                                            patience=3,
                                            verbose=1,
                                            factor=0.5,
                                            min_lr=0.00001)

datagen.fit(X_train)

# Fit the model
history = model.fit_generator(datagen.flow(X_train, Y_train, batch_size=86),
                              epochs=2,
                              validation_data=(X_val, Y_val),
                              verbose=1,
                              steps_per_epoch=X_train.shape[0] // 86,
                              callbacks=[learning_rate_reduction])

#Fit without data augmentation
#history = model.fit(X_train, Y_train, batch_size = 128, epochs = 17,
Example #4
def evaluate_model(X_train,
                   y_train,
                   X_test,
                   y_test,
                   X_val,
                   y_val,
                   kernel,
                   verbose=1):
    X_train, X_test, X_val = scale_data(X_train, X_test, X_val)

    epochs, batch_size = 10, 32
    n_timesteps, n_features, n_outputs = X_train.shape[1], X_train.shape[2], y_train.shape[1]

    model = Sequential()
    model.add(
        Conv1D(filters=84,
               kernel_size=kernel[0],
               activation='relu',
               input_shape=(n_timesteps, n_features)))
    model.add(Conv1D(filters=84, kernel_size=kernel[1], activation='relu'))
    model.add(Dropout(0.3))
    model.add(MaxPooling1D(pool_size=2))

    model.add(LSTM(units=60, return_sequences=True))
    model.add(Dropout(0.2))
    model.add(LSTM(units=65))
    model.add(Dropout(0.2))

    model.add(Dense(units=65))
    model.add(Dense(units=n_outputs, activation="softmax"))

    # Reduce the learning rate once learning stagnates; this helps squeeze out
    # the last few decimals of accuracy.
    reduce_lr = ReduceLROnPlateau(monitor='acc',
                                  factor=0.1,
                                  patience=4,
                                  verbose=verbose,
                                  min_delta=0.001,
                                  mode='max')

    model.compile(optimizer='adam',
                  loss='binary_crossentropy',
                  metrics=['accuracy'])
    # Save an image of the model architecture
    # plot_model(model, show_shapes=True, to_file='data/img_models/CNN_1D_LSTM.png')

    model.fit(X_train,
              y_train,
              epochs=epochs,
              batch_size=batch_size,
              verbose=verbose,
              validation_data=(X_val, y_val),
              shuffle=True,
              callbacks=[reduce_lr])

    _, accuracy = model.evaluate(X_test,
                                 y_test,
                                 batch_size=batch_size,
                                 verbose=verbose)
    return accuracy
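
For orientation, a simplified sketch of the rule that ReduceLROnPlateau applies in these examples, shown for a "lower is better" metric such as val_loss and ignoring Keras' min_delta and cooldown handling: when the monitored value fails to improve for patience epochs, the learning rate is multiplied by factor, but never drops below min_lr.

def plateau_schedule(monitored_values, lr=1e-3, factor=0.1, patience=4, min_lr=1e-6):
    # Toy re-implementation for illustration only, not the Keras source
    best, wait, lrs = float('inf'), 0, []
    for value in monitored_values:
        if value < best:          # the metric improved
            best, wait = value, 0
        else:                     # no improvement this epoch
            wait += 1
            if wait >= patience:  # plateau reached: decay the learning rate
                lr = max(lr * factor, min_lr)
                wait = 0
        lrs.append(lr)
    return lrs

print(plateau_schedule([0.9, 0.8, 0.8, 0.8, 0.8, 0.8, 0.5]))
# [0.001, 0.001, 0.001, 0.001, 0.001, 0.0001, 0.0001]
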
Example #5
    def __init__(self):
        # Variables to hold the description of the experiment
        self.config_description = "This is the template config file."

        # System dependent variable
        self._workers = 5
        self._multiprocessing = True

        # Variables for comet.ml
        self._project_name = "jpeg_deep"
        self._workspace = "ssd"

        # Network variables
        self._weights = "/dlocal/home/2017018/bdegue01/weights/jpeg_deep/classification_dct/resnet/lcrfat/classification_dct_jpeg-deep_hdQhWLTnqWTJciRe7VM4itOQW1kBFpWw/checkpoints/epoch-70_loss-0.8205_val_loss-1.5794.h5"
        self._network = SSD300_resnet(backbone="lcrfat",
                                      dct=True,
                                      image_shape=(38, 38))

        # Training variables
        self._epochs = 240
        self._batch_size = 32
        self._steps_per_epoch = 1000

        self.optimizer_parameters = {"lr": 0.001, "momentum": 0.9}
        self._optimizer = SGD(**self.optimizer_parameters)
        self._loss = SSDLoss(neg_pos_ratio=3, alpha=1.0).compute_loss
        self._metrics = None

        dataset_path = environ["DATASET_PATH"]
        images_2007_path = join(dataset_path, "VOC2007/JPEGImages")
        images_2012_path = join(dataset_path, "VOC2012/JPEGImages")
        self.train_sets = [(images_2007_path,
                            join(dataset_path,
                                 "VOC2007/ImageSets/Main/train.txt")),
                           (images_2012_path,
                            join(dataset_path,
                                 "VOC2012/ImageSets/Main/train.txt"))]
        self.validation_sets = [(images_2007_path,
                                 join(dataset_path,
                                      "VOC2007/ImageSets/Main/val.txt")),
                                (images_2012_path,
                                 join(dataset_path,
                                      "VOC2012/ImageSets/Main/val.txt"))]
        self.test_sets = [(images_2007_path,
                           join(dataset_path,
                                "VOC2007/ImageSets/Main/test.txt"))]

        # Keras stuff
        self.model_checkpoint = None
        self.reduce_lr_on_plateau = ReduceLROnPlateau(patience=5, verbose=1)
        self.terminate_on_nan = TerminateOnNaN()
        self.early_stopping = EarlyStopping(monitor='val_loss',
                                            min_delta=0,
                                            patience=15)

        self._callbacks = [
            self.reduce_lr_on_plateau, self.early_stopping,
            self.terminate_on_nan
        ]

        self.input_encoder = SSDInputEncoder()

        self.train_transformations = [SSDDataAugmentation()]
        self.validation_transformations = [
            ConvertTo3Channels(),
            Resize(height=300, width=300)
        ]
        self.test_transformations = [
            ConvertTo3Channels(),
            Resize(height=300, width=300)
        ]

        self._train_generator = None
        self._validation_generator = None
        self._test_generator = None

        self._horovod = None
        self._displayer = DisplayerObjects()
Example #6
    "/storage/hpc_lkpiel/data/fbank_train_data_padded.npy",
    encoding="bytes")[..., np.newaxis]
val_data_padded = np.load("/storage/hpc_lkpiel/data/fbank_val_data_padded.npy",
                          encoding="bytes")[..., np.newaxis]
test_data_padded = np.load(
    "/storage/hpc_lkpiel/data/fbank_test_data_padded.npy",
    encoding="bytes")[..., np.newaxis]
train_data_padded = train_data_padded[np.array(trainMaleIndexes)]
val_data_padded = val_data_padded[np.array(valMaleIndexes)]
test_data_padded = test_data_padded[np.array(testMaleIndexes)]
print("DATA LOADED")
################################################################################################

reduce_lr = ReduceLROnPlateau(monitor='val_loss',
                              factor=0.7,
                              patience=2,
                              min_lr=0.0001,
                              verbose=1)

kernel_regularizer = regularizers.l2(0.0001)

model_66 = Sequential([
    Conv2D(128, (3, 20),
           activation='relu',
           kernel_regularizer=kernel_regularizer,
           padding='valid',
           input_shape=(1107, 20, 1)),
    Conv2D(128, (5, 1),
           strides=(3, 1),
           activation='relu',
           kernel_regularizer=kernel_regularizer,
Example #7
    np.random.seed(None)

    # Use 90% of the data for training and 10% for validation.
    num_val = int(len(lines) * 0.1)
    num_train = len(lines) - num_val

    # Checkpointing: save a checkpoint every 3 epochs
    checkpoint_period = ModelCheckpoint(
        log_dir + 'ep{epoch:03d}-loss{loss:.3f}-val_loss{val_loss:.3f}.h5',
        monitor='val_loss',
        save_weights_only=True,
        save_best_only=True,
        period=3)
    # Learning-rate schedule: if val_loss does not improve for 3 epochs, reduce the learning rate and keep training
    reduce_lr = ReduceLROnPlateau(monitor='val_loss',
                                  factor=0.5,
                                  patience=3,
                                  verbose=1)
    # Early stopping: once val_loss stops improving, the model is essentially trained and training can stop
    early_stopping = EarlyStopping(monitor='val_loss',
                                   min_delta=0,
                                   patience=10,
                                   verbose=1)

    # Cross-entropy loss
    model.compile(loss=loss, optimizer=Adam(lr=1e-4), metrics=['accuracy'])
    batch_size = 4
    print('Train on {} samples, val on {} samples, with batch size {}.'.format(
        num_train, num_val, batch_size))

    # Start training
    model.fit_generator(generate_arrays_from_file(lines[:num_train],
Example #8
                                               verbose=1,
                                               mode='auto')

# set model checkpoint callback (model weights will auto save in weight_save_path)
checkpoint = ModelCheckpoint(weight_save_path,
                             monitor='val_acc',
                             verbose=1,
                             save_best_only=True,
                             mode='max',
                             period=1)

# Monitor a learning indicator (reduce the learning rate when learning stagnates)
reduceLRcallback = ReduceLROnPlateau(monitor='val_acc',
                                     factor=0.5,
                                     patience=5,
                                     verbose=1,
                                     mode='auto',
                                     cooldown=0,
                                     min_lr=0)

#-----------------------------------------------------------------------------------------
#---------------------------image data generator------------------------------------------
#-----------------------------------------------------------------------------------------

# TODO: try the data augmentation method you want
train_datagen = ImageDataGenerator(
    rescale=1 / 255.,
    rotation_range=45,
    width_shift_range=0.2,  # degree of horizontal offset (a ratio relative to image width)
    height_shift_range=
Example #9
    # fig, axes = plt.subplots(1, 2)
    # axes[0].imshow(train_images[10])
    # axes[0].set_title('image')
    # axes[1].imshow(train_masks[10][:, :, 0])
    # axes[1].set_title('mask')
    # plt.show()
    #
    # exit()
    callbacks_list = [
        ModelCheckpoint('models/unet_rgb' + str(BATCH) + '_batch.h5',
                        verbose=1,
                        save_best_only=True,
                        mode='min',
                        save_weights_only=True),
        TensorBoard(log_dir='./logs', batch_size=BATCH, write_images=True),
        ReduceLROnPlateau(verbose=1, factor=0.25, patience=3, min_lr=1e-6)
    ]

    model = Unet()

    model.summary()
    model.compile(optimizer=Adam(1e-3),
                  loss=loss,
                  metrics=[dice_score, jaccard_score])

    model_json = model.to_json()
    with open('models/unet_rgb' + str(BATCH) + '_batch.json', 'w') as json_file:
        json_file.write(model_json)
    print('Model saved!')
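
To reload what this example saves, a minimal sketch: restore the architecture from the JSON file and the best weights written by ModelCheckpoint (assuming the same BATCH value and the loss, dice_score and jaccard_score objects defined earlier in that script):

from keras.models import model_from_json
from keras.optimizers import Adam

with open('models/unet_rgb' + str(BATCH) + '_batch.json') as json_file:
    restored = model_from_json(json_file.read())
restored.load_weights('models/unet_rgb' + str(BATCH) + '_batch.h5')
restored.compile(optimizer=Adam(1e-3), loss=loss, metrics=[dice_score, jaccard_score])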