Example #1
import sys

from keras import layers
from keras.models import Model
from keras.optimizers import Adam

from losses import categorical_focal_loss  # project-local helper; a sketch follows this example


def build_model(model_name, act, weight, size, loss=None):
    if model_name == "inception":
        from keras.applications import InceptionV3
        _model = InceptionV3(
            weights=weight,  # inception weights
            include_top=False,
            input_shape=(size, size, 3))

    elif model_name == "resnet":
        from keras.applications import ResNet50
        _model = ResNet50(
            weights=weight,  # resnet weights
            include_top=False,
            input_shape=(size, size, 3))

    elif model_name == "VGG":
        from keras.applications import VGG16
        _model = VGG16(
            weights=weight,  # VGG weights
            include_top=False,
            input_shape=(size, size, 3))

    elif model_name == "inceptionresnet":
        from keras.applications import InceptionResNetV2
        _model = InceptionResNetV2(
            weights=weight,  # inceptionresnet weights
            include_top=False,
            input_shape=(size, size, 3))

    else:
        print("This model is not available.")
        sys.exit()

    GAP_layer = layers.GlobalAveragePooling2D()
    drop_layer = layers.Dropout(0.5)
    dense_layer = layers.Dense(5, activation=act, name='final_output')

    x = GAP_layer(_model.layers[-1].output)
    x = drop_layer(x)
    final_output = dense_layer(x)

    model = Model(_model.layers[0].input, final_output)
    if loss is not None:  # a loss was explicitly requested: use focal loss
        model.compile(loss=categorical_focal_loss(gamma=2.0, alpha=0.25),
                      optimizer=Adam(lr=0.00005),
                      metrics=["accuracy"])
    else:
        model.compile(loss='binary_crossentropy',
                      optimizer=Adam(lr=0.01),
                      metrics=['accuracy'])

    return model
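All four examples call categorical_focal_loss without defining it. Below is a
minimal sketch of such a helper, following the usual focal-loss formulation of
Lin et al. (2017); the closure name and the sum reduction are assumptions:

from keras import backend as K

def categorical_focal_loss(alpha=0.25, gamma=2.0):
    """Focal loss for one-hot targets: FL(p) = -alpha * (1 - p)^gamma * log(p).

    `alpha` may be a scalar or a per-class vector, as in the examples here.
    """
    def focal_loss(y_true, y_pred):
        # Clip predictions so log() stays finite.
        y_pred = K.clip(y_pred, K.epsilon(), 1.0 - K.epsilon())
        # Per-class cross-entropy term.
        cross_entropy = -y_true * K.log(y_pred)
        # Down-weight well-classified examples and apply the class weighting.
        modulator = alpha * K.pow(1.0 - y_pred, gamma)
        return K.sum(modulator * cross_entropy, axis=-1)
    return focal_loss
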
Example #2
import numpy as np
import keras
import efficientnet.keras as efn  # assumed: qubvel's efficientnet package
from keras.layers import (Input, Conv2D, Dense, Dropout,
                          GlobalAveragePooling2D, Softmax, concatenate)
from keras.models import Model
from keras_radam import RAdam  # assumed RAdam implementation
from group_norm import GroupNormalization  # assumed local GroupNormalization layer

from losses import categorical_focal_loss


def create_model(dim=(256, 256), weights=np.ones(5), split=False):

    f_loss = categorical_focal_loss(alpha=weights)

    IMG_WIDTH, IMG_HEIGHT, CHANNELS = *dim, 3
    input_shape = (IMG_WIDTH, IMG_HEIGHT, CHANNELS)
    elu = keras.layers.ELU(alpha=1.0)

    # create the base pre-trained model
    # Load EfficientNetB4
    effnet = efn.EfficientNetB4(weights=None,
                                include_top=False,
                                input_shape=(IMG_WIDTH, IMG_HEIGHT, CHANNELS))
    effnet.load_weights(
        '/media/parth/DATA/datasets/aptos_2019/efficientnet-b4_imagenet_1000_notop.h5'
    )

    # Replace all Batch Normalization layers with Group Normalization layers
    # (note: reassigning effnet.layers[i] only swaps the list entry; it does
    # not rewire the already-built graph)
    for i, layer in enumerate(effnet.layers):
        if "batch_normalization" in layer.name:
            effnet.layers[i] = GroupNormalization(groups=32,
                                                  axis=-1,
                                                  epsilon=0.00001)

    if split:

        input1 = Input(input_shape)
        input2 = Input(input_shape)
        input3 = Input(input_shape)
        input4 = Input(input_shape)
        conv1 = Conv2D(16, 3, padding='same')(input1)
        conv2 = Conv2D(16, 3, padding='same')(input2)
        conv3 = Conv2D(16, 3, padding='same')(input3)
        conv4 = Conv2D(16, 3, padding='same')(input4)
        concat = concatenate([conv1, conv2, conv3, conv4])
        enet_input = Conv2D(3, 3, padding='same')(concat)
        x = effnet(enet_input)
        x = GlobalAveragePooling2D()(x)
        x = Dense(256)(x)
        x = Dropout(0.25)(x)
        x = Dense(5)(x)
        predictions = Softmax()(x)

        model = Model(inputs=[input1, input2, input3, input4],
                      outputs=predictions)
        model.compile(loss=f_loss,
                      optimizer=RAdam(learning_rate=0.00005),
                      metrics=[f_loss, 'acc'])
        model.summary()

        return model

    else:

        x = effnet.output
        x = GlobalAveragePooling2D()(x)
        x = Dense(256)(x)
        x = Dropout(0.25)(x)
        x = Dense(5)(x)
        predictions = Softmax()(x)

        model = Model(inputs=effnet.input, outputs=predictions)
        model.compile(loss=f_loss,
                      optimizer=RAdam(learning_rate=0.00005),
                      metrics=[f_loss, 'acc'])
        model.summary()

        return model
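For reference, a hypothetical call of both variants (the hard-coded
EfficientNet weight file must exist for create_model to succeed; the dummy
arrays are placeholders):

import numpy as np

# Single-input variant: one (256, 256, 3) image per sample.
model = create_model(dim=(256, 256), split=False)

# Split variant: expects a list of four (256, 256, 3) tensors per sample.
split_model = create_model(dim=(256, 256), split=True)
dummy = [np.zeros((1, 256, 256, 3), dtype='float32') for _ in range(4)]
probas = split_model.predict(dummy)  # -> shape (1, 5), softmax probabilities
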
Example #3
##########################################################################

import os
import pandas as pd

os.chdir('C:/Users/rfuchs/Documents/GitHub/phyto_curves_reco')

from pred_functions import pred_n_count
import tensorflow as tf
from tensorflow_addons.optimizers import Lookahead, RectifiedAdam
from time import time
from copy import deepcopy
from losses import categorical_focal_loss

# Model and nomenclature 
model = tf.keras.models.load_model('trained_models/hyperopt_model_focal2', compile=False)
model.compile(optimizer=Lookahead(RectifiedAdam(learning_rate=0.003589101299926042),
                                  sync_period=10, slow_step_size=0.20736365316666247),
              loss=categorical_focal_loss(gamma=2.199584705628343, alpha=0.25))


tn = pd.read_csv('train_test_nomenclature.csv')
tn.columns = ['Particle_class', 'label']


phyto_ts = pd.DataFrame(columns = ['picoeucaryote', 'synechococcus', 'nanoeucaryote', 'cryptophyte', \
       'unassigned particle', 'airbubble', 'microphytoplancton', 'prochlorococcus', 'date'])

phyto_ts_proba = deepcopy(phyto_ts)
  

# Extracted from X_test
thrs = [0.8158158158158159, 0.7297297297297297, 0.5085085085085085, 0.3963963963963964, 0.8378378378378378, \
        0.7417417417417418, 0.42542542542542544]
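How the per-class cut-offs in thrs are consumed lives inside pred_n_count;
a typical thresholding step looks like this sketch (apply_thresholds is a
hypothetical helper, and the fallback index for sub-threshold particles is
an assumption):

import numpy as np

def apply_thresholds(probas, thrs, fallback=-1):
    # Keep the argmax class only when its probability clears the
    # class-specific threshold; otherwise flag the particle as unassigned.
    labels = np.argmax(probas, axis=1)
    top = probas[np.arange(len(probas)), labels]
    return np.where(top >= np.asarray(thrs)[labels], labels, fallback)
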
Example #4
##############################################################################################
############################ Try with focal loss  ############################################
##############################################################################################

import numpy as np
from time import time

from keras import backend as K
from keras import optimizers
from keras.callbacks import EarlyStopping, ModelCheckpoint
from keras.optimizers import Adam

from losses import categorical_focal_loss

batch_size = 64
STEP_SIZE_TRAIN = (len(X_train) // batch_size) + 1
STEP_SIZE_VALID = (len(X_valid) // 128) + 1

sslamm_clf = model13(X_train, y_train, dp=0.2)
es = EarlyStopping(monitor='val_loss', mode='min', verbose=1, patience=4)
ENN_check = ModelCheckpoint(filepath='tmp/weights_sslamm_focal.hdf5',\
                            verbose = 1, save_best_only=True)

ad = Adam(lr=1e-2)
sslamm_clf.compile(loss=[categorical_focal_loss(alpha=.25, gamma=2)], \
                   metrics=["accuracy"], optimizer=ad)

epoch_nb = 10

history = sslamm_clf.fit(X_train, y_train, validation_data=(X_valid, y_valid), \
                    steps_per_epoch = STEP_SIZE_TRAIN, validation_steps = STEP_SIZE_VALID,\
                    epochs = epoch_nb, callbacks = [ENN_check, es], class_weight = w,\
                        shuffle=True) # weights ?

sslamm_clf.load_weights('tmp/weights_sslamm_focal.hdf5')

# Compute test accuracy
start = time()
preds = np.argmax(sslamm_clf.predict(X_test), axis=1)
end = time()
hp_lambda = K.variable(1.)

model_combined, model_class = resnet.DomainResnetBuilder.build_resnet_18(
    (1, 257, 1091), 2, hp_lambda)

# model_class.summary()
# model_domain.summary()

adam = optimizers.Adam(amsgrad=False)
sgd = optimizers.SGD(lr=0.001, momentum=0.9)

model_combined.compile(
    optimizer=sgd,
    loss=[masked_loss_function,
          categorical_focal_loss(alpha=.25, gamma=5)],
    metrics=["accuracy"])

# model_combined.compile(
#         optimizer=adam,
#         loss=[masked_loss_function, "categorical_crossentropy"],
#         metrics=['accuracy'])
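masked_loss_function is not defined in these snippets. A common choice in
domain-adaptation setups, assumed here, is a categorical cross-entropy that
skips samples whose one-hot row is all zeros (i.e. unlabeled target-domain
samples); a minimal sketch:

from keras import backend as K

def masked_loss_function(y_true, y_pred):
    # An all-zero label row means "no class label": mask that sample out.
    has_label = K.cast(K.any(K.not_equal(y_true, 0.0), axis=-1), K.floatx())
    y_pred = K.clip(y_pred, K.epsilon(), 1.0 - K.epsilon())
    cross_entropy = -K.sum(y_true * K.log(y_pred), axis=-1)
    return cross_entropy * has_label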

model_combined.fit(
    x=X_domain,
    y=[y_class, y_domain],
    epochs=10,
    batch_size=8,
    verbose=2,
    validation_data=(X_test, [y_test, y_test_domain]),
    callbacks=[lr_reducer, tensorboard])

def create_model(X_train, y_train, X_valid, y_valid, X_test, y_test):
    """
    Model providing function:

    Create Keras model with double curly brackets dropped-in as needed.
    Return value has to be a valid python dictionary with two customary keys:
        - loss: Specify a numeric evaluation metric to be minimized
        - status: Just use STATUS_OK and see hyperopt documentation if not feasible
    The last one is optional, though recommended, namely:
        - model: specify the model just created so that we can later use it again.
    """

    dp = {{uniform(0, 0.5)}}

    N_CLASSES = y_train.shape[1]
    max_len = X_train.shape[1]
    nb_curves = X_train.shape[2]

    sequence_input = tf.keras.layers.Input(shape=(max_len, nb_curves),
                                           dtype='float32')

    # Stacked 1D convolutions (64, 32, 16 filters): extract features from the curves
    x = tf.keras.layers.Conv1D(64, 5, activation='relu')(sequence_input)
    x = tf.keras.layers.Conv1D(32, 5, activation='relu')(x)
    x = tf.keras.layers.Conv1D(16, 5, activation='relu')(x)

    # Average those features
    average = tf.keras.layers.GlobalAveragePooling1D()(x)
    dense2 = tf.keras.layers.Dense(32, activation='relu')(
        average)  # Does using 2*32 layers make sense ?
    drop2 = tf.keras.layers.Dropout(dp)(dense2)
    dense3 = tf.keras.layers.Dense(32, activation='relu')(drop2)
    drop3 = tf.keras.layers.Dropout(dp)(dense3)
    dense4 = tf.keras.layers.Dense(16, activation='relu')(drop3)
    drop4 = tf.keras.layers.Dropout(dp)(dense4)

    predictions = tf.keras.layers.Dense(N_CLASSES, activation='softmax')(drop4)

    model = tf.keras.Model(sequence_input, predictions)

    #==================================================
    # Specifying the optimizer
    #==================================================

    es = EarlyStopping(monitor='val_loss', mode='min', verbose=1, patience=15)
    check = ModelCheckpoint(filepath='w_focal_hyperopt.hdf5',\
                            verbose = 1, save_best_only=True)

    optim_ch = {{choice(['adam', 'ranger'])}}
    lr = {{uniform(1e-3, 1e-2)}}

    if optim_ch == 'adam':
        optim = tf.keras.optimizers.Adam(lr=lr)
    else:
        sync_period = {{choice([2, 6, 10])}}
        slow_step_size = {{normal(0.5, 0.1)}}
        rad = RectifiedAdam(lr=lr)
        optim = Lookahead(rad,
                          sync_period=sync_period,
                          slow_step_size=slow_step_size)

    batch_size = {{choice([64 * 4, 64 * 8])}}
    STEP_SIZE_TRAIN = (len(X_train) // batch_size) + 1
    STEP_SIZE_VALID = 1

    alpha = {{uniform(0, 1)}}
    gamma = {{uniform(1, 2.5)}}

    model.compile(loss=[categorical_focal_loss(alpha=alpha, gamma=gamma)],
                  metrics=['accuracy'],
                  optimizer=optim)

    result = model.fit(X_train, y_train, validation_data=(X_valid, y_valid), \
                    steps_per_epoch = STEP_SIZE_TRAIN, validation_steps = STEP_SIZE_VALID,\
                    epochs = 60, shuffle=True, verbose=2, callbacks = [check, es])

    # Get the lowest validation loss across the training epochs
    loss_acc = np.amin(result.history['val_loss'])
    print('Min loss of epoch:', loss_acc)
    model.load_weights('w_focal_hyperopt.hdf5')
    return {'loss': loss_acc, 'status': STATUS_OK, 'model': model}
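The double curly braces ({{uniform(0, 0.5)}}, {{choice([64 * 4, 64 * 8])}})
are hyperas template fields, so a function like this is meant to be driven by
hyperas's optim.minimize. A minimal driver sketch; the data() loader below is
a stand-in that fabricates random curves with shapes the model accepts:

import numpy as np
from hyperas import optim
from hyperopt import Trials, tpe

def data():
    # Stand-in loader: 128 random "curves" of length 120 with 4 channels, 2 classes.
    rng = np.random.RandomState(0)
    X = rng.rand(128, 120, 4).astype('float32')
    y = np.eye(2)[rng.randint(0, 2, size=128)].astype('float32')
    return X[:96], y[:96], X[96:112], y[96:112], X[112:], y[112:]

best_run, best_model = optim.minimize(model=create_model,
                                      data=data,
                                      algo=tpe.suggest,
                                      max_evals=30,
                                      trials=Trials())
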
model_combined, model_class = resnet.DomainResnetBuilder.build_resnet_18(
    (1, 257, 501), 2, hp_lambda)

model_class.summary()

adam = optimizers.Adam(amsgrad=False)
sgd = optimizers.SGD(lr=0.001, momentum=0.9)

# model_class.compile(
#         optimizer=adam,
#         loss="categorical_crossentropy",
#         metrics=['accuracy'])

model_combined.compile(optimizer=adam,
                       loss=[
                           masked_loss_function,
                           categorical_focal_loss(alpha=[0.20, 1], gamma=10)
                       ],
                       metrics=["accuracy"])

# model_class.fit(
#         x=X_train,
#         y=y_train,
#         epochs=20,
#         batch_size=8,
#         verbose=2,
#         validation_data= (X_test_2016, y_test_2016), callbacks=[tensorboard, developmentSetEval(y_test_2016_saved)])

model_combined.fit(
    x=X_domain,
    y=[y_class, y_domain],
    epochs=20,