def modelBuilding(self):
        """Build and compile the EfficientNetB4-based 7-class classifier.

        Side effects: sets ``self.pretrainedNet`` (fully trainable backbone)
        and ``self.clf`` (compiled keras Model), then prints its summary.
        """
        def focalLoss(alpha=1., gamma=2.):
            # Factory: returns a multi-class focal-loss function with the
            # given alpha/gamma constants baked in.
            alpha = float(alpha)
            gamma = float(gamma)

            def multiCategoryFocalLossFixed(yTrue, yPred):
                yTrue = tf.cast(yTrue, tf.float32)
                yPred = tf.cast(yPred, tf.float32)
                # Clip predictions away from 0/1 so the log below is finite.
                yPred = K.clip(yPred, K.epsilon(), 1. - K.epsilon())
                crossEntropy = yTrue * (-K.log(yPred))
                # (1 - p)^gamma down-weights easy, well-classified examples.
                modulation = yTrue * tf.pow(1. - yPred, gamma)
                focal = alpha * (modulation * crossEntropy)
                # Per-sample loss is the max over the class axis; the batch
                # loss is the sum of those per-sample values.
                perSample = tf.reduce_max(focal, axis=1)
                return tf.reduce_sum(perSample)

            return multiCategoryFocalLossFixed

        self.pretrainedNet = EfficientNetB4(weights="imagenet", include_top=False)
        # Fine-tune the whole backbone.
        for layer in self.pretrainedNet.layers:
            layer.trainable = True

        imageIn = Input(shape=self.inputShape)
        features = self.pretrainedNet(imageIn)
        pooled = GlobalAveragePooling2D()(features)
        classScores = Dense(7, activation=softmax, use_bias=True,
                            kernel_initializer=glorot_uniform(seed=2020),
                            bias_initializer=Zeros())(pooled)

        self.clf = Model(imageIn, classScores)
        self.clf.compile(
            optimizer=Adam(lr=1e-3),
            loss=focalLoss(alpha=1., gamma=2.),
            metrics=["accuracy"]
        )
        self.clf.summary()
def loadModel(class_nums):
    """Build an EfficientNetB4 classifier from locally stored weights.

    Only the last five backbone layers are left trainable; everything
    before them is frozen.

    :param class_nums: number of output classes for the softmax head.
    :return: an uncompiled ``keras.Sequential`` model.
    """
    backbone = EfficientNetB4(
        input_shape=(IMG_SIZE, IMG_SIZE, CHANNLES),
        include_top=False,
        weights='./models_weights/efficientnet-b4_weights_tf_dim_ordering_tf_kernels_autoaugment_notop.h5'
    )
    backbone.trainable = True
    # Freeze every layer except the last five.
    cutoff = len(backbone.layers) - 5
    for frozen_layer in backbone.layers[:cutoff]:
        frozen_layer.trainable = False

    return keras.Sequential([
        backbone,
        keras.layers.GlobalAveragePooling2D(),
        keras.layers.Dropout(rate=0.5),
        keras.layers.Dense(class_nums, activation="softmax")
    ])
Exemple #3
0
def efficientnet(b,
                 weights='imagenet',
                 include_top=False,
                 input_shape=(None, None, 3)):
    """Loads the appropriate EfficientNet model with weights

    :param b: The size of the EfficientNet model (0-7).
    :type b: int
    :param weights: The pretrained weights to load. Defaults to imagenet.
    :type weights: str
    :param include_top: Include the pretrained softmax layer. Defaults to False
    :type include_top: bool
    :param input_shape: Shape of input images. Defaults to no height or width, 3 channels.
    :type input_shape: Tuple

    :return: EfficientNet Model.
    :rtype: tf.keras.models.Model

    :raises ValueError: If ``b`` is not an integer in [0, 7].
    """

    if b == 0:
        return EfficientNetB0(weights=weights,
                              include_top=include_top,
                              input_shape=input_shape)
    elif b == 1:
        return EfficientNetB1(weights=weights,
                              include_top=include_top,
                              input_shape=input_shape)
    elif b == 2:
        return EfficientNetB2(weights=weights,
                              include_top=include_top,
                              input_shape=input_shape)
    elif b == 3:
        return EfficientNetB3(weights=weights,
                              include_top=include_top,
                              input_shape=input_shape)
    elif b == 4:
        return EfficientNetB4(weights=weights,
                              include_top=include_top,
                              input_shape=input_shape)
    elif b == 5:
        return EfficientNetB5(weights=weights,
                              include_top=include_top,
                              input_shape=input_shape)
    elif b == 6:
        return EfficientNetB6(weights=weights,
                              include_top=include_top,
                              input_shape=input_shape)
    elif b == 7:
        return EfficientNetB7(weights=weights,
                              include_top=include_top,
                              input_shape=input_shape)
    else:
        # ValueError is more precise than the bare Exception previously
        # raised, and remains catchable by existing `except Exception`.
        raise ValueError(
            "Invalid size for EfficientNet: {!r} (expected 0-7)".format(b))
Exemple #4
0
def create_efficientNet():
    """Build an EfficientNet classifier sized by the module-level
    ``image_model`` flag (4, 2 or 0 selecting B4/B2/B0) and re-headed with
    a fresh pooling/dropout/softmax top for ``num_classes``.

    :return: keras ``Model`` mapping ``input_size`` images to
        ``num_classes`` softmax scores.
    :raises ValueError: if ``image_model`` is not one of 0, 2 or 4.
    """
    if image_model == 4:
        baseModel = EfficientNetB4(weights='imagenet',
                                   include_top=False,
                                   input_shape=input_size)
    elif image_model == 2:
        baseModel = EfficientNetB2(weights='imagenet',
                                   include_top=False,
                                   input_shape=input_size)
    elif image_model == 0:
        baseModel = EfficientNetB0(weights='imagenet',
                                   include_top=False,
                                   input_shape=input_size)
    else:
        # Previously fell through and crashed later with an unbound
        # `baseModel` NameError; fail fast with a clear message instead.
        raise ValueError(
            'Unsupported image_model: {!r} (expected 0, 2 or 4)'.format(
                image_model))
    # .pop() removes the last layer from the model's bookkeeping list; the
    # graph is re-headed below by reusing that layer's input tensor.
    # NOTE(review): assumes the backbone's final layer input is the top
    # conv feature map — confirm against the EfficientNet package in use.
    probs = baseModel.layers.pop()
    top_conv = probs.input
    headModel = layers.Activation(swish, name='top_activation')(top_conv)
    headModel = layers.GlobalAveragePooling2D(name='avg_pool')(headModel)
    headModel = layers.Dropout(0.2, name='top_dropout')(headModel)
    headModel = layers.Dense(num_classes, activation='softmax')(headModel)
    model = Model(inputs=baseModel.input, outputs=headModel)
    return model
Exemple #5
0
# Steps per epoch: ceil(#images / batch size), materialized as numpy scalars.
TRAIN_STEP_PER_EPOCH = tf.math.ceil(train_images_len / BATCH_SIZE).numpy()
VALID_STEP_PER_EPOCH = tf.math.ceil(valid_images_len / BATCH_SIZE).numpy()

# Create the basic Datasets

train_ds = make_tf_dataset(train_images, train_labels)
valid_ds = make_tf_dataset(valid_images, valid_labels)

# Infinite (repeat) batched pipelines; prefetch overlaps preprocessing with
# training. Iteration counts are bounded by the *_STEP_PER_EPOCH values above.
train_ds = train_ds.repeat().batch(BATCH_SIZE).prefetch(
    tf.data.experimental.AUTOTUNE)  # tf.data.experimental.AUTOTUNE
valid_ds = valid_ds.repeat().batch(BATCH_SIZE).prefetch(
    tf.data.experimental.AUTOTUNE)

# Headless ImageNet-pretrained backbone shared by create_model() below.
base_model = EfficientNetB4(weights='imagenet',
                            include_top=False,
                            input_shape=(IMG_SIZE, IMG_SIZE, 3))

def create_model():
    """Stack a small classification head on the shared EfficientNetB4
    backbone: global pooling, a 256-unit dense block with batch norm and
    ReLU, then a softmax over ``train_labels_len`` classes."""
    stack = [
        base_model,
        GlobalAveragePooling2D(),
        Dense(256),
        BatchNormalization(),
        ReLU(),
        Dense(train_labels_len, activation='softmax'),
    ]
    net = models.Sequential()
    for piece in stack:
        net.add(piece)
    return net


model = create_model()
Exemple #6
0
def UEfficientNetB4(
        input_shape=(256, 256, 3), dropout_rate=0.5,
        imagenet_weights='imagenet'):
    """U-Net-style segmentation network on an EfficientNetB4 encoder.

    The decoder upsamples with Conv2DTranspose stages and residual blocks,
    concatenating encoder feature maps tapped at fixed backbone layer
    indices. Six sigmoid maps are returned: the fused output ``d`` plus
    deep-supervision heads ``d1``..``d5`` from successive decoder depths.

    :param input_shape: input image shape. NOTE(review): spatial dims are
        presumably required to be divisible by 32 for the skip-connection
        shapes to line up — confirm before changing the default.
    :param dropout_rate: dropout applied between decoder stages.
    :param imagenet_weights: forwarded to EfficientNetB4's ``weights``.
    :return: ``keras.models.Model(inputs, [d, d11, d22, d33, d44, d55])``.
    """
    backbone = EfficientNetB4(weights=imagenet_weights,
                              include_top=False,
                              input_shape=input_shape)
    input = backbone.input  # NOTE: shadows the builtin `input`
    start_neurons = 8  # channel-width multiplier for all decoder stages

    # Deepest encoder tap. NOTE(review): the hard-coded indices 342/154/89/30
    # assume a specific EfficientNetB4 implementation's layer ordering —
    # verify against the imported efficientnet package version.
    conv4 = backbone.layers[342].output
    conv4 = keras.layers.LeakyReLU(alpha=0.1)(conv4)
    pool4 = keras.layers.MaxPooling2D((2, 2))(conv4)
    pool4 = keras.layers.Dropout(dropout_rate)(pool4)

    # Middle (bottleneck) block between encoder and decoder.
    convm = keras.layers.Conv2D(start_neurons * 32, (3, 3),
                                activation=None,
                                padding="same",
                                name='conv_middle')(pool4)
    convm = residual_block(convm, start_neurons * 32)
    convm = residual_block(convm, start_neurons * 32)
    convm = keras.layers.LeakyReLU(alpha=0.1)(convm)

    # Decoder stage 4. The extra *_up1.._up3 tensors are progressively
    # upsampled copies of deconv4, reused as additional skip inputs by the
    # shallower stages below.
    deconv4 = keras.layers.Conv2DTranspose(start_neurons * 16, (3, 3),
                                           strides=(2, 2),
                                           padding="same")(convm)
    deconv4_up1 = keras.layers.Conv2DTranspose(start_neurons * 16, (3, 3),
                                               strides=(2, 2),
                                               padding="same")(deconv4)
    deconv4_up2 = keras.layers.Conv2DTranspose(start_neurons * 16, (3, 3),
                                               strides=(2, 2),
                                               padding="same")(deconv4_up1)
    deconv4_up3 = keras.layers.Conv2DTranspose(start_neurons * 16, (3, 3),
                                               strides=(2, 2),
                                               padding="same")(deconv4_up2)
    uconv4 = keras.layers.concatenate([deconv4, conv4])
    uconv4 = keras.layers.Dropout(dropout_rate)(uconv4)

    uconv4 = keras.layers.Conv2D(start_neurons * 16, (3, 3),
                                 activation=None,
                                 padding="same")(uconv4)
    uconv4 = residual_block(uconv4, start_neurons * 16)
    #     uconv4 = residual_block(uconv4,start_neurons * 16)
    uconv4 = keras.layers.LeakyReLU(alpha=0.1)(uconv4)  # conv1_2

    # Decoder stage 3: combine with the encoder tap at backbone layer 154
    # and the once-upsampled stage-4 features.
    deconv3 = keras.layers.Conv2DTranspose(start_neurons * 8, (3, 3),
                                           strides=(2, 2),
                                           padding="same")(uconv4)
    deconv3_up1 = keras.layers.Conv2DTranspose(start_neurons * 8, (3, 3),
                                               strides=(2, 2),
                                               padding="same")(deconv3)
    deconv3_up2 = keras.layers.Conv2DTranspose(start_neurons * 8, (3, 3),
                                               strides=(2, 2),
                                               padding="same")(deconv3_up1)
    conv3 = backbone.layers[154].output
    uconv3 = keras.layers.concatenate([deconv3, deconv4_up1, conv3])
    uconv3 = keras.layers.Dropout(dropout_rate)(uconv3)

    uconv3 = keras.layers.Conv2D(start_neurons * 8, (3, 3),
                                 activation=None,
                                 padding="same")(uconv3)
    uconv3 = residual_block(uconv3, start_neurons * 8)
    #     uconv3 = residual_block(uconv3,start_neurons * 8)
    uconv3 = keras.layers.LeakyReLU(alpha=0.1)(uconv3)

    # Decoder stage 2: encoder tap at backbone layer 89 plus upsampled
    # features from stages 3 and 4.
    deconv2 = keras.layers.Conv2DTranspose(start_neurons * 4, (3, 3),
                                           strides=(2, 2),
                                           padding="same")(uconv3)
    deconv2_up1 = keras.layers.Conv2DTranspose(start_neurons * 4, (3, 3),
                                               strides=(2, 2),
                                               padding="same")(deconv2)
    conv2 = backbone.layers[89].output  #92=>89
    uconv2 = keras.layers.concatenate(
        [deconv2, deconv3_up1, deconv4_up2, conv2])

    uconv2 = keras.layers.Dropout(0.1)(uconv2)
    uconv2 = keras.layers.Conv2D(start_neurons * 4, (3, 3),
                                 activation=None,
                                 padding="same")(uconv2)
    uconv2 = residual_block(uconv2, start_neurons * 4)
    #     uconv2 = residual_block(uconv2,start_neurons * 4)
    uconv2 = keras.layers.LeakyReLU(alpha=0.1)(uconv2)

    # Decoder stage 1: encoder tap at backbone layer 30 plus upsampled
    # features from stages 2, 3 and 4.
    deconv1 = keras.layers.Conv2DTranspose(start_neurons * 2, (3, 3),
                                           strides=(2, 2),
                                           padding="same")(uconv2)
    conv1 = backbone.layers[30].output
    uconv1 = keras.layers.concatenate(
        [deconv1, deconv2_up1, deconv3_up2, deconv4_up3, conv1])

    uconv1 = keras.layers.Dropout(0.1)(uconv1)
    uconv1 = keras.layers.Conv2D(start_neurons * 2, (3, 3),
                                 activation=None,
                                 padding="same")(uconv1)
    uconv1 = residual_block(uconv1, start_neurons * 2)
    #     uconv1 = residual_block(uconv1,start_neurons * 2)
    uconv1 = keras.layers.LeakyReLU(alpha=0.1)(uconv1)

    # Decoder stage 0: final upsample to (presumably) full input resolution.
    uconv0 = keras.layers.Conv2DTranspose(start_neurons * 1, (3, 3),
                                          strides=(2, 2),
                                          padding="same")(uconv1)
    uconv0 = keras.layers.Dropout(0.1)(uconv0)
    uconv0 = keras.layers.Conv2D(start_neurons * 1, (3, 3),
                                 activation=None,
                                 padding="same")(uconv0)
    uconv0 = residual_block(uconv0, start_neurons * 1)
    #     uconv0 = residual_block(uconv0,start_neurons * 1)
    uconv0 = keras.layers.LeakyReLU(alpha=0.1)(uconv0)
    uconv0 = keras.layers.Dropout(dropout_rate / 2)(uconv0)

    # Deep-supervision heads: each decoder stage is upsampled to the input
    # resolution and reduced to a single-channel sigmoid map.
    d1 = keras.layers.UpSampling2D(size=(2, 2))(uconv0)
    d1 = keras.layers.Conv2D(1, (3, 3),
                             padding="same",
                             activation=None,
                             use_bias=False)(d1)
    d11 = keras.layers.Activation('sigmoid', name='d1')(d1)

    d2 = keras.layers.UpSampling2D(size=(4, 4))(uconv1)
    d2 = keras.layers.Conv2D(1, (3, 3),
                             padding="same",
                             activation=None,
                             use_bias=False)(d2)
    d22 = keras.layers.Activation('sigmoid', name='d2')(d2)

    d3 = keras.layers.UpSampling2D(size=(8, 8))(uconv2)
    d3 = keras.layers.Conv2D(1, (3, 3),
                             padding="same",
                             activation=None,
                             use_bias=False)(d3)
    d33 = keras.layers.Activation('sigmoid', name='d3')(d3)

    d4 = keras.layers.UpSampling2D(size=(16, 16))(uconv3)
    d4 = keras.layers.Conv2D(1, (3, 3),
                             padding="same",
                             activation=None,
                             use_bias=False)(d4)
    d44 = keras.layers.Activation('sigmoid', name='d4')(d4)

    d5 = keras.layers.UpSampling2D(size=(32, 32))(uconv4)
    d5 = keras.layers.Conv2D(1, (3, 3),
                             padding="same",
                             activation=None,
                             use_bias=False)(d5)
    d55 = keras.layers.Activation('sigmoid', name='d5')(d5)

    # Fused output: all pre-sigmoid head maps plus the raw input image are
    # concatenated and reduced to the final single-channel prediction `d`.
    d = keras.layers.concatenate([d1, d2, d3, d4, d5, input])
    d = keras.layers.Conv2D(1,
                            kernel_size=3,
                            activation=None,
                            padding='same',
                            use_bias=False)(d)
    d = keras.layers.Activation('sigmoid', name='d')(d)
    model = keras.models.Model(inputs=input,
                               outputs=[d, d11, d22, d33, d44, d55])
    #model.name = 'u-xception'
    '''
    Total params: 10,501,068
    Trainable params: 10,435,420
    Non-trainable params: 65,648
    '''
    return model
Exemple #7
0
    horizontal_flip=True)
# Validation/test pipeline: preprocessing only, no augmentation.
test_imageDataGenerator = ImageDataGenerator(
    preprocessing_function=preprocess_input)
# NOTE(review): the 7 class folders look like HAM10000 skin-lesion labels —
# confirm against the dataset layout.
train_data = train_imageDataGenerator.flow_from_directory(
    train_dir,
    target_size=(380, 380),
    classes=['akiec', 'bcc', 'bkl', 'df', 'mel', 'nv', 'vasc'],
    batch_size=batch_size_train)
valid_data = test_imageDataGenerator.flow_from_directory(
    valid_dir,
    target_size=(380, 380),
    classes=['akiec', 'bcc', 'bkl', 'df', 'mel', 'nv', 'vasc'],
    batch_size=batch_size_valid)

# Load model - Initial Download (full EfficientNetB4 including its top)
effnet = EfficientNetB4(weights='imagenet')

effnet.summary()

print(len(effnet.layers))

# Here we configure and create a new model from the existing one:
# take the output of the third-from-last layer (before the original
# classification head) and attach dropout + a 7-way softmax.
x = effnet.layers[-3].output
x = Dropout(0.30)(x)
predictions = Dense(7, activation='softmax')(x)
model = Model(inputs=effnet.input, outputs=predictions)

# Set all the layers except the last 141 as trainable: This can be changed in the future
for layer in model.layers[:-141]:
    layer.trainable = False
Exemple #8
0
def build_model(base_model, input_shape, metrics, loss, loss_weights,
                **kwargs):
    """Build and compile a three-headed classifier on a chosen backbone.

    A 3x3 Conv2D first maps the input to 3 channels so ImageNet backbones
    accept it; global pooling, batch norm and dropout feed three softmax
    heads ('root': 168, 'vowel': 11, 'consonant': 7).

    :param base_model: a backbone name ('resnet50', 'densenet121',
        'efficientnetb0'..'efficientnetb5') or an already-constructed
        keras model used as the backbone directly.
    :param input_shape: shape of the network input.
    :param metrics: forwarded to ``model.compile``.
    :param loss: forwarded to ``model.compile``.
    :param loss_weights: forwarded to ``model.compile``.
    :return: compiled keras ``Model`` with three outputs.
    :raises ValueError: if ``base_model`` is an unrecognised backbone name.
    """
    # elif chain (the original used independent ifs): once base_model is
    # replaced by a Model instance, no further string comparison is needed.
    if base_model == 'resnet50':
        from tensorflow.keras.applications import ResNet50
        base_model = ResNet50(include_top=False,
                              weights='imagenet',
                              input_shape=input_shape)
    elif base_model == 'densenet121':
        from tensorflow.keras.applications import DenseNet121
        base_model = DenseNet121(include_top=False,
                                 weights='imagenet',
                                 input_shape=input_shape)
    elif base_model == 'efficientnetb0':
        from efficientnet.tfkeras import EfficientNetB0
        base_model = EfficientNetB0(include_top=False,
                                    weights='imagenet',
                                    input_shape=input_shape)
    elif base_model == 'efficientnetb1':
        from efficientnet.tfkeras import EfficientNetB1
        base_model = EfficientNetB1(include_top=False,
                                    weights='imagenet',
                                    input_shape=input_shape)
    elif base_model == 'efficientnetb2':
        from efficientnet.tfkeras import EfficientNetB2
        base_model = EfficientNetB2(include_top=False,
                                    weights='imagenet',
                                    input_shape=input_shape)
    elif base_model == 'efficientnetb3':
        from efficientnet.tfkeras import EfficientNetB3
        base_model = EfficientNetB3(include_top=False,
                                    weights='imagenet',
                                    input_shape=input_shape)
    elif base_model == 'efficientnetb4':
        from efficientnet.tfkeras import EfficientNetB4
        base_model = EfficientNetB4(include_top=False,
                                    weights='imagenet',
                                    input_shape=input_shape)
    elif base_model == 'efficientnetb5':
        from efficientnet.tfkeras import EfficientNetB5
        base_model = EfficientNetB5(include_top=False,
                                    weights='imagenet',
                                    input_shape=input_shape)

    # Fail fast on a typo'd backbone name instead of crashing later when
    # the string is called like a model.
    if isinstance(base_model, str):
        raise ValueError('Unknown base_model: {!r}'.format(base_model))

    x_in = Input(shape=input_shape)
    x = Conv2D(3, (3, 3), padding='same')(x_in)
    x = base_model(x)

    x = GlobalAvgPool2D()(x)
    x = BatchNormalization()(x)
    x = Dropout(0.5)(x)

    out_grapheme = Dense(168, activation='softmax', name='root')(x)
    out_vowel = Dense(11, activation='softmax', name='vowel')(x)
    out_consonant = Dense(7, activation='softmax', name='consonant')(x)

    model = Model(inputs=x_in,
                  outputs=[out_grapheme, out_vowel, out_consonant])
    model.compile(Adam(lr=0.0001),
                  metrics=metrics,
                  loss=loss,
                  loss_weights=loss_weights)

    return model
# Load and preprocess image lists (True/False presumably toggles
# training-mode preprocessing — confirm in basic_processing).
train_images, train_labels, train_images_len, train_labels_len = basic_processing(trn_img_list, True)
valid_images, valid_labels, valid_images_len, valid_labels_len = basic_processing(vld_img_list, False)

# Steps per epoch: ceil(#images / batch size), as numpy scalars.
TRAIN_STEP_PER_EPOCH = tf.math.ceil(train_images_len / BATCH_SIZE).numpy()
VALID_STEP_PER_EPOCH = tf.math.ceil(valid_images_len / BATCH_SIZE).numpy()

# Create the basic Datasets

train_ds = make_tf_dataset(train_images, train_labels)
valid_ds = make_tf_dataset(valid_images, valid_labels)

# Infinite batched pipelines; prefetch overlaps preprocessing with training.
train_ds = train_ds.repeat().batch(BATCH_SIZE).prefetch(tf.data.experimental.AUTOTUNE) # tf.data.experimental.AUTOTUNE
valid_ds = valid_ds.repeat().batch(BATCH_SIZE).prefetch(tf.data.experimental.AUTOTUNE)

# ImageNet-pretrained headless backbone + pooling + softmax head.
base_model = EfficientNetB4(input_shape=(IMG_SIZE, IMG_SIZE, 3),
                            weights="imagenet",
                            include_top=False)
avg = tf.keras.layers.GlobalAveragePooling2D()(base_model.output)
output = tf.keras.layers.Dense(train_labels_len, activation="softmax")(avg)
model = tf.keras.Model(inputs=base_model.input, outputs=output)
#model = multi_gpu_model(model, gpus=3)


# Fine-tune the entire backbone.
for layer in base_model.layers:
    layer.trainable = True


def build_lrfn(lr_start=0.00001, lr_max=0.00005, 
               lr_min=0.00001, lr_rampup_epochs=5, 
               lr_sustain_epochs=0, lr_exp_decay=.8):
    lr_max = lr_max * strategy.num_replicas_in_sync
Exemple #10
0
# Training configuration constants.
DATA_FORMAT = 'channels_last'
BATCH_SIZE = 32
EPOCHS = 1000
IMAGE_SIZE = 224
print('BATCH SIZE', BATCH_SIZE, 'EPOCHS', EPOCHS, 'IMAGE SIZE', IMAGE_SIZE)

set_image_data_format('channels_last')

# Binary label mapping for the two image categories.
categories = {
    'parasitized': 1,
    'uninfected': 0,
}

# weights=None: the 2-class EfficientNetB4 (with its classification top)
# is trained from scratch, pinned to the first GPU.
with tf.device('/device:GPU:0'):
    model = EfficientNetB4(weights=None,
                           input_shape=(IMAGE_SIZE, IMAGE_SIZE, 3),
                           classes=2)
print('INPUT SHAPE', model.input_shape[1])


def process_img(path, image_size):
    """Load the image at *path*, resize it to ``image_size`` square, and
    return it as a center-cropped array (no crop padding)."""
    loaded = load_img(path, target_size=(image_size, image_size))
    pixels = img_to_array(loaded)
    return center_crop_and_resize(pixels,
                                  image_size=image_size,
                                  crop_padding=0)


def load_data(parasitized_path=PARASITIZED_PATH,
              uninfected_path=UNINFECTED_PATH):