Example #1
def build_models():
    # Model building
    # Feature extraction layers - transfer learning

    # input shape
    keras_input_shape = (KERAS_IMG_SIZE[0], KERAS_IMG_SIZE[1], 3)
    keras_input = Input(shape=keras_input_shape)

    base_model = Xception(
        include_top=False,  # drop the ImageNet classification head so we can attach our own
        weights=FEATURE_WEIGHTS_PATH,
        input_shape=keras_input_shape
    )
    base_model.trainable = False

    # Classifier
    x = base_model(keras_input, training=False)
    x = GlobalAveragePooling2D()(x)
    classifier_output = Dense(2, activation='softmax')(x)
    classifier = Model(keras_input, classifier_output)

    # Regressor
    regressor_output = Dense(4, activation='sigmoid')(x)
    regressor = Model(keras_input, regressor_output)

    return classifier, regressor
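
A minimal usage sketch for the two models returned above; the optimizer and loss choices are assumptions, not taken from the original project.

# Usage sketch (assumes KERAS_IMG_SIZE and FEATURE_WEIGHTS_PATH are defined as above).
from tensorflow.keras.optimizers import Adam

classifier, regressor = build_models()

classifier.compile(optimizer=Adam(1e-3),
                   loss='categorical_crossentropy',  # 2-way softmax head
                   metrics=['accuracy'])
regressor.compile(optimizer=Adam(1e-3),
                  loss='mse')                        # 4-value sigmoid box head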
def build_xception(height,
                   width,
                   depth,
                   include_top=False,
                   weights='imagenet'):
    xception_model = Xception(weights=weights,
                              include_top=include_top,
                              input_shape=(height, width, depth))

    model = Sequential()
    model.add(xception_model)

    model.add(Flatten())
    model.add(Dense(512))
    model.add(LeakyReLU())
    model.add(BatchNormalization())
    model.add(Dropout(0.4))

    model.add(Dense(256))
    model.add(LeakyReLU())
    model.add(BatchNormalization())
    model.add(Dropout(0.4))

    model.add(Dense(128))
    model.add(LeakyReLU())
    model.add(BatchNormalization())
    model.add(Dropout(0.4))

    model.add(Dense(2))
    model.add(Activation("softmax"))

    xception_model.trainable = False

    return model
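
A usage sketch for the Sequential variant above; the 299x299 input size is an assumption (Xception's default resolution), and the optimizer choice is illustrative.

# Usage sketch; input size and optimizer are assumptions.
from tensorflow.keras.optimizers import Adam

model = build_xception(height=299, width=299, depth=3)
model.compile(optimizer=Adam(1e-4),
              loss='categorical_crossentropy',
              metrics=['accuracy'])
model.summary()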
Example #3
def build_model(config):
    """Builds a keras model using Xception network as core and following
    instructions on the configuration file.

    Args:
        config (dict): Configuration dictionary

    Returns:
        keras.model: Keras model
    """
    input_shape = config["model"]["input_shape"] + [3]
    i = Input(
        input_shape,
        name="model_input",
    )
    x = preprocess_input(i)
    core = Xception(input_shape=input_shape, include_top=False, pooling="avg")

    if config["model"]["freeze_convolutional_layers"]:
        print("Freezing convolutional layers")
        core.trainable = False

    x = core(x)
    outputs = []
    for clf_layer in config["model"]["target_encoder"]:
        n_classes = len(config["model"]["target_encoder"][clf_layer])
        outputs.append(
            Dense(units=n_classes,
                  activation="softmax",
                  name=f"{clf_layer}_clf")(x))
    model = Model(inputs=i, outputs=outputs)
    return model
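
A hypothetical configuration dictionary matching the keys read by build_model(); the target names and class lists are made up for illustration.

# Hypothetical config illustrating the keys build_model() expects.
config = {
    "model": {
        "input_shape": [299, 299],          # becomes (299, 299, 3) inside build_model
        "freeze_convolutional_layers": True,
        "target_encoder": {                 # one softmax head per entry
            "color": ["red", "green", "blue"],
            "shape": ["circle", "square"],
        },
    },
}

model = build_model(config)  # two outputs: color_clf (3 units), shape_clf (2 units)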
 def train_model(self):
     """ Training the model """
     print("Training the model")
     LR = 1e-3
     epochs = 200
     callbacks = [
         EarlyStopping(monitor='val_loss',
                       min_delta=0,
                       patience=30,
                       verbose=0,
                       mode='auto'),
         ModelCheckpoint('model.h5',
                         monitor='val_loss',
                         mode='min',
                         save_best_only=True),
         ReduceLROnPlateau(monitor='val_loss',
                           factor=0.1,
                           patience=10,
                           verbose=0,
                           mode='auto',
                           min_delta=0.0001,
                           cooldown=0,
                           min_lr=0)
     ]
     # Pre-trained Xception model without the fully connected top layers
     base_model = Xception(input_shape=(self.img_size[0], self.img_size[1],
                                        3),
                           include_top=False,
                           weights='imagenet')
     # Unfreeze the layers
     base_model.trainable = True
     x = GlobalMaxPooling2D()(base_model.output)
     x = Dense(512, activation='relu')(x)
     x = Dense(10, activation='relu')(x)
     output = Dense(1, activation='linear')(x)
     model = Model(inputs=base_model.input, outputs=output)
     model.compile(loss='mse',
                   optimizer=Adam(learning_rate=LR),
                   metrics=[self.mae_in_months])
     base_model.summary()
     model.summary()
     history = model.fit(
         self.train_datagen.flow(self.x_train,
                                 self.y_train,
                                 batch_size=self.batch_size),
         steps_per_epoch=len(self.x_train) // self.batch_size,
         validation_data=self.val_datagen.flow(self.x_val,
                                               self.y_val,
                                               batch_size=self.batch_size),
         validation_steps=len(self.x_val) // self.batch_size,
         callbacks=callbacks,
         epochs=epochs,
         verbose=1)
     self.plot_it(history)
     model.load_weights('model.h5')
     pred = self.mean_bone_age + self.std_bone_age * (model.predict(
         self.x_val, batch_size=self.batch_size, verbose=True))
     actual = self.mean_bone_age + self.std_bone_age * (self.y_val)
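
self.mae_in_months, self.mean_bone_age and self.std_bone_age are not defined in this excerpt; below is a hedged sketch of the metric used in compile() above, assuming the targets are z-score-normalized bone ages, as implied by the rescaling of pred and actual.

# Hedged sketch of the custom metric referenced in compile(); it would live on the
# same class as train_model(). Assumes z-score-normalized bone-age targets, so the
# absolute error is scaled back into months.
import tensorflow.keras.backend as K

def mae_in_months(self, y_true, y_pred):
    return K.mean(K.abs(y_true - y_pred)) * self.std_bone_age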
    def model(self, trainable=False):
        model = Xception(include_top=False, weights='imagenet')
        model.trainable = trainable

        inputs = tf.keras.Input(shape=(150, 150, 3))
        x = model(inputs, training=trainable)
        x = tf.keras.layers.GlobalAveragePooling2D()(x)
        outputs = tf.keras.layers.Dense(1)(x)
        return tf.keras.Model(inputs, outputs)
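
Because the head above is a single Dense unit with no activation, the model outputs logits; a binary cross-entropy loss with from_logits=True is the natural pairing. In the usage sketch below, `obj` stands for an instance of the (unnamed) surrounding class, and the optimizer choice is an assumption.

# Usage sketch; `obj` is hypothetical, standing in for an instance of the class above.
import tensorflow as tf

m = obj.model(trainable=False)
m.compile(optimizer=tf.keras.optimizers.Adam(1e-3),
          loss=tf.keras.losses.BinaryCrossentropy(from_logits=True),     # Dense(1) outputs logits
          metrics=[tf.keras.metrics.BinaryAccuracy(threshold=0.0)])       # threshold 0 for logits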
def get_xception():
    """Returns a Xception pretrained neural net.

        The function returns a partially pretrained Xception neural net.

        Returns:
            A modified Xception semi-pre-trained NN instance
    """

    model = Xception(include_top=False)

    model.trainable = False

    core_output = model.layers[45].output

    # Weighting Xception output via channel and spatial Attention

    channel_attention_map = channel_attention(core_output)
    channel_weighted = core_output * channel_attention_map
    spatial_attention_map = spatial_attention(channel_weighted)
    core_output = channel_weighted * spatial_attention_map

    for _ in range(5):

        output = relu(core_output)
        output = SeparableConvolution2D(728, (3, 3),
                                        padding='same',
                                        depthwise_regularizer=L2(0.2),
                                        pointwise_regularizer=L2(0.03))(output)
        output = BatchNormalization()(output)
        output = Dropout(0.3)(output)

        output = relu(output)
        output = SeparableConvolution2D(728, (3, 3),
                                        padding='same',
                                        depthwise_regularizer=L2(0.2),
                                        pointwise_regularizer=L2(0.03))(output)
        output = BatchNormalization()(output)
        output = Dropout(0.3)(output)

        core_output = Add()([output, core_output])

        # Output Weighting via Attention

        channel_attention_map = channel_attention(core_output)
        channel_weighted = core_output * channel_attention_map
        spatial_attention_map = spatial_attention(channel_weighted)
        core_output = channel_weighted * spatial_attention_map

    model_output = GlobalAvgPool2D()(core_output)

    model_output = Dense(1, activation='sigmoid')(model_output)

    model = Model(inputs=model.input, outputs=model_output)

    return model
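
channel_attention and spatial_attention are not defined in this excerpt; below is a hedged, CBAM-style sketch of what such helpers might look like. The reduction ratio and kernel size are assumptions, and the real implementations in the source project may differ.

# Hedged CBAM-style sketch of the attention helpers used in get_xception().
import tensorflow as tf
from tensorflow.keras.layers import (Activation, Add, Concatenate, Conv2D, Dense,
                                     GlobalAveragePooling2D, GlobalMaxPooling2D,
                                     Reshape)

def channel_attention(feature_map, ratio=8):
    channels = feature_map.shape[-1]
    shared_1 = Dense(channels // ratio, activation='relu')
    shared_2 = Dense(channels)
    avg_out = shared_2(shared_1(GlobalAveragePooling2D()(feature_map)))
    max_out = shared_2(shared_1(GlobalMaxPooling2D()(feature_map)))
    scale = Activation('sigmoid')(Add()([avg_out, max_out]))
    return Reshape((1, 1, channels))(scale)          # broadcastable channel weights

def spatial_attention(feature_map):
    avg_out = tf.reduce_mean(feature_map, axis=-1, keepdims=True)
    max_out = tf.reduce_max(feature_map, axis=-1, keepdims=True)
    concat = Concatenate(axis=-1)([avg_out, max_out])
    return Conv2D(1, kernel_size=7, padding='same',
                  activation='sigmoid')(concat)      # one weight per spatial location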
    def get_model(self, freeze_base_model=False, freeze_initial_layers=0):
        # pretrained_model = Xception(weights="imagenet", include_top=False, input_shape=self.shape)
        input = Input(self.shape)
        pretrained_model = Xception(weights="imagenet", include_top=False)

        if freeze_base_model:
            pretrained_model.trainable = False
        else:
            for i, layer in enumerate(pretrained_model.layers):
                if i >= freeze_initial_layers:
                    break
                layer.trainable = False

        outputs = pretrained_model(input, training=False)
        outputs = GlobalAveragePooling2D()(outputs)
        outputs = Dropout(0.25)(outputs)
        outputs = Dense(1024, activation='relu')(outputs)
        outputs = Dense(256, activation='relu')(outputs)
        outputs = Dense(64, activation='relu')(outputs)
        ppoi = Dense(1, activation='sigmoid',
                     name='pe_present_on_image')(outputs)
        rlrg1 = Dense(1, activation='sigmoid',
                      name='rv_lv_ratio_gte_1')(outputs)
        rlrl1 = Dense(1, activation='sigmoid',
                      name='rv_lv_ratio_lt_1')(outputs)
        lspe = Dense(1, activation='sigmoid', name='leftsided_pe')(outputs)
        cpe = Dense(1, activation='sigmoid', name='chronic_pe')(outputs)
        rspe = Dense(1, activation='sigmoid', name='rightsided_pe')(outputs)
        aacpe = Dense(1, activation='sigmoid',
                      name='acute_and_chronic_pe')(outputs)
        cnpe = Dense(1, activation='sigmoid', name='central_pe')(outputs)
        indt = Dense(1, activation='sigmoid', name='indeterminate')(outputs)

        self.model = Model(inputs=input,
                           outputs={
                               'pe_present_on_image': ppoi,
                               'rv_lv_ratio_gte_1': rlrg1,
                               'rv_lv_ratio_lt_1': rlrl1,
                               'leftsided_pe': lspe,
                               'chronic_pe': cpe,
                               'rightsided_pe': rspe,
                               'acute_and_chronic_pe': aacpe,
                               'central_pe': cnpe,
                               'indeterminate': indt
                           })

        self._compile_model()

        return self.model
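
self._compile_model() is not shown in this excerpt; a plausible sketch for this nine-output binary setup would compile with one binary cross-entropy loss per named head, for example:

    # Hedged sketch of the _compile_model() called above; the actual optimizer,
    # loss weights, and metrics in the source project are unknown.
    def _compile_model(self):
        losses = {
            name: 'binary_crossentropy'
            for name in ['pe_present_on_image', 'rv_lv_ratio_gte_1',
                         'rv_lv_ratio_lt_1', 'leftsided_pe', 'chronic_pe',
                         'rightsided_pe', 'acute_and_chronic_pe',
                         'central_pe', 'indeterminate']
        }
        self.model.compile(optimizer='adam',
                           loss=losses,
                           metrics=['accuracy'])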
Example #8
                                                 class_mode='categorical',
                                                 target_size=TARGET_SIZE,
                                                 shuffle=False,
                                                 batch_size=BATCH_SIZE)

# Model definition
print('Creating model...')
input_ = Input(shape=(299, 299, 3))

base_model = Xception(
    weights='imagenet',
    include_top=False,
    input_tensor=input_,
)

base_model.trainable = False

for i in range(-6, 0):
    base_model.layers[i].trainable = True

custom_model = base_model.output
custom_model = GlobalAveragePooling2D()(custom_model)
custom_model = Dense(1024, activation='relu')(custom_model)
custom_model = Dropout(0.2)(custom_model)
custom_model = Dense(512, activation='relu')(custom_model)
custom_model = Dropout(0.2)(custom_model)
custom_model = Dense(128, activation='relu')(custom_model)
custom_model = Dropout(0.2)(custom_model)
custom_model = Dense(NUM_CLASSES, activation='softmax')(custom_model)

model = Model(base_model.input, custom_model)
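
A usage sketch for the model above; `train_generator` and `valid_generator` stand in for the data generators implied by the truncated snippet at the top of this example, and the learning rate and epoch count are assumptions.

# Usage sketch; generator names, learning rate, and epochs are assumptions.
from tensorflow.keras.optimizers import Adam

model.compile(optimizer=Adam(1e-4),   # small LR because the last 6 base layers are unfrozen
              loss='categorical_crossentropy',
              metrics=['accuracy'])

model.fit(train_generator,
          validation_data=valid_generator,
          epochs=10)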