Example #1
class CustomF1Score(Metric):
    def __init__(self, name=None, **kwargs):
        super(CustomF1Score, self).__init__(name=name, **kwargs)
        self.recall = Recall()
        self.precision = Precision()
        self.score = 0

    def update_state(self, y_true, y_pred, sample_weight=None):
        self.recall.update_state(y_true, y_pred, sample_weight=sample_weight)
        self.precision.update_state(y_true,
                                    y_pred,
                                    sample_weight=sample_weight)

        recall = self.recall.result()
        precision = self.precision.result()

        # tf.math.divide_no_nan guards against precision + recall == 0
        # (assumes tensorflow is imported as tf).
        f1score = tf.math.divide_no_nan(2 * precision * recall,
                                        precision + recall)
        self.score = f1score

    def result(self):
        return self.score

    def reset_states(self):
        self.recall.reset_states()
        self.precision.reset_states()
        self.score = 0
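A minimal usage sketch (not part of the original snippet): wiring CustomF1Score into model.compile on a toy binary classifier. The model and data here are illustrative only.

import numpy as np
import tensorflow as tf

model = tf.keras.Sequential([
    tf.keras.layers.Dense(8, activation='relu', input_shape=(4, )),
    tf.keras.layers.Dense(1, activation='sigmoid'),
])
model.compile(loss='binary_crossentropy',
              optimizer='adam',
              metrics=[CustomF1Score(name='f1')])

x = np.random.rand(32, 4).astype('float32')
y = np.random.randint(0, 2, size=(32, 1)).astype('float32')
history = model.fit(x, y, epochs=1, verbose=0)  # history.history gains an 'f1' key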
Example #2
    def train_noisy_student(self,
                            ns_tr_datagen,
                            ns_val_datagen,
                            holdout_datagen=None):
        """
        """
        if self._test_code:
            self._train_config['epochs'] = 1

        metrics_d = {
            'root': [Recall(name='recall'),
                     Precision(name='precision')],
            'vowel': [Recall(name='recall'),
                      Precision(name='precision')],
            'consonant': [Recall(name='recall'),
                          Precision(name='precision')]
        }
        self.get_callbacks()
        # fit() expects whole-number step counts.
        step_size_train = ns_tr_datagen.n // ns_tr_datagen.batch_size
        step_size_valid = ns_val_datagen.n // ns_val_datagen.batch_size
        model = build_model(**self._model_config, metrics=metrics_d)

        train_history = model.fit(ns_tr_datagen,
                                  steps_per_epoch=step_size_train,
                                  validation_data=ns_val_datagen,
                                  validation_steps=step_size_valid,
                                  callbacks=self._callbacks,
                                  **self._train_config)
        if holdout_datagen:
            self.predict_holdout(model, holdout_datagen)
Example #3
    def __init__(self,
                 model,
                 optimizer,
                 log_file_dir=None,
                 data_properties=None):
        """ Init. method.
        :param log_file_dir: If this is not None, then the training performance is stored in that log file directory.
        :param model: The model used for training.
        :param optimizer: Optimizer to be used for the weight updates.
        """
        self._data_properties = data_properties
        self._log_file_dir = log_file_dir
        self._optimizer = optimizer
        self._model = model

        self._tp_obj = TruePositives()
        self._tn_obj = TrueNegatives()
        self._fp_obj = FalsePositives()
        self._fn_obj = FalseNegatives()
        self._pre_obj = Precision()
        self._rec_obj = Recall()
        self._setup_changes = {'train': [], 'valid': []}
        self._loss_tt = {'train': [], 'valid': []}
        self._loss_ms = {'train': [], 'valid': []}
        self._loss_total = {'train': [], 'valid': []}
        self._acc = {'train': [], 'valid': []}
        self._tn = {'train': [], 'valid': []}
        self._tp = {'train': [], 'valid': []}
        self._fn = {'train': [], 'valid': []}
        self._fp = {'train': [], 'valid': []}
        self._rec = {'train': [], 'valid': []}
        self._pre = {'train': [], 'valid': []}
Example #4
def load_model():
    with open('features_model.json', 'r') as json_file:
        loaded_model_json = json_file.read()
    loaded_model = model_from_json(loaded_model_json)
    loaded_model.load_weights("features_best_model.h5")
    opt = Adam(learning_rate=0.0001)
    loaded_model.compile(loss='binary_crossentropy',
                         optimizer=opt,
                         metrics=[BinaryAccuracy(),
                                  Precision(),
                                  Recall()])

    with open('hair_model.json', 'r') as json_file:
        loaded_model_json = json_file.read()
    loaded_model2 = model_from_json(loaded_model_json)
    loaded_model2.load_weights("hair_best_model.h5")
    opt = Adam(learning_rate=0.0001)
    # NOTE: with a categorical_crossentropy loss, CategoricalAccuracy() is
    # the intended accuracy metric rather than BinaryAccuracy().
    loaded_model2.compile(loss='categorical_crossentropy',
                          optimizer=opt,
                          metrics=[CategoricalAccuracy(),
                                   Precision(),
                                   Recall()])

    return loaded_model, loaded_model2
Example #5
    def __init__(self,
                 num_classes=None,
                 num_set=None,
                 detection=None,
                 save_model_name=None,
                 load=None,
                 **params):

        self.detection = detection
        self.num_set = num_set
        self.save_model_name = save_model_name
        self.num_classes = num_classes
        self.load = load

        if self.detection == "negative":
            class_id = 0
        elif self.detection == "positive":
            class_id = 1
        else:
            raise ValueError(f"Unknown detection type: {self.detection}")

        if load:
            self.model = self.load_model(load_model_name=save_model_name,
                                         detection=detection,
                                         num_set=num_set)
            self.model_name = save_model_name
            self.model.compile(loss='binary_crossentropy',
                               optimizer=optimizers.Adam(learning_rate=Config.LR),
                               metrics=[
                                   'accuracy',
                                   CustomF1(name="f1", class_id=class_id),
                                   Precision(class_id=class_id),
                                   Recall(class_id=class_id)
                               ])
        else:
            self.model_name = save_model_name
            self.model = self.make_model()
            lr_schedule = tf.keras.optimizers.schedules.ExponentialDecay(
                Config.LR, decay_steps=10000, decay_rate=0.96, staircase=True)
            self.model.compile(
                loss='binary_crossentropy',
                # pass the schedule so it actually takes effect
                optimizer=optimizers.Adam(learning_rate=lr_schedule),
                metrics=[
                    'accuracy',
                    CustomF1(name="f1", class_id=class_id),
                    Precision(class_id=class_id),
                    Recall(class_id=class_id)
                ])
        print(self.model.summary(), file=sys.stderr)

        tf.keras.utils.plot_model(self.model,
                                  'Fig/multi_input_model.png',
                                  show_shapes=True)
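A hedged aside on the class_id argument used above (and again in Example #25): Precision(class_id=k) restricts the metric to column k of the predictions. A toy check:

from tensorflow.keras.metrics import Precision

p = Precision(class_id=0)
# Only column 0 of the (batch, num_classes) tensors is scored.
p.update_state([[1, 0], [0, 1]], [[0.8, 0.2], [0.3, 0.7]])
print(p.result().numpy())  # 1.0: one predicted positive for class 0, and it is correct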
Example #6
    def build(self):

        model = models.Sequential()
        model.add(
            layers.Dense(500,
                         input_shape=(self.vector_size, ),
                         activation='relu'))
        model.add(layers.Dense(700, activation='relu'))
        model.add(layers.Dropout(0.5))
        model.add(layers.Dense(1000, activation='relu'))
        model.add(layers.Dense(500, activation='relu'))
        model.add(layers.Dropout(0.5))
        model.add(layers.Dense(250, activation='relu'))
        model.add(layers.Dense(100, activation='relu'))
        model.add(layers.Dropout(0.5))
        model.add(layers.Dense(50, activation='relu'))
        model.add(layers.Dense(self.num_classes, activation='softmax'))

        model.compile(loss='categorical_crossentropy',
                      optimizer='adam',
                      metrics=[
                          'acc',
                          Precision(name="prec"),
                          Recall(name="rec"),
                          AUC(name='auc')
                      ])

        return model
Example #7
def compiled_model(INPUT_SHAPE: list, QNT_CLASS: int) -> tf.keras.Model:
    """
    A função retorna o modelo compilado.

    Return a compiled model.
  """
    INPUT_SHAPE = tuple(INPUT_SHAPE)

    base_model = MobileNetV2(include_top=False,
                             weights='imagenet',
                             input_tensor=Input(shape=INPUT_SHAPE,
                                                name='inputs'))

    for layer in base_model.layers:
        layer.trainable = False

    mod = base_model.output
    mod = AveragePooling2D()(mod)
    mod = Flatten()(mod)
    mod = Dropout(0.5)(mod)
    mod = Dense(QNT_CLASS, activation='softmax')(mod)

    mod_retorno = Model(inputs=base_model.input, outputs=mod)

    mod_retorno.compile(
        loss=CategoricalCrossentropy(),
        optimizer=Adagrad(),
        # CategoricalAccuracy() matches the softmax/one-hot setup; plain
        # Accuracy() would compare raw probabilities against the labels.
        metrics=[CategoricalAccuracy(), Precision(),
                 AUC(), FalseNegatives()])
    return mod_retorno
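The metric swap above matters in practice: plain Accuracy() checks element-wise equality of labels and raw probabilities, while CategoricalAccuracy() compares argmaxes. A short demonstration:

from tensorflow.keras.metrics import Accuracy, CategoricalAccuracy

y_true = [[0., 1.], [1., 0.]]
y_pred = [[0.3, 0.7], [0.6, 0.4]]

acc = Accuracy()
acc.update_state(y_true, y_pred)
print(acc.result().numpy())  # 0.0 -- probabilities never equal the 0/1 labels exactly

cat = CategoricalAccuracy()
cat.update_state(y_true, y_pred)
print(cat.result().numpy())  # 1.0 -- argmax of each prediction matches the label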
Example #8
    def __init__(self,
                 img_size=256,
                 channels=1,
                 nclasses=4,
                 cnet=[16, 32, 64, 128],
                 fname='unet_model.h5'):

        self.img_rows = img_size
        self.img_cols = img_size
        self.channels = channels
        self.n_classes = nclasses

        self.cnet = cnet
        self.upnet = cnet[:-1][::-1]

        self.pool_ker = 2
        self.conv_ker = 3
        self.d_out = 0.5

        self.modelSavedName = fname

        self.model = self.get_unet()
        self.model.compile(loss=self.custom_loss,
                           optimizer=Adam(),
                           metrics=[self.jaccard_coef,
                                    Precision()])
        self.model.summary()
Example #9
def model_fit_eval(
    train_paths_labels,
    val_paths_labels,
    test_paths_labels,
    eval_table=None,
    table=None,
    _resize=[250, 250],
    norm=255.0,
    batch_size=128,
    filters=4,
    lr=1e-3,
    epochs=30,
    verbose=1, 
    pretrained_weights=None,
    model_path=None,
    distance=absolute_distance,
    distance_output_shape=None,
    prediction_activation='sigmoid',
    train_ds=None,
    val_ds=None,
    callbacks=None,
    steps_per_epoch=None,
    validation_steps=None,
    prefix='',
#     shuffle=True,
    patience=3,
    kernel_initializer=initialize_weights,
    kernel_initializer_d=initialize_weights_dense,
    kernel_regularizer=l2(2e-4),
    kernel_regularizer_d=l2(1e-3),
    bias_initializer=initialize_bias,
    kernel_size_list=[(10, 10), (7, 7), (4, 4), (4, 4)],
    units=4*64,
    optimizer=None,
    loss='binary_crossentropy',
    # NOTE: stateful Keras metric objects should not live in a default
    # argument; a single shared instance would accumulate state across calls.
    metrics=None,
    tensorboard_histogram_freq=1,
    random_seed=2,
):
    seed(random_seed)
    set_seed(random_seed)
    if metrics is None:
        metrics = ['accuracy', Precision(name='Precision'), Recall(name='Recall')]
    model, _ = model_fit(table=table, train_paths_labels=train_paths_labels,
                            val_paths_labels=val_paths_labels, _resize=_resize, norm=norm,
                            batch_size=batch_size, filters=filters, lr=lr, epochs=epochs,
                            loss=loss, metrics=metrics, verbose=verbose,
                            pretrained_weights=pretrained_weights, model_path=model_path,
                            prediction_activation=prediction_activation, 
                            distance=distance, distance_output_shape=distance_output_shape,
                            train_ds=train_ds, val_ds=val_ds, callbacks=callbacks,
                            steps_per_epoch=steps_per_epoch, validation_steps=validation_steps,
                            prefix=prefix, patience=patience, tensorboard_histogram_freq=tensorboard_histogram_freq,
                        )
    scores = model_evaluate(model, images_labels_paths=test_paths_labels, norm=norm, _resize=_resize, verbose=verbose)
    if eval_table is not None:
        eval_table.add_row(scores)
        print(eval_table)
    else:
        print(scores)

    return model
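The metrics default was changed to None above because Keras metric objects are stateful: an instance created in the signature would be shared by every call. A minimal illustration of the accumulating state:

from tensorflow.keras.metrics import Precision

p = Precision()
p.update_state([1, 1], [1, 0])
print(p.result().numpy())  # 1.0 (1 true positive out of 1 predicted positive)
p.update_state([0, 0], [1, 1])
print(p.result().numpy())  # ~0.33: state accumulated, now 1 TP out of 3 predicted positives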
Example #10
def vgg_model(img_shape=(224, 224, 3)):
    """
    Downloads mobilenet model without imagenet weights and adds the last layers
    for classification.

    Parameters
    ----------
    img_shape : tuple, optional
        DESCRIPTION. The default is (224,224,3).
        Shape of the input image for the model. Default is 224,224,3 as it is the
        default for MobileNet.
    Returns
    -------
    model : keras model
        Returns MobileNet model with classification layers added.

    """
    base_model = VGG16(input_shape=img_shape, include_top=False, weights=None)

    MN = base_model.output

    MN = Flatten()(MN)
    MN = Dense(4096, activation='relu')(MN)
    MN = Dropout(0.5)(MN)
    MN = Dense(4096, activation='relu')(MN)
    MN = Dropout(0.5)(MN)
    MN = Dense(28, activation='softmax')(MN)

    model = Model(base_model.input, MN)
    model.compile(optimizer=Adam(learning_rate=0.0001),
                  loss='categorical_crossentropy',
                  metrics=['accuracy', Precision(),
                           Recall()])
    model.summary()
    return model
Example #11
def build_model(hp, input_dim1, input_dim2, output_dim, first_layer=False):

    params = hp.values.copy()

    ci = Input((input_dim1, ))
    si = Input((input_dim2, ))
    s = si

    # `units` is assumed to be a module-level list of candidate layer widths.
    for i in range(hp.Int('num_layers', 0, 2)):
        s = Dense(hp.Choice('branching_units' + str(i + 1),
                            units,
                            default=units[0]),
                  activation='relu')(s)
        s = Dropout(0.2)(s)

    x = Concatenate(axis=-1)([ci, s])

    x1 = Dense(hp.Choice('units_' + str(1), [16, 8], default=16),
               activation='relu')(x)

    x = Dense(output_dim, activation='softmax', name='output_1')(x1)

    model = Model(inputs=[ci, si], outputs=[x])

    model.compile(optimizer=Adam(learning_rate=0.001),
                  loss={'output_1': 'categorical_crossentropy'},
                  metrics=['acc', f1, f2,
                           Precision(),
                           Recall(),
                           AUC()])

    return model
Example #12
def SegNet3D(shape, weights=None):
    inputs = Input(shape)
    conv, pool = inputs, inputs

    # encoder
    for numOfFilters in [4, 8, 16, 32]:
        conv = SegNet3DBlock(pool, layers=2, filters=numOfFilters)
        pool = MaxPooling3D((2, 2, 2))(conv)

    conv = SegNet3DBlock(pool, layers=3, filters=128)

    # decoder
    for numOfFilters in [64, 32, 16, 8]:
        upsam = UpSampling3D((2, 2, 2))(conv)
        conv = SegNet3DBlock(upsam, layers=2, filters=numOfFilters)

    # feed the output of the last decoder block, not the raw upsampling
    conv = SegNet3DBlock(conv, layers=2, filters=4)

    outputs = Conv3D(1, 1, activation='sigmoid')(conv)
    model = Model(inputs=inputs, outputs=outputs)
    model.compile(optimizer=Adam(learning_rate=1e-4), loss='binary_crossentropy',
                  # BinaryAccuracy() matches the sigmoid output; plain Accuracy()
                  # would compare raw probabilities against the 0/1 masks.
                  metrics=[Precision(), Recall(), AUC(), BinaryAccuracy()])
    model.summary()

    return model
Example #13
def get_model(n_hidden, matrix_path, summary=True,
              init_weights_path='./model/init_weights.hdf5'):

    # model architecture
    inputs = layers.Input(shape=(None, ), name='input', dtype='int32')
    layer = FastText(0, 0, matrix_path, mask_zero=True, name='emb')(inputs)
    lstm = layers.LSTM(n_hidden, name='lstm')(layer)
    dense = layers.Dense(1, activation='sigmoid', name='dense')(lstm)
    model = Model(inputs=inputs, outputs=dense, name='model')

    if os.path.exists(init_weights_path):
        print('Initial weights found. Loading...')
        model.load_weights(init_weights_path)
    else:
        print('Initial weights not found. Saving...')
        model.save_weights(init_weights_path)

    # training setup
    initial_learning_rate = 0.1
    decay_steps = 50
    decay_rate = 1.0
    learning_rate_fn = tf.keras.optimizers.schedules.InverseTimeDecay(
        initial_learning_rate, decay_steps, decay_rate)
    optimizer = optimizers.RMSprop(learning_rate=learning_rate_fn)

    prec = Precision(name='prec')
    rec = Recall(name='rec')
    metrics = ['acc', prec, rec]
    # the 'dense' layer already applies a sigmoid, so its output is a
    # probability, not a logit
    loss = tf.keras.losses.BinaryCrossentropy(from_logits=False)

    model.compile(loss=loss, optimizer=optimizer, metrics=metrics)
    if summary:
        model.summary()

    return model
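The from_logits fix above can be checked directly: feeding a sigmoid probability to a loss configured with from_logits=True re-squashes it and reports the wrong value. A toy check, independent of this model:

import tensorflow as tf

y_true = tf.constant([[1.0]])
prob = tf.constant([[0.9]])  # output of a sigmoid layer

correct = tf.keras.losses.BinaryCrossentropy(from_logits=False)(y_true, prob)
wrong = tf.keras.losses.BinaryCrossentropy(from_logits=True)(y_true, prob)
print(correct.numpy())  # ~0.105, the true cross-entropy for p=0.9
print(wrong.numpy())    # ~0.341, because 0.9 is treated as a logit and squashed again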
Example #14
def load_model(arch, LR, X, y, L1_coef, encoder=None):
    print('CREATING NEURAL NET')
    opt = Adam(learning_rate=LR)
    model = None

    if encoder is None:
        inputs = Input(shape=(X.shape[1], ))
        h = layers.Dense(arch[0],
                         kernel_regularizer=l1_l2(l1=L1_coef, l2=0.0),
                         activation='relu')(inputs)
        h = layers.BatchNormalization()(h)
        h = layers.Dense(arch[1], activation='relu')(h)
        h = layers.BatchNormalization()(h)
        h = layers.Dense(arch[2], activation='relu')(h)
        h = layers.BatchNormalization()(h)
        h = layers.Dense(arch[3], activation='relu')(h)
        h = layers.BatchNormalization()(h)
        h = layers.Dense(arch[4], activation='relu')(h)
        output = layers.Dense(5, activation='softmax')(h)
        model = Model(inputs=inputs, outputs=output)

    else:
        print('Transfer encoder to classifier and fine-tuning...')
        model = Sequential([encoder, layers.Dense(5, activation='softmax')])

    model.compile(
        optimizer=opt,
        loss=tf.keras.losses.CategoricalCrossentropy(),
        metrics=['acc', AUC(), Recall(), Precision()],
    )

    return model
Example #15
    def create_transfer_model(self,
                              input_size,
                              n_categories,
                              weights='imagenet',
                              model=Xception):
        """
        Creates a model without its top and attaches a new head to it
        Args:
            input_size (tuple(int, int, int)): 3-dimensional size of input to model
            n_categories (int): number of classification categories
            weights (str or arg): weights to use for the model
            model (keras model class): model to use for transfer
        Returns:
            keras model: model with new head
        """
        base_model = model(weights=weights,
                           include_top=False,
                           input_shape=input_size)
        self.model = self.add_model_head(base_model, n_categories)
        opt = Adam(learning_rate=.0001)
        self.model.compile(loss='categorical_crossentropy',
                           optimizer=opt,
                           metrics=['accuracy',
                                    Precision(),
                                    Recall()])
        self._change_trainable_layers()
        return self.model
Example #16
def build_model(model_name, use_weights):
    """
    Prepare the model that should be trained.

    :param model_name: Name of the model to be trained
    :param use_weights: Use pre-trained weights bool
    :return: tf.keras.Model
    """
    if use_weights:
        weights = 'imagenet'
    else:
        weights = None
    if model_name == 'simple':
        model = small_model
    elif model_name == 'mobilenet':
        model = MobileNetV2
        model = model(input_shape=IMAGE_SIZE_WITH_CHANNELS,
                      weights=weights,
                      include_top=False)
        for layer in model.layers:
            layer.trainable = False
        x = model.output
        x = GlobalAveragePooling2D()(x)
        # NOTE removed regularization in order to be able to train at least
        # something that works given limited computational resources.
        x = Dense(units=2, activation='softmax')(x)
        model = Model(inputs=model.input, outputs=x)
    else:
        raise NotImplementedError(f'Model not supported: {model_name}')
    model.compile(optimizer='adam',
                  loss=CategoricalCrossentropy(from_logits=False),
                  metrics=['accuracy', Precision(),
                           Recall()])
    model.summary()
    return model
Example #17
def get_model(tokenizer, lstm_units):
    """
    Constructs the model,
    Embedding vectors => LSTM => 2 output Fully-Connected neurons with softmax activation
    """
    # get the GloVe embedding vectors
    embedding_matrix = get_embedding_vectors(tokenizer)
    model = Sequential()
    model.add(
        Embedding(len(tokenizer.word_index) + 1,
                  EMBEDDING_SIZE,
                  weights=[embedding_matrix],
                  trainable=False,
                  input_length=SEQUENCE_LENGTH))

    model.add(LSTM(lstm_units, recurrent_dropout=0.2))
    model.add(Dropout(0.3))
    model.add(Dense(2, activation="softmax"))
    # compile with the RMSprop optimizer
    # as well as precision and recall metrics
    model.compile(optimizer="rmsprop",
                  loss="categorical_crossentropy",
                  metrics=["accuracy", Precision(),
                           Recall()])
    model.summary()
    return model
Example #18
    def build(self):
        model = models.Sequential()
        model.add(
            layers.Conv2D(32, (3, 3),
                          activation='relu',
                          input_shape=(self.input_width_height,
                                       self.input_width_height,
                                       self.channels)))
        model.add(layers.MaxPooling2D((2, 2)))
        model.add(layers.Conv2D(64, (3, 3), activation='relu'))
        model.add(layers.MaxPooling2D((2, 2)))
        model.add(layers.Conv2D(128, (3, 3), activation='relu'))
        model.add(layers.MaxPooling2D((2, 2)))
        model.add(layers.Flatten())
        model.add(layers.Dropout(0.5))  # Dropout for regularization
        model.add(layers.Dense(512, activation='relu'))
        model.add(layers.Dropout(0.5))
        model.add(layers.Dense(256, activation='relu'))
        model.add(layers.Dropout(0.5))
        model.add(layers.Dense(self.num_classes, activation='softmax'))

        model.compile(loss='categorical_crossentropy',
                      optimizer='adam',
                      metrics=[
                          'acc',
                          Precision(name="prec"),
                          Recall(name="rec"),
                          AUC(name='auc')
                      ])

        return model
Example #19
def build_base_model(input_size):
    in_1 = Input(shape=(input_size, ), name="input_1")
    in_2 = Input(shape=(input_size, ), name="input_2")

    norm_1 = Lambda(lambda tensor: tf.norm(tensor, axis=1, keepdims=True),
                    name="norm_input_1")(in_1)
    norm_2 = Lambda(lambda tensor: tf.norm(tensor, axis=1, keepdims=True),
                    name="norm_input_2")(in_2)
    norm_mul = Multiply(name="multiply_norms")([norm_1, norm_2])

    model = Multiply(name="pointwise_multiply")([in_1, in_2])
    model = Lambda(lambda tensor: tf.reduce_sum(tensor, axis=1, keepdims=True),
                   name="sum")(model)

    model = Lambda(lambda tensors: tf.divide(tensors[0], tensors[1]),
                   name="divide")([model, norm_mul])
    model = ValueMinusInput(1, name="one_minus_input")(model)

    model = LessThan(0.4)(model)
    model_out = Lambda(lambda tensor: tf.cast(tensor, tf.float32),
                       name="cast")(model)

    model = Model([in_1, in_2], model_out)
    model.compile(loss=MeanSquaredError(),
                  optimizer=SGD(),
                  metrics=[
                      BinaryAccuracy(),
                      Precision(),
                      Recall(),
                      TrueNegatives(),
                      FalsePositives(),
                      FalseNegatives(),
                      TruePositives()
                  ])
    return model
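A minimal usage sketch for build_base_model, assuming the custom ValueMinusInput and LessThan layers referenced above are importable from this project; the data is random and illustrative only:

import numpy as np

model = build_base_model(input_size=128)
a = np.random.rand(4, 128).astype('float32')
b = np.random.rand(4, 128).astype('float32')
labels = np.array([[1.], [0.], [1.], [0.]], dtype='float32')
print(model.evaluate([a, b], labels, verbose=0))  # loss followed by the compiled metrics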
Example #20
def build_triplet_classifier_model(extractor_model,
                                   dist_type='eucl',
                                   threshold=1.0):
    anchor_in = Input(shape=(224, 224, 3), name="anchor_in")
    anchor_out = extractor_model(anchor_in)

    compare_in = Input(shape=(224, 224, 3), name="compare_in")
    compare_out = extractor_model(compare_in)

    if dist_type == 'cos':
        dist = CosineDistance(name="dist")([anchor_out, compare_out])
    else:
        dist = EuclidianDistanceSquared(name="dist")([anchor_out, compare_out])

    model = Lambda(lambda x: tf.cast((x < threshold), tf.float32))(dist)
    model = Model([anchor_in, compare_in], model)
    model.compile(optimizer=Adamax(),
                  loss=None,  # inference only; nothing is trained here
                  metrics=[
                      BinaryAccuracy(),
                      Precision(),
                      Recall(),
                      TrueNegatives(),
                      FalsePositives(),
                      FalseNegatives(),
                      TruePositives()
                  ])

    return model
Example #21
    def train_model(self, themes_weight: ThemeWeights,
                    dataset: TrainValidationDataset, voc_size: int,
                    keras_callback: LambdaCallback):

        article_length = dataset.article_length
        theme_count = dataset.theme_count

        model = tf.keras.Sequential([
            keras.layers.Embedding(input_dim=voc_size,
                                   input_length=article_length,
                                   output_dim=self.embedding_output_dim,
                                   mask_zero=True),
            Dropout(0.3),
            keras.layers.Conv1D(filters=64,
                                kernel_size=3,
                                input_shape=(voc_size,
                                             self.embedding_output_dim),
                                activation=tf.nn.relu),
            #keras.layers.MaxPooling1D(3),
            #keras.layers.Bidirectional(keras.layers.LSTM(64)),
            keras.layers.GlobalAveragePooling1D(),
            Dropout(0.3),
            keras.layers.Dense(theme_count, activation=tf.nn.sigmoid)
        ])

        model.compile(optimizer=tf.keras.optimizers.Adam(clipnorm=1),
                      loss=WeightedBinaryCrossEntropy(
                          themes_weight.weight_array()),
                      metrics=[
                          AUC(multi_label=True),
                          BinaryAccuracy(),
                          TruePositives(),
                          TrueNegatives(),
                          FalseNegatives(),
                          FalsePositives(),
                          Recall(),
                          Precision()
                      ],
                      run_eagerly=True)

        model.summary()
        self.__model__ = model

        if self.__plot_directory is not None:
            self.plot_model(self.__plot_directory)

        # Fix for https://github.com/tensorflow/tensorflow/issues/38988
        model._layers = [
            layer for layer in model._layers if not isinstance(layer, dict)
        ]

        callbacks = [ManualInterrupter(), keras_callback]

        model.fit(dataset.trainData,
                  epochs=self.epochs,
                  steps_per_epoch=dataset.train_batch_count,
                  validation_data=dataset.validationData,
                  validation_steps=dataset.validation_batch_count,
                  callbacks=callbacks)
Example #22
def train():
    print('-' * 30)
    print('Loading and preprocessing train data...')
    print('-' * 30)

    # TRAINING IMAGES
    imgs_train, imgs_mask_train = load_train_data()

    imgs_train = preprocess(imgs_train)
    imgs_mask_train = preprocess(imgs_mask_train)

    imgs_train = imgs_train.astype('float32')

    mean = np.mean(imgs_train)  # mean for data centering
    std = np.std(imgs_train)  # std for data normalization

    imgs_train -= mean
    imgs_train /= std

    imgs_mask_train = imgs_mask_train.astype('float32')

    print('-' * 30)
    print('Creating and compiling model...')
    print('-' * 30)

    arch = ResUnetPlusPlus(input_size=256)
    model = arch.build_model()
    # model.summary()
    optimizer = SGD(learning_rate=1e-5, momentum=0.9, nesterov=True)
    metrics = [dice_coef, Recall(), Precision()]

    model.compile(loss="binary_crossentropy", optimizer=optimizer, metrics=metrics)

    model_checkpoint = ModelCheckpoint(experiment_path + '/' + 'weights-resunet++.{epoch:02d}-{loss:.2f}.h5', monitor='val_dice_coef',
                                       save_best_only=True, save_weights_only=False, mode='max', period=10)
    print('-' * 30)
    print('Fitting model...')
    print('-' * 30)
    history = model.fit(imgs_train, imgs_mask_train, batch_size=10,
                        epochs=130, verbose=1,
                        validation_split=0.2, shuffle=True,
                        callbacks=[model_checkpoint])

    # Saving our predictions in the directory 'preds'
    plt.plot(history.history['dice_coef'])
    plt.plot(history.history['val_dice_coef'])
    plt.title('Model dice coeff')
    plt.ylabel('Dice coeff')
    plt.xlabel('Epoch')
    plt.legend(['Train', 'Validation'], loc='upper left')
    plt.show()

    plt.plot(history.history['loss'])
    plt.plot(history.history['val_loss'])
    plt.title('Model loss')
    plt.ylabel('Loss')
    plt.xlabel('Epoch')
    plt.legend(['Train', 'Validation'], loc='upper left')
    plt.show()
Example #23
    def __init__(self, model_percnet, model_wholenet, optimizer, verbose):
        self.model_percnet = model_percnet
        self.model_wholenet = model_wholenet
        self.optimizer = optimizer
        self.verbose = verbose
        self.model_percnet.compile(loss=mean_squared_error, optimizer=self.optimizer)
        self.model_wholenet.compile(loss=categorical_crossentropy, optimizer=self.optimizer,
                                    metrics=[CategoricalAccuracy(), Precision(), Recall()])
Example #24
    def create_model(self):
        '''This will create the hard-coded model'''

        if self.load_model:
            self.model = load_model(
                os.path.join(MODEL_DIRECTORY, self.load_model))
        else:
            self.model = Sequential()
            self.model.add(
                Conv2D(64, (2, 2),
                       input_shape=(64, 64, 3),
                       padding='valid',
                       name='Convolution-1',
                       activation='relu'))
            self.model.add(
                Conv2D(64, (2, 2),
                       padding='valid',
                       name='Convolution-2',
                       activation='relu'))
            self.model.add(MaxPooling2D(pool_size=(2, 2), name='Pooling-1'))
            self.model.add(Dropout(0.05))

            self.model.add(
                Conv2D(128, (2, 2),
                       padding='valid',
                       name='Convolution-3',
                       activation='relu'))
            self.model.add(
                Conv2D(128, (2, 2),
                       padding='valid',
                       name='Convolution-4',
                       activation='relu'))
            self.model.add(MaxPooling2D(pool_size=(2, 2), name='Pooling-2'))
            self.model.add(Dropout(0.10))

            self.model.add(
                Conv2D(256, (2, 2),
                       padding='valid',
                       name='Convolution-5',
                       activation='relu'))
            self.model.add(
                Conv2D(256, (2, 2),
                       padding='valid',
                       name='Convolution-6',
                       activation='relu'))
            self.model.add(MaxPooling2D(pool_size=(2, 2), name='Pooling-3'))
            self.model.add(Dropout(0.1))

            self.model.add(Flatten())
            self.model.add(Dense(512, name='Dense-1', activation='relu'))
            self.model.add(Dropout(0.1))
            self.model.add(Dense(1, name='Dense-2', activation='sigmoid'))

            self.model.compile(loss='binary_crossentropy',
                               optimizer='adam',
                               metrics=['accuracy',
                                        Precision(),
                                        Recall()])
Example #25
def build_model(shape, classes, hparams):
    print(hparams)
    N = shape[2]
    F = shape[3] - N
    frames = shape[1]

    def get_feature_matrix(x, frame, N, F):
        x = tf.slice(x, [0, frame, 0, N], [-1, 1, N, F])
        x = tf.squeeze(x, axis=[1])
        return x

    def get_correlation_matrix(x, frame, N, F):
        x = tf.slice(x, [0, frame, 0, 0], [-1, 1, N, N])
        x = tf.squeeze(x, axis=[1])
        return x

    input_0 = tf.keras.Input((frames, N, F + N))

    layers = []
    for frame in range(frames):
        feature_matrix = tf.keras.layers.Lambda(get_feature_matrix,
                                                arguments={
                                                    'frame': frame,
                                                    'N': N,
                                                    'F': F
                                                })(input_0)

        correlation_matrix = tf.keras.layers.Lambda(get_correlation_matrix,
                                                    arguments={
                                                        'frame': frame,
                                                        'N': N,
                                                        'F': F
                                                    })(input_0)

        x = sp.layers.GraphConv(
            hparams['output_shape'])([feature_matrix, correlation_matrix])
        x = tf.keras.layers.Flatten()(x)
        layers.append(x)

    combine = tf.keras.layers.Concatenate()(layers)
    reshape = tf.keras.layers.Reshape(
        (frames, N * hparams['output_shape']))(combine)
    lstm = tf.keras.layers.LSTM(hparams['hidden_units'])(reshape)
    dropout = tf.keras.layers.Dropout(hparams['dropout'])(lstm)
    out = tf.keras.layers.Dense(classes, activation='softmax')(dropout)

    model = tf.keras.Model(inputs=[input_0], outputs=out)
    model.compile(optimizer=tf.keras.optimizers.Adam(
        learning_rate=hparams['learning_rate']),
                  loss='categorical_crossentropy',
                  metrics=[
                      'accuracy',
                      Recall(class_id=0, name='recall'),
                      Precision(class_id=0, name='precision'),
                  ])
    model.summary()
    model.save('logs/plot_gnn.h5')
    return model
Example #26
def F1_score(y_t, y_p, weights):
    """Computes the weighted F1 score for each label.
    Arguments: ground truth labels Y (3D flattened into 2D), predictions (3D flattened into 2D), class weights.
    Returns: arrays of F1 score, precision and recall per label, and the weighted F1 score."""

    P = Precision()
    R = Recall()  # label-by-label evaluation
    F1_score_per_label = []  # store per label
    P_per_label = []
    R_per_label = []
    F1_tot = 0  # weighted sum

    for i in range(8):
        P.update_state(y_t[:, i], y_p[:, i])
        R.update_state(y_t[:, i], y_p[:, i])
        p = P.result().numpy()
        r = R.result().numpy()
        P.reset_states()
        R.reset_states()
        if p + r == 0:
            f1 = 0
        else:
            f1 = 2 * p * r / (p + r)
        F1_score_per_label.append(f1)
        P_per_label.append(p)
        R_per_label.append(r)

        F1_tot += f1 * weights[i]

    return F1_score_per_label, P_per_label, R_per_label, F1_tot
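A hypothetical call, assuming 8 label columns and uniform class weights (the names and shapes here are illustrative only):

import numpy as np

y_t = np.random.randint(0, 2, size=(64, 8)).astype('float32')
y_p = np.random.randint(0, 2, size=(64, 8)).astype('float32')
weights = [1.0 / 8] * 8

f1_per_label, p_per_label, r_per_label, f1_weighted = F1_score(y_t, y_p, weights)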
Example #27
def define_metrics():
    '''Define the training metrics.'''
    return [
        BinaryAccuracy(name='accuracy'),
        F1_score,
        Precision(name='precision'),
        Recall(name='recall'),
        AUC(name='auc'),
    ]
Example #28
def get_custom_metrics():
    custom_metrics: dict = {
        "precision": Precision(),
        "recall": Recall(),
        "true_positives": TruePositives(),
        "true_negatives": TrueNegatives(),
        "false_negatives": FalseNegatives(),
        "false_positives": FalsePositives()
    }
    return custom_metrics
Example #29
def get_metrics():
    metrics = []
    acc_metrics = CategoricalAccuracy()
    metrics.append(acc_metrics)
    auc_metrics = AUC()
    metrics.append(auc_metrics)
    precision = Precision(name='precision')
    metrics.append(precision)
    recall = Recall(name='recall')
    metrics.append(recall)
    return metrics
Example #30
def build_cos_model(input_size,
                    cos_dist_lvl,
                    n_neurons,
                    n_layers,
                    batch_norm=True,
                    loss=MeanSquaredError(),
                    optimizer=SGD(learning_rate=0.05, momentum=0.025)):
    in_1 = Input(shape=(input_size, ), name="input_1")
    in_2 = Input(shape=(input_size, ), name="input_2")

    if cos_dist_lvl == 0:
        model = Concatenate(name="concatenate")([in_1, in_2])
    else:
        model = Multiply(name="pointwise_multiply")([in_1, in_2])
        if cos_dist_lvl >= 2:
            norm_1 = Lambda(
                lambda tensor: tf.norm(tensor, axis=1, keepdims=True),
                name="norm_input_1")(in_1)
            norm_2 = Lambda(
                lambda tensor: tf.norm(tensor, axis=1, keepdims=True),
                name="norm_input_2")(in_2)
            norm_mul = Multiply(name="multiply_norms")([norm_1, norm_2])
            model = Lambda(lambda tensors: tf.divide(tensors[0], tensors[1]),
                           name="divide")([model, norm_mul])
        if cos_dist_lvl >= 3:
            model = Lambda(
                lambda tensor: tf.reduce_sum(tensor, axis=1, keepdims=True),
                name="sum")(model)
        if cos_dist_lvl >= 4:
            model = ValueMinusInput(1, name="one_minus_input")(model)

    if batch_norm:
        model = BatchNormalization(name="input_normalization")(model)

    for i in range(n_layers):
        model = Dense(n_neurons,
                      activation='sigmoid',
                      name="dense_{}".format(i))(model)
    model_out = Dense(1, activation='sigmoid', name="classify")(model)

    model = Model([in_1, in_2], model_out)
    model.compile(loss=loss,
                  optimizer=optimizer,
                  metrics=[
                      BinaryAccuracy(),
                      Precision(),
                      Recall(),
                      TrueNegatives(),
                      FalsePositives(),
                      FalseNegatives(),
                      TruePositives()
                  ])

    return model
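A minimal usage sketch; cos_dist_lvl=3 is chosen here so the model is built entirely from stock Keras layers (level 4 additionally needs the custom ValueMinusInput layer from this project). Data is random and illustrative:

import numpy as np

model = build_cos_model(input_size=64, cos_dist_lvl=3, n_neurons=16, n_layers=2)
a = np.random.rand(8, 64).astype('float32')
b = np.random.rand(8, 64).astype('float32')
y = np.random.randint(0, 2, size=(8, 1)).astype('float32')
model.fit([a, b], y, epochs=1, verbose=0)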