def my_cnn_network(width, dicom_path, masks_path, prag, train_dim, test_dim):
    x_train, y_train = train_the_network(dicom_path, masks_path)  # helper (defined elsewhere) that loads the DICOM slices and masks
    x_train = x_train.reshape(train_dim, width, width, 1)
    y_train = y_train.reshape(train_dim, width, width, 1)

    # architecture: convolutional encoder-decoder
    inp = Input((width, width, 1))
    l = Conv2D(32, (2, 2), padding='same')(inp)
    l = BatchNormalization()(l)
    l = Activation('relu')(l)
    l = AveragePooling2D((2, 2))(l)

    l = Conv2D(64, (3, 3), padding='same')(l)
    l = BatchNormalization()(l)
    l = Activation('relu')(l)
    l = AveragePooling2D((2, 2))(l)

    l = Conv2D(256, (3, 3), padding='same')(l)
    l = BatchNormalization()(l)
    l = Activation('relu')(l)
    l = AveragePooling2D((2, 2))(l)

    l = Conv2DTranspose(128, (3, 3), strides=2, padding='same')(l)
    l = BatchNormalization()(l)
    l = Activation('relu')(l)

    l = Conv2DTranspose(64, (3, 3), strides=2, padding='same')(l)
    l = BatchNormalization()(l)
    l = Activation('relu')(l)

    l = Conv2DTranspose(32, (3, 3), padding='same')(l)
    l = BatchNormalization()(l)
    l = Activation('relu')(l)

    l = Conv2DTranspose(32, (3, 3), strides=2, padding='same')(l)
    l = BatchNormalization()(l)
    l = Activation('relu')(l)

    # single-channel sigmoid output for the binary segmentation mask
    decoded = Conv2D(1, (3, 3), activation='sigmoid', padding='same')(l)

    MyModel = Model(inp, decoded)

    MyModel.summary()
    print()

    MyModel.compile(optimizer='adagrad',
                    loss=losses.mse,
                    metrics=[
                        'accuracy',
                        metrics.Recall(),
                        metrics.Precision(),
                        metrics.TruePositives(),
                        metrics.TrueNegatives(),
                        metrics.FalseNegatives(),
                        metrics.FalsePositives()
                    ])
    MyModel.fit(x_train, y_train, batch_size=32, epochs=1000)
    MyModel.save('model.h5')
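
The prag and test_dim parameters above go unused; prag is Romanian for "threshold", so they were presumably intended for a prediction step. A minimal inference sketch under that assumption (the function name and the 0/1 mask convention are guesses, not part of the original):

from keras.models import load_model
import numpy as np

def predict_masks(width, x_test, test_dim, prag=0.5):
    # Load the model saved by my_cnn_network and threshold its sigmoid
    # outputs at prag to obtain binary masks.
    model = load_model('model.h5')
    x_test = x_test.reshape(test_dim, width, width, 1)
    probs = model.predict(x_test)
    return (probs > prag).astype(np.uint8)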
Code Example #2
 def test_unweighted_all_incorrect(self):
     r_obj = metrics.Recall(thresholds=[0.5])
     inputs = np.random.randint(0, 2, size=(100, 1))
     y_pred = K.constant(inputs)
     y_true = K.constant(1 - inputs)
     result = r_obj(y_true, y_pred)
     assert np.isclose(0, K.eval(result))
Code Example #3
    def my_model(self):
        input_shape = (self.dim, self.dim, 3)
        nclass = self.nclass

        input_ = Input(shape=input_shape)

        conv1 = self.conv2d_bn(input_, 64, kernel_size=(3, 3), strides=(2, 2))
        pool1 = MaxPool2D(pool_size=(3, 3), strides=(2, 2),
                          padding='same')(conv1)

        conv2 = self.basic_block(64, 2, is_first_layer=False)(pool1)
        pool2 = GlobalAvgPool2D()(conv2)

        output_ = Dense(nclass, activation='softmax')(pool2)

        model = Model(inputs=input_, outputs=output_)
        model.compile(loss="categorical_crossentropy",
                      optimizer="adagrad",
                      metrics=[
                          "accuracy",
                          metrics.AUC(),
                          metrics.Precision(),
                          metrics.Recall()
                      ])
        return model
Code Example #4
def train_model(dataset, model):
    epochs = 15
    #     epochs = 0
    lr = 1e-4
    size = 300
    wd = 1e-2
    bs = 8  # reduce this if you are running out of GPU memory
    pretrained = True
    alpha_fl = [0.4, 0.4, 0.15, 0.05]
    gamma_fl = 2

    config = {
        'epochs': epochs,
        'lr': lr,
        'size': size,
        'wd': wd,
        'bs': bs,
        'alpha_fl': alpha_fl,
        'gamma_fl': gamma_fl,
        'pretrained': pretrained
    }

    wandb.config.update(config)

    model.compile(
        optimizer=optimizers.Adam(lr=lr),
        loss=[categorical_focal_loss(alpha=alpha_fl, gamma=gamma_fl)],
        metrics=[
            metrics.Precision(top_k=1, name='precision'),
            metrics.Recall(top_k=1, name='recall'),
            # CategoricalAccuracy, not plain Accuracy: Accuracy would compare
            # raw probability values to the one-hot labels element-wise
            metrics.CategoricalAccuracy(name='accuracy')
        ])
    early_stop = EarlyStopping(monitor='loss',
                               min_delta=0.01,
                               patience=7,
                               mode='min',
                               verbose=1)

    reduce_on_plateau = ReduceLROnPlateau(monitor='loss',
                                          factor=0.1,
                                          patience=2,
                                          verbose=1,
                                          mode='min',
                                          min_delta=0.01,  # named epsilon in very old Keras releases
                                          cooldown=0,
                                          min_lr=0)

    train_data, valid_data = datasets_keras.load_dataset(dataset, bs)
    _, ex_data = datasets_keras.load_dataset(dataset, 10)
    model.fit_generator(train_data,
                        validation_data=valid_data,
                        epochs=epochs,
                        callbacks=[
                            early_stop, reduce_on_plateau,
                            WandbCallback(input_type='image',
                                          output_type='segmentation_mask',
                                          validation_data=ex_data[0])
                        ])
Code Example #5
    def test_unweighted_top_k_and_threshold(self):
        r_obj = metrics.Recall(thresholds=.7, top_k=2)

        y_pred = K.constant([0.2, 0.8, 0.6, 0, 0.2], shape=(1, 5))
        y_true = K.constant([1, 1, 1, 0, 1], shape=(1, 5))
        result = r_obj(y_true, y_pred)
        assert np.isclose(0.25, K.eval(result))
        assert np.isclose(1, K.eval(r_obj.true_positives))
        assert np.isclose(3, K.eval(r_obj.false_negatives))
Code Example #6
 def test_recall(self, distribution):
     # True positives = 2, false negatives = 1, so recall is 2/3 = 0.6666667
     label_prediction = ([0, 1, 1, 1], [1, 0, 1, 1])
     with distribution.scope():
         recall = metrics.Recall()
         self.evaluate([v.initializer for v in recall.variables])
         updates = distribution.run(recall, args=label_prediction)
         self.evaluate(updates)
     self.assertAllClose(recall.result(), 0.6666667)
Code Example #7
 def test_weighted_with_threshold(self):
     r_obj = metrics.Recall(thresholds=[0.5, 1.])
     y_true = K.constant([[0, 1], [1, 0]], shape=(2, 2))
     y_pred = K.constant([[1, 0], [0.6, 0]], shape=(2, 2), dtype='float32')
     weights = K.constant([[1, 4], [3, 2]], shape=(2, 2), dtype='float32')
     result = r_obj(y_true, y_pred, sample_weight=weights)
     weighted_tp = 0 + 3.
     weighted_positives = (0 + 3.) + (4. + 0.)
     expected_recall = weighted_tp / weighted_positives
     assert np.allclose([expected_recall, 0], K.eval(result), 1e-3)
Code Example #8
 def __init__(self):
     self.num_conv2d_layers=1
     self.filters_2d=[16,32]
     self.kernel_size_2d=[[3,3], [3,3]]
     self.mpool_size_2d=[[2,2], [2,2]]
     self.metric_type_map = {
         'precision' : metrics.Precision(),
         'recall' : metrics.Recall(),
         'AUC' : metrics.AUC(),
         'accuracy' : metrics.Accuracy(),
     }
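
A hypothetical use of the metric_type_map built above: resolving metric names from a configuration into Keras metric objects at compile time (the helper name and its arguments are assumptions, not part of the original):

 def compile_with_metrics(self, model, metric_names):
     # Look up each configured metric name in the map from __init__.
     model.compile(optimizer='adam',
                   loss='binary_crossentropy',
                   metrics=[self.metric_type_map[name] for name in metric_names])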
Code Example #9
    def compile_model(self):
        learning_rate = 0.000625  # initial learning rate

        self.model.compile(
            optimizer=Adam(learning_rate=learning_rate),
            loss="binary_crossentropy",
            metrics=["accuracy",
                     metrics.Precision(),
                     metrics.Recall(), f1]  #,
            # sample_weight_mode="temporal"
        )
Code Example #10
 def test_weighted(self):
     r_obj = metrics.Recall()
     y_pred = K.constant([[1, 0, 1, 0], [0, 1, 0, 1]])
     y_true = K.constant([[0, 1, 1, 0], [1, 0, 0, 1]])
     result = r_obj(y_true,
                    y_pred,
                    sample_weight=K.constant([[1, 2, 3, 4], [4, 3, 2, 1]]))
     weighted_tp = 3.0 + 1.0
     weighted_t = (2.0 + 3.0) + (4.0 + 1.0)
     expected_recall = weighted_tp / weighted_t
     assert np.isclose(expected_recall, K.eval(result))
Code Example #11
    def __init__(self, X_train, y_train, X_val, y_val, X_test, y_test,
                 **kwargs):

        self.model_dir = MODEL_DIR

        (self.X_train, self.y_train) = (X_train, y_train)
        (self.X_val, self.y_val) = (X_val, y_val)
        (self.X_test, self.y_test) = (X_test, y_test)

        self.metrics = [metrics.Recall(), metrics.Precision(), metrics.AUC()]
        self.model = self.set_architecture()
        self.callback = self.compile_model()
Code Example #12
def build_model():
    model = Sequential()
    model.add(layers.Dense(30, activation='relu', input_shape=[30]))

    model.add(layers.Dropout(0.8))
    model.add(layers.Dense(16, activation='relu'))
    model.add(layers.Dropout(0.8))
    model.add(layers.Dense(2, activation='sigmoid'))
    model.compile(optimizer=optimizers.Adam(),
                  loss=losses.binary_crossentropy,
                  # keras.metrics has no built-in F1 metric in these versions;
                  # a custom f1 function (see Code Example #25) is needed instead
                  metrics=['accuracy', metrics.Precision(), metrics.Recall()])
    model.summary()
    return model
Code Example #13
def initializeNN():

    from keras.models import Sequential
    from keras.layers import Dense
    from keras.layers import LeakyReLU
    from keras.layers import Dropout
    from keras import regularizers
    from keras import metrics
    #import tensorflow_addons as tfa

    ### Define metrics
    metrics = [
        metrics.CategoricalAccuracy(name="accuracy"),
        metrics.FalseNegatives(name="fn"),
        metrics.FalsePositives(name="fp"),
        metrics.TrueNegatives(name="tn"),
        metrics.TruePositives(name="tp"),
        metrics.Precision(name="precision"),
        metrics.Recall(name="recall"),
        metrics.AUC(name='auc')  #,
        #tfa.metrics.CohenKappa(name='kappa')
    ]

    # define the keras model
    nn = Sequential()
    nn.add(Dense(256, input_dim=102,
                 kernel_regularizer='l1'))  #, activation='relu'))
    nn.add(LeakyReLU(alpha=0.1))
    nn.add(Dropout(0.1))

    nn.add(Dense(128))  #, activation='relu'))#,kernel_regularizer='l1'))
    nn.add(LeakyReLU(alpha=0.1))
    nn.add(Dropout(0.1))

    nn.add(Dense(64))  #, activation='relu'))#,kernel_regularizer='l1'))
    nn.add(LeakyReLU(alpha=0.1))
    nn.add(Dropout(0.1))

    nn.add(Dense(64))  #, activation='relu'))#,kernel_regularizer='l1'))
    nn.add(LeakyReLU(alpha=0.1))
    nn.add(Dropout(0.1))

    nn.add(Dense(31, activation='softmax'))

    nn.compile(loss='categorical_crossentropy',
               optimizer='Adamax',
               metrics=metrics)

    return nn
Code Example #14
    def test_unweighted_top_k_and_class_id(self):
        r_obj = metrics.Recall(class_id=2, top_k=2)

        y_pred = K.constant([0.2, 0.6, 0.3, 0, 0.2], shape=(1, 5))
        y_true = K.constant([0, 1, 1, 0, 0], shape=(1, 5))
        result = r_obj(y_true, y_pred)
        assert np.isclose(1, K.eval(result))
        assert np.isclose(1, K.eval(r_obj.true_positives))
        assert np.isclose(0, K.eval(r_obj.false_negatives))

        y_pred = K.constant([1, 1, 0.9, 1, 1], shape=(1, 5))
        y_true = K.constant([0, 1, 1, 0, 0], shape=(1, 5))
        result = r_obj(y_true, y_pred)
        assert np.isclose(0.5, K.eval(result))
        assert np.isclose(1, K.eval(r_obj.true_positives))
        assert np.isclose(1, K.eval(r_obj.false_negatives))
Code Example #15
    def test_weighted_top_k(self):
        r_obj = metrics.Recall(top_k=3)
        y_pred1 = K.constant([0.2, 0.1, 0.4, 0, 0.2], shape=(1, 5))
        y_true1 = K.constant([0, 1, 1, 0, 1], shape=(1, 5))
        K.eval(
            r_obj(y_true1,
                  y_pred1,
                  sample_weight=K.constant([[1, 4, 2, 3, 5]])))

        y_pred2 = K.constant([0.2, 0.6, 0.4, 0.2, 0.2], shape=(1, 5))
        y_true2 = K.constant([1, 0, 1, 1, 1], shape=(1, 5))
        result = r_obj(y_true2, y_pred2, sample_weight=K.constant(3))

        tp = (2 + 5) + (3 + 3)
        positives = (4 + 2 + 5) + (3 + 3 + 3 + 3)
        expected_recall = float(tp) / positives
        assert np.isclose(expected_recall, K.eval(result))
Code Example #16
def train_cnn_model(x_train, y_train):
  x_train = array(x_train)
  x_train = x_train.reshape((len(x_train), 3, int(len(x_train[0])/3), 1))

  y_train = array(y_train)

  #create model
  cnn_model = Sequential()
  cnn_model.add(Conv2D(64, 
                  kernel_size=3, 
                  activation='relu', 
                  input_shape=(3,21,1), 
                  padding='same'))
  cnn_model.add(layers.BatchNormalization())  # default axis=-1 normalizes the channel dimension
  cnn_model.add(Conv2D(64, 
                  kernel_size=3, 
                  activation='relu', 
                  padding='same'))
  cnn_model.add(layers.BatchNormalization())
  cnn_model.add(MaxPooling2D(2,2))
  cnn_model.add(Flatten())
  cnn_model.add(Dense(512, activation = 'relu')) 
  cnn_model.add(Dense(1, activation='sigmoid'))

  # compile and fit
  cnn_model.compile(optimizer='Adam',
                loss='binary_crossentropy',
                metrics=['acc', 
                        metrics.AUC(), 
                        metrics.Recall(),
                        metrics.Precision(),
                        metrics.FalseNegatives(),
                        metrics.TrueNegatives(),
                        metrics.FalsePositives(),
                        metrics.TruePositives()])
  cnn_history = cnn_model.fit(x_train, y_train,
                      epochs=100,
                      batch_size=16,
                      validation_split=0.2,
                      callbacks=[callbacks.EarlyStopping(monitor='val_loss', patience=5),
                      callbacks.LearningRateScheduler(scheduler)])
    
  print("finish training cnn model")
  return cnn_model, cnn_history
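
Both this function and the LSTM variant in Code Example #20 pass a scheduler callable to LearningRateScheduler without defining it. A minimal sketch of what it might look like (the hold period and decay factor are assumptions):

def scheduler(epoch, lr):
  # Hold the initial learning rate for the first 10 epochs, then decay
  # it by 10% per epoch.
  if epoch < 10:
    return lr
  return lr * 0.9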
Code Example #17
def train_model(dataset, model):
    epochs = 40
    #     epochs = 0
    lr = 1e-4
    size = 300
    wd = 1e-2
    bs = 8  # reduce this if you are running out of GPU memory
    pretrained = True
    """For Focal Loss 
    we need alpha and gamma initialized
    """
    alpha = [[.25, .25, .25, .25, .25, .25]]
    gamma = 2

    config = {
        'epochs': epochs,
        'lr': lr,
        'size': size,
        'wd': wd,
        'bs': bs,
        'pretrained': pretrained,
    }

    wandb.config.update(config)

    model.compile(optimizer=optimizers.Adam(lr=lr),
                  loss=[categorical_focal_loss(alpha, gamma)],
                  metrics=[
                      metrics.Precision(top_k=1, name='precision'),
                      metrics.Recall(top_k=1, name='recall'),
                      FBeta(name='f_beta')
                  ])

    train_data, valid_data = datasets_keras.load_dataset(dataset, bs)
    _, ex_data = datasets_keras.load_dataset(dataset, 10)
    model.fit_generator(train_data,
                        validation_data=valid_data,
                        epochs=epochs,
                        callbacks=[
                            WandbCallback(input_type='image',
                                          output_type='segmentation_mask',
                                          validation_data=ex_data[0])
                        ])
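
categorical_focal_loss is assumed here and in Code Example #4 but never defined. A sketch of a common implementation it is presumably compatible with (an assumption, not the original code):

import numpy as np
from keras import backend as K

def categorical_focal_loss(alpha, gamma=2.):
    # Focal loss down-weights well-classified examples so training
    # concentrates on the hard ones: FL = -alpha * (1 - p)^gamma * log(p).
    alpha = np.array(alpha, dtype=np.float32)

    def focal_loss(y_true, y_pred):
        y_pred = K.clip(y_pred, K.epsilon(), 1. - K.epsilon())
        cross_entropy = -y_true * K.log(y_pred)
        loss = alpha * K.pow(1. - y_pred, gamma) * cross_entropy
        return K.mean(K.sum(loss, axis=-1))

    return focal_loss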
Code Example #18
    def test_config(self):
        r_obj = metrics.Recall(name='my_recall',
                               thresholds=[0.4, 0.9],
                               top_k=15,
                               class_id=12)
        assert r_obj.name == 'my_recall'
        assert len(r_obj.weights) == 2
        assert ([v.name for v in r_obj.weights
                 ] == ['true_positives:0', 'false_negatives:0'])
        assert r_obj.thresholds == [0.4, 0.9]
        assert r_obj.top_k == 15
        assert r_obj.class_id == 12

        # Check save and restore config
        r_obj2 = metrics.Recall.from_config(r_obj.get_config())
        assert r_obj2.name == 'my_recall'
        assert len(r_obj2.weights) == 2
        assert r_obj2.thresholds == [0.4, 0.9]
        assert r_obj2.top_k == 15
        assert r_obj2.class_id == 12
Code Example #19
    def create_model(self, activation_function, alpha):
        # create the model
        self.activation_function = activation_function

        self.model = Sequential()

        self.alpha = alpha
        hidden_nodes = int(
            len(self.training_traces) / (alpha *
                                         (len(self.labels) + self.num_target)))
        self.hidden_nodes = hidden_nodes

        self.model.add(LSTM(hidden_nodes))
        self.model.add(Dense(1, activation=activation_function))
        self.model.compile(loss='binary_crossentropy',
                           optimizer='adam',
                           metrics=[
                               'accuracy',
                               metrics.AUC(),
                               metrics.Precision(),
                               metrics.Recall()
                           ])
Code Example #20
def train_lstm_model(x_train, y_train):
  x_train = array(x_train)
  x_train = x_train.reshape((len(x_train), 1, len(x_train[0])))
  print("x_train.shape", x_train.shape)
  print(x_train[0])

  y_train = array(y_train)
  print("y_train.shape", y_train.shape)

  # improve log: use batch size 16 and add one more lstm layer

  lstm_model = Sequential()
  lstm_model.add(LSTM(16, 
                input_shape=(1, 63),
                return_sequences=True))
  lstm_model.add(LSTM(16))
  lstm_model.add(layers.Dense(1, activation='sigmoid'))
  lstm_model.compile(optimizer='rmsprop',
                loss='binary_crossentropy',
                metrics=['acc',
                        metrics.AUC(), 
                        metrics.Recall(),
                        metrics.Precision(),
                        metrics.FalseNegatives(),
                        metrics.TrueNegatives(),
                        metrics.FalsePositives(),
                        metrics.TruePositives()])
  lstm_history = lstm_model.fit(x_train, y_train,
                      epochs=100,
                      batch_size=16,
                      validation_split=0.2,
                      callbacks=[callbacks.EarlyStopping(monitor='val_loss', patience=5),
                      callbacks.LearningRateScheduler(scheduler)])
  print("finish training lstm model")
  return lstm_model, lstm_history
Code Example #21
def set_model_prediction_multi_label(model_path, test_set, custom_objects, with_f1=False):
    """Compute metrics of a set of models for multilabel model. Computed metrics are : Loss, Accuracy, F1-Score, Macro F1-Score.

    Args:
        model_path (string): path of folder that contains models.
        test_set (pandas.DataFrame): DataFrame with column path for images path and label.
        custom_objects (dict): Dict of custom objects to load with model.
        with_f1 (bool, optional): True if the model was trained with the F1-Score metric, otherwise False. Defaults to False.
    """
    test_generator =  generator(test_set['path'].to_numpy(), 
                            test_set[['label_culture','label_coffee']].to_numpy(), 
                            eurosat_params['mean'], 
                            eurosat_params['std'], 
                            batch_size=len(test_set))
    
    prediction_set = []
    evaluate = []
    
    X, y = next(test_generator)
    
    for path in os.listdir(model_path):
        if path.split(".")[1] == 'h5':
            restored_model = None
            if with_f1:
                restored_model = load_model(os.path.join(model_path, path), custom_objects, compile=False)
                restored_model.compile(optimizer=Adam(learning_rate=0.00001),
                                       loss='binary_crossentropy',
                                       metrics=[metrics.BinaryAccuracy(name='accuracy'),
                                                metrics.Precision(name='precision'),
                                                metrics.Recall(name='recall'),
                                                f1_score_keras])
            else:
                restored_model = load_model(os.path.join(model_path, path), custom_objects)
            
            evaluate.append(restored_model.evaluate(test_generator, steps=1))
            prediction_set.append(np.where(restored_model.predict(X) > 0.5, 1, 0))
            
    predictions = []
    for pred in zip(*prediction_set):
        culture_pred, coffee_pred = zip(*pred)

        predictions.append(np.array([np.argmax(np.bincount(culture_pred)),
                                    np.argmax(np.bincount(coffee_pred))]))
    
    cm = multilabel_confusion_matrix(y, predictions)
    
    plot_confusion_matrix(cm[0], ["Culture", "No-Culture"],"Confusion Matrix\nCulture vs No-Culture\nDenseNet 64x64")
    plot_confusion_matrix(cm[1], ["Coffee", "Other"],"Confusion Matrix\nCoffee vs other\nDenseNet 64x64")
    
    if with_f1:
        losses, accs, precisions, recalls, f1 = zip(*evaluate)
    else:
        losses, accs, precisions, recalls = zip(*evaluate)

    print("Global metrics")
    print(f"Mean accuracy : {round(np.mean(accs),4)}")
    print(f"Stdev accuracy : {round(np.std(accs),4)}")
    print("\n")
    print(f"Mean loss : {round(np.mean(losses),4)}")
    print(f"Stdev loss : {round(np.std(losses),4)}")
    print("\n")
    culture_pred, coffee_pred = zip(*predictions)
    culture_true, coffee_true = zip(*y)
    print("Culture vs no-culture")
    print(f"F1-Score Culture: {round(f1_score(culture_true, culture_pred, pos_label=0),4)}")
    print(f"F1-Score No culture: {round(f1_score(culture_true, culture_pred, pos_label=1),4)}")
    print(f"Macro F1-Score : {round(f1_score(culture_true, culture_pred, average='macro'),4)}")
    print(f"\n")
    print("Coffee vs other")
    print(f"F1-Score Coffee: {round(f1_score(coffee_true, coffee_pred, pos_label=0),4)}")
    print(f"F1-Score Other: {round(f1_score(coffee_true, coffee_pred, pos_label=1),4)}")
    print(f"Macro F1-Score : {round(f1_score(coffee_true, coffee_pred, average='macro'),4)}")
Code Example #22
model.add(Dense(units_10, activation=activation_10))
model.add(Dropout(dropout_10))

model.add(Dense(units_11, activation=activation_11))
model.add(Dropout(dropout_11))

model.add(Dense(units_12, activation=activation_12))
model.add(Dropout(dropout_12))

model.add(Dense(nb_classes, activation='sigmoid'))

METRICS = [
            metrics.BinaryAccuracy(name='ACCURACY'),
            metrics.Precision(name='PRECISION'),
            metrics.Recall(name='RECALL'),
            metrics.AUC(name='AUC'),
            metrics.TruePositives(name='TP'),
            metrics.TrueNegatives(name='TN'),
            metrics.FalsePositives(name='FP'),
            metrics.FalseNegatives(name='FN')]

model.compile(loss='binary_crossentropy',
                optimizer=compile_optimizer,
                metrics=METRICS)

# GENERATORS
train_datagen = ImageDataGenerator(
    rescale=1. / 255,
    shear_range=0.2,
    zoom_range=0.2,
Code Example #23
 def test_unweighted_top_k(self):
     r_obj = metrics.Recall(top_k=3)
     y_pred = K.constant([0.2, 0.1, 0.5, 0, 0.2], shape=(1, 5))
     y_true = K.constant([0, 1, 1, 0, 0], shape=(1, 5))
     result = r_obj(y_true, y_pred)
     assert np.isclose(0.5, K.eval(result))
Code Example #24
 def test_unweighted_with_threshold(self):
     r_obj = metrics.Recall(thresholds=[0.5, 0.7])
     y_pred = K.constant([1, 0, 0.6, 0], shape=(1, 4))
     y_true = K.constant([0, 1, 1, 0], shape=(1, 4))
     result = r_obj(y_true, y_pred)
     assert np.allclose([0.5, 0.], K.eval(result), 0)
Code Example #25
def f1_score(y_true, y_pred):
    # metrics.Precision and metrics.Recall are stateful metric classes, not
    # plain functions, so compute batch-wise precision and recall directly
    # with backend ops; K.epsilon() guards against division by zero.
    y_pred = K.round(y_pred)
    tp = K.sum(y_true * y_pred)
    precision = tp / (K.sum(y_pred) + K.epsilon())
    recall = tp / (K.sum(y_true) + K.epsilon())
    return 2 * (precision * recall) / (precision + recall + K.epsilon())
Code Example #26
def set_model_prediction(model_path, test_set, custom_objects, with_f1=False, labels=["Coffee", "Other"], title="title"):
    """Compute metrics of a set of models. Computed metrics are : Loss, Accuracy, F1-Score, Macro F1-Score.

    Args:
        model_path (string): path of folder that contains models.
        test_set (pandas.DataFrame): DataFrame with column path for images path and label.
        custom_objects (dict): Dict of custom objects to load with model.
        with_f1 (bool, optional): True if the model was trained with the F1-Score metric, otherwise False. Defaults to False.
        labels (list, optional): Label for confusion Matrix. Defaults to ["Coffee", "Other"].
        title (str, optional): Title of confusion matrix. Defaults to "title".
    """
    test_generator =  generator(test_set['path'].to_numpy(), 
                            test_set['label'].to_numpy(), 
                            eurosat_params['mean'], 
                            eurosat_params['std'], 
                            batch_size=len(test_set))
    
    prediction_set = []
    evaluate = []
    
    X, y = next(test_generator)
    
    for path in os.listdir(model_path):
        if path.split(".")[1] == 'h5':
            restored_model = None
            if with_f1:
                restored_model = load_model(os.path.join(model_path, path), custom_objects, compile=False)
                restored_model.compile(optimizer=Adam(learning_rate=0.00001),
                                       loss='binary_crossentropy',
                                       metrics=[metrics.BinaryAccuracy(name='accuracy'),
                                                metrics.Precision(name='precision'),
                                                metrics.Recall(name='recall'),
                                                f1_score_keras])
            else:
                restored_model = load_model(os.path.join(model_path, path), custom_objects)
            
            evaluate.append(restored_model.evaluate(test_generator, steps=1))
            prediction_set.append(np.where(restored_model.predict(X) > 0.5, 1, 0).reshape(-1).tolist())
            
    predictions = []
    for pred in zip(*prediction_set):
        predictions.append(np.argmax(np.bincount(pred)))
    
    cm = confusion_matrix(y, predictions)
    plot_confusion_matrix(cm, labels,title)
    
    if with_f1:
        losses, accs, precisions, recalls, f1 = zip(*evaluate)
    else:
        losses, accs, precisions, recalls = zip(*evaluate)

    print(f"Mean accuracy : {round(np.mean(accs),4)}")
    print(f"Stdev accuracy : {round(np.std(accs),4)}")
    print("\n")
    print(f"Mean loss : {round(np.mean(losses),4)}")
    print(f"Stdev loss : {round(np.std(losses),4)}")
    print("\n")
    print(f"F1-Score {labels[0]}: {round(f1_score(y, predictions, pos_label=0),4)}")
    print(f"F1-Score {labels[1]}: {round(f1_score(y, predictions, pos_label=1),4)}")
    print(f"Macro F1-Score : {round(f1_score(y, predictions, average='macro'),4)}")
Code Example #27
    model.add(Dense(100, activation='relu'))
    model.add(Dropout(0.4))
    model.add(Dense(4, activation='softmax'))
    opt = optimizers.Adam(learning_rate=0.0001)

    epoch_number = 30

    # Initiate tool for logging the results and scores during the run
    name = 'Training_Results_Epochs_' + \
        str(epoch_number)+'_5Fold_Run_FastText_Detection-'+str(run)+'.csv'
    csv_logger = CSVLogger('results/' + name)

    # Compile the ML model
    model.compile(loss='categorical_crossentropy',
                  optimizer=opt,
                  metrics=[km.Recall()],
                  sample_weight_mode='temporal')
    print('Compilation successful.')

    # Fit the model in XY epochs (see variable above)

    model.fit(x_train,
              y_train,
              batch_size=32,
              epochs=epoch_number,
              verbose=1,
              validation_split=0.1,
              shuffle=True,
              sample_weight=sample_weights,
              callbacks=[csv_logger])
Code Example #28
def run(model_name, train_file, val_file, num_classes, filename, dropout, input_shape_arg = (224,224,3)):
    """
        fit dataset and run training process

        Arguments:\n
        train_file -->  h5 file, fit to model\n
        val_file --> h5 file, for validation\n
        num_classes --> int, total classes \n
        dropout_value --> float, range 0 - 1 for dropout\n
        epoch --> int\n
        batch_size --> int, [8, 16, 32, 64, 128, 256, etc.]\n
        input_shape_arg --> shape of image (W,H,C)\n
        lr_value --> learning rate value\n
        optimizer --> Adam, SGD\n

        Returns:\n
        model\n
        x_test\n
        y_test
    """
    
    # preprocessing data
    X_train, Y_train, X_val, Y_val = dataset_preprocess(num_classes, train_file, val_file)

    _epoch = 80
    lr_value_array = [1e-3, 1e-4]
    if model_name == "resnet50":
        batch_size_array = [8, 16, 32]
        LABEL = ["e-3(8)", "e-3(16)", "e-3(32)", "e-4(8)", "e-4(16)", "e-4(32)"]
    elif model_name == "resnet18":
        batch_size_array = [16, 32, 64]
        LABEL = ["e-3(16)", "e-3(32)", "e-3(64)", "e-4(16)", "e-4(32)", "e-4(64)"]

    HP = []
    for lr in lr_value_array:
        for bs in batch_size_array:
            hp = Hyperparameter("adam", lr, bs, dropout)
            HP.append(hp)

    HISTORY = []
    ROC = []
    for hp in HP:
        K.clear_session()
        model = None
        # compile model
        if model_name == "resnet50":
            print("resnet50")
            model = ResNet50(input_shape=input_shape_arg, classes=int(num_classes), dropout_value=hp.dropout)
            model.compile(optimizer=hp.get_optimizer(), loss='categorical_crossentropy', metrics=['accuracy', metrics.AUC(), metrics.Precision(), metrics.Recall()])
        elif model_name == "resnet18":
            print("resnet18")
            model = ResNet18(input_shape=input_shape_arg, classes=int(num_classes), dropout_value=hp.dropout)
            model.compile(optimizer=hp.get_optimizer(), loss='categorical_crossentropy', metrics=['accuracy', metrics.AUC(), metrics.Precision(), metrics.Recall()])
        elif model_name == "vgg19":
            print("VGG19")
            # configure model input    
            base_model = applications.vgg19.VGG19(weights= None, include_top=False, input_shape= input_shape_arg)
            # configure model output
            x = base_model.output
            x = GlobalAveragePooling2D()(x)
            x = Dropout(hp.dropout)(x)
            out = Dense(int(num_classes), activation= 'softmax')(x)
            # combine model then compile
            model = Model(inputs = base_model.input, outputs = out)
            model.compile(optimizer= hp.get_optimizer(), loss='categorical_crossentropy', metrics=['accuracy'])
        elif model_name == "vgg16":
            print("VGG16")
            # configure model input
            base_model = applications.vgg16.VGG16(weights= None, include_top=False, input_shape= input_shape_arg)
            # configure model output
            x = base_model.output
            x = GlobalAveragePooling2D()(x)
            x = Dropout(hp.dropout)(x)
            out = Dense(int(num_classes), activation= 'softmax')(x)
            # combine model then compile
            model = Model(inputs = base_model.input, outputs = out)
            model.compile(optimizer= hp.get_optimizer(), loss='categorical_crossentropy', metrics=['accuracy'])

        # optimizer == adam
        # train the model
        history = model.fit(X_train, Y_train, epochs = _epoch, batch_size = hp.batch_size, 
                validation_data=(X_val, Y_val), 
                shuffle=True)
        HISTORY.append(history)
        del model

        print(f"DONE for: {hp.optim}-{hp.lr_value}-{hp.batch_size}-{hp.dropout}")

    
    plt.figure(1)
    mpl.style.use('seaborn')
    i = 0
    for history in HISTORY:
        plt.plot(history.history["val_accuracy"], f"C{i}", label=LABEL[i])
        i = i+1
    plt.ylabel('val_acc')
    plt.xlabel('epoch')
    plt.title(f"Accuracy {filename}")
    plt.legend()
    plt.savefig(f"ACC-{filename}.png")

    print("VAL ACC:")
    i = 0
    for history in HISTORY:
        print(LABEL[i])
        i = i + 1
        print("auc: {}" .format(statistics.mean( history.history["val_auc_1"] )) )
        print("recall: {}" .format(statistics.mean( history.history["val_recall_1"] )) )
        print("Prec: {}" .format(statistics.mean( history.history["val_precision_1"] )) )
        print("MEAN: {}" .format(statistics.mean( history.history["val_accuracy"] )) )
        print("STD: {}" .format(statistics.pstdev( history.history['val_accuracy'] )) )
Code Example #29
 def test_unweighted(self):
     r_obj = metrics.Recall()
     y_pred = K.constant([1, 0, 1, 0], shape=(1, 4))
     y_true = K.constant([0, 1, 1, 0], shape=(1, 4))
     result = r_obj(y_true, y_pred)
     assert np.isclose(0.5, K.eval(result))
Code Example #30

print(imerg.shape, label.shape,goes.shape, flush=True)


model=UNet()
print(model.summary(),flush=True)
model = multi_gpu_model(model,gpus=2)

metrics = [  # note: rebinding this name shadows the keras metrics module from here on
    metrics.FalseNegatives(name="fn"),
    metrics.FalsePositives(name="fp"),
    metrics.TrueNegatives(name="tn"),
    metrics.TruePositives(name="tp"),
    metrics.Precision(name="precision"),
    metrics.Recall(name="recall"),
]


model.compile(optimizer="Adam", loss='binary_crossentropy', metrics=metrics)

epochs=500
batch_size=16
earlystopper = EarlyStopping(patience=50,verbose=1, monitor='val_loss')
checkpointer = ModelCheckpoint('model_ck_mse_new.h5', save_best_only=True, verbose=1)

history=model.fit([goes,imerg], label, epochs=epochs, batch_size=batch_size,
          validation_split=0.3, callbacks=[earlystopper,checkpointer], verbose=2) 

results=pd.DataFrame(history.history)
print(results, flush=True)