Example #1
def test_unweighted_with_threshold(self):
    p_obj = metrics.Precision(thresholds=[0.5, 0.7])
    y_pred = K.constant([1, 0, 0.6, 0], shape=(1, 4))
    y_true = K.constant([0, 1, 1, 0], shape=(1, 4))
    result = p_obj(y_true, y_pred)
    assert np.allclose([0.5, 0.], K.eval(result), 0)
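As a cross-check, the same threshold arithmetic works out by hand with NumPy (a minimal sketch; Keras counts a prediction as positive when pred > threshold):

import numpy as np

y_pred = np.array([1, 0, 0.6, 0])
y_true = np.array([0, 1, 1, 0])
for t in (0.5, 0.7):
    pred_pos = y_pred > t                 # positive iff the score exceeds t
    tp = np.sum(pred_pos & (y_true == 1))
    fp = np.sum(pred_pos & (y_true == 0))
    print(t, tp / (tp + fp))              # 0.5 -> 0.5, 0.7 -> 0.0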
Example #2
def run(model_name, train_file, val_file, num_classes, filename, dropout, input_shape_arg=(224, 224, 3)):
    """
        Fit the dataset and run the training process.

        Arguments:
        model_name --> str, one of "resnet50", "resnet18", "vgg19", "vgg16"
        train_file --> h5 file, fit to model
        val_file --> h5 file, for validation
        num_classes --> int, total classes
        filename --> str, used in the plot title and the saved figure name
        dropout --> float, range 0 - 1 for dropout
        input_shape_arg --> shape of image (W, H, C)

        Note: epochs (80), learning rates, batch sizes, and the Adam optimizer
        are fixed inside the function.

        Returns:
        None (saves an accuracy plot and prints per-configuration metrics)
    """
    
    # preprocessing data
    X_train, Y_train, X_val, Y_val = dataset_preprocess(num_classes, train_file, val_file)

    _epoch = 80
    lr_value_array = [1e-3, 1e-4]
    if model_name == "resnet50":
        batch_size_array = [8, 16, 32]
        LABEL = ["e-3(8)", "e-3(16)", "e-3(32)", "e-4(8)", "e-4(16)", "e-4(32)"]
    elif model_name == "resnet18":
        batch_size_array = [16, 32, 64]
        LABEL = ["e-3(16)", "e-3(32)", "e-3(64)", "e-4(16)", "e-4(32)", "e-4(64)"]

    HP = []
    for lr in lr_value_array:
        for bs in batch_size_array:
            hp = Hyperparameter("adam", lr, bs, dropout)
            HP.append(hp)

    HISTORY = []
    ROC = []
    for hp in HP:
        K.clear_session()
        model = None
        # compile model
        if model_name == "resnet50":
            print("resnet50")
            model = ResNet50(input_shape=input_shape_arg, classes=int(num_classes), dropout_value=hp.dropout)
            model.compile(optimizer=hp.get_optimizer(), loss='categorical_crossentropy', metrics=['accuracy', metrics.AUC(), metrics.Precision(), metrics.Recall()])
        elif model_name == "resnet18":
            print("resnet18")
            model = ResNet18(input_shape=input_shape_arg, classes=int(num_classes), dropout_value=hp.dropout)
            model.compile(optimizer=hp.get_optimizer(), loss='categorical_crossentropy', metrics=['accuracy', metrics.AUC(), metrics.Precision(), metrics.Recall()])
        elif model_name == "vgg19":
            print("VGG19")
            # configure model input    
            base_model = applications.vgg19.VGG19(weights= None, include_top=False, input_shape= input_shape_arg)
            # configure model output
            x = base_model.output
            x = GlobalAveragePooling2D()(x)
            x = Dropout(hp.dropout(x))
            out = Dense(int(num_classes), activation= 'softmax')(x)
            # combine model then compile
            model = Model(inputs = base_model.input, outputs = out)
            model.compile(optimizer= hp.get_optimizer(), loss='categorical_crossentropy', metrics=['accuracy'])
        elif model_name == "vgg16":
            print("VGG16")
            # configure model input
            base_model = applications.vgg16.VGG16(weights= None, include_top=False, input_shape= input_shape_arg)
            # configure model output
            x = base_model.output
            x = GlobalAveragePooling2D()(x)
            x = Dropout(hp.dropout(x))
            out = Dense(int(num_classes), activation= 'softmax')(x)
            # combine model then compile
            model = Model(inputs = base_model.input, outputs = out)
            model.compile(optimizer= hp.get_optimizer(), loss='categorical_crossentropy', metrics=['accuracy'])

        # train the model (every hyperparameter combination uses the Adam optimizer)
        history = model.fit(X_train, Y_train, epochs=_epoch, batch_size=hp.batch_size,
                            validation_data=(X_val, Y_val),
                            shuffle=True)
        HISTORY.append(history)
        del model

        print(f"DONE for: {hp.optim}-{hp.lr_value}-{hp.batch_size}-{hp.dropout}")

    
    plt.figure(1)
    mpl.style.use('seaborn')
    i = 0
    for history in HISTORY:
        plt.plot(history.history["val_accuracy"], f"C{i}", label=LABEL[i])
        i = i+1
    plt.ylabel('val_acc')
    plt.xlabel('epoch')
    plt.title(f"Accuracy {filename}")
    plt.legend()
    plt.savefig(f"ACC-{filename}.png")

    print("VAL ACC:")
    i = 0
    for history in HISTORY:
        print(LABEL[i])
        i = i + 1
        print("auc: {}" .format(statistics.mean( history.history["val_auc_1"] )) )
        print("recall: {}" .format(statistics.mean( history.history["val_recall_1"] )) )
        print("Prec: {}" .format(statistics.mean( history.history["val_precision_1"] )) )
        print("MEAN: {}" .format(statistics.mean( history.history["val_accuracy"] )) )
        print("STD: {}" .format(statistics.pstdev( history.history['val_accuracy'] )) )
Example #3
def test_unweighted(self):
    p_obj = metrics.Precision()
    y_pred = K.constant([1, 0, 1, 0], shape=(1, 4))
    y_true = K.constant([0, 1, 1, 0], shape=(1, 4))
    result = p_obj(y_true, y_pred)
    assert np.isclose(0.5, K.eval(result))
Example #4
def set_model_prediction_multi_label(model_path, test_set, custom_objects, with_f1=False):
    """Compute metrics of a set of models for multilabel model. Computed metrics are : Loss, Accuracy, F1-Score, Macro F1-Score.

    Args:
        model_path (string): path of folder that contains models.
        test_set (pandas.DataFrame): DataFrame with column path for images path and label.
        custom_objects (dict): Dict of custom objects to load with model.
        with_f1 (bool, optional): True if the model is train with F1-Score metrics, otherwise False. Defaults to False.
    """
    test_generator = generator(test_set['path'].to_numpy(),
                               test_set[['label_culture', 'label_coffee']].to_numpy(),
                               eurosat_params['mean'],
                               eurosat_params['std'],
                               batch_size=len(test_set))
    
    prediction_set = []
    evaluate = []
    
    X, y = next(test_generator)
    
    for path in os.listdir(model_path):
        if path.endswith('.h5'):  # more robust than split(".")[1] for names with extra dots
            restored_model = None
            if with_f1:
                restored_model = load_model(os.path.join(model_path, path), custom_objects, compile=False)
                restored_model.compile(optimizer=Adam(learning_rate=0.00001), loss='binary_crossentropy',
                                       metrics=[metrics.BinaryAccuracy(name='accuracy'), metrics.Precision(name='precision'),
                                                metrics.Recall(name='recall'), f1_score_keras])
            else:
                restored_model = load_model(os.path.join(model_path, path), custom_objects)
            
            evaluate.append(restored_model.evaluate(test_generator, steps=1))
            prediction_set.append(np.where(restored_model.predict(X) > 0.5, 1, 0))
            
    predictions = []
    for pred in zip(*prediction_set):
        culture_pred, coffee_pred = zip(*pred)

        predictions.append(np.array([np.argmax(np.bincount(culture_pred)),
                                    np.argmax(np.bincount(coffee_pred))]))
    
    cm = multilabel_confusion_matrix(y, predictions)
    
    plot_confusion_matrix(cm[0], ["Culture", "No-Culture"], "Confusion Matrix\nCulture vs No-Culture\nDenseNet 64x64")
    plot_confusion_matrix(cm[1], ["Coffee", "Other"], "Confusion Matrix\nCoffee vs other\nDenseNet 64x64")
    
    if with_f1:
        losses, accs, precisions, recalls, f1 = zip(*evaluate)
    else:
        losses, accs, precisions, recalls = zip(*evaluate)

    print("Global metrics")
    print(f"Mean accuracy : {round(np.mean(accs),4)}")
    print(f"Stdev accuracy : {round(np.std(accs),4)}")
    print("\n")
    print(f"Mean loss : {round(np.mean(losses),4)}")
    print(f"Stdev loss : {round(np.std(losses),4)}")
    print("\n")
    culture_pred, coffee_pred = zip(*predictions)
    culture_true, coffee_true = zip(*y)
    print("Culture vs no-culture")
    print(f"F1-Score Culture: {round(f1_score(culture_true, culture_pred, pos_label=0),4)}")
    print(f"F1-Score No culture: {round(f1_score(culture_true, culture_pred, pos_label=1),4)}")
    print(f"Macro F1-Score : {round(f1_score(culture_true, culture_pred, average='macro'),4)}")
    print(f"\n")
    print("Coffee vs other")
    print(f"F1-Score Coffee: {round(f1_score(coffee_true, coffee_pred, pos_label=0),4)}")
    print(f"F1-Score Other: {round(f1_score(coffee_true, coffee_pred, pos_label=1),4)}")
    print(f"Macro F1-Score : {round(f1_score(coffee_true, coffee_pred, average='macro'),4)}")
Example #5
model.add(Dropout(dropout_9))

model.add(Dense(units_10, activation=activation_10))
model.add(Dropout(dropout_10))

model.add(Dense(units_11, activation=activation_11))
model.add(Dropout(dropout_11))

model.add(Dense(units_12, activation=activation_12))
model.add(Dropout(dropout_12))

model.add(Dense(nb_classes, activation='sigmoid'))

METRICS = [
            metrics.BinaryAccuracy(name='ACCURACY'),
            metrics.Precision(name='PRECISION'),
            metrics.Recall(name='RECALL'),
            metrics.AUC(name='AUC'),
            metrics.TruePositives(name='TP'),
            metrics.TrueNegatives(name='TN'),
            metrics.FalsePositives(name='FP'),
            metrics.FalseNegatives(name='FN')]

model.compile(loss='binary_crossentropy',
                optimizer=compile_optimizer,
                metrics=METRICS)

# GENERATORS
train_datagen = ImageDataGenerator(
    rescale=1. / 255,
    shear_range=0.2,
)  # the original snippet is truncated at this point
Example #6
def set_model_prediction(model_path, test_set, custom_objects, with_f1=False, labels=["Coffee", "Other"], title="title"):
    """Compute metrics of a set of models. Computed metrics are : Loss, Accuracy, F1-Score, Macro F1-Score.

    Args:
        model_path (string): path of folder that contains models.
        test_set (pandas.DataFrame): DataFrame with column path for images path and label.
        custom_objects (dict): Dict of custom objects to load with model.
        with_f1 (bool, optional): True if the model is train with F1-Score metrics, otherwise False. Defaults to False.
        labels (list, optional): Label for confusion Matrix. Defaults to ["Coffee", "Other"].
        title (str, optional): Title of confusion matrix. Defaults to "title".
    """
    test_generator = generator(test_set['path'].to_numpy(),
                               test_set['label'].to_numpy(),
                               eurosat_params['mean'],
                               eurosat_params['std'],
                               batch_size=len(test_set))
    
    prediction_set = []
    evaluate = []
    
    X, y = next(test_generator)
    
    for path in os.listdir(model_path):
        if path.endswith('.h5'):  # more robust than split(".")[1] for names with extra dots
            restored_model = None
            if with_f1:
                restored_model = load_model(os.path.join(model_path, path), custom_objects, compile=False)
                restored_model.compile(optimizer=Adam(learning_rate=0.00001), loss='binary_crossentropy',
                                       metrics=[metrics.BinaryAccuracy(name='accuracy'), metrics.Precision(name='precision'),
                                                metrics.Recall(name='recall'), f1_score_keras])
            else:
                restored_model = load_model(os.path.join(model_path, path), custom_objects)
            
            evaluate.append(restored_model.evaluate(test_generator, steps=1))
            prediction_set.append(np.where(restored_model.predict(X) > 0.5, 1, 0).reshape(-1).tolist())
            
    predictions = []
    for pred in zip(*prediction_set):
        predictions.append(np.argmax(np.bincount(pred)))
    
    cm = confusion_matrix(y, predictions)
    plot_confusion_matrix(cm, labels, title)
    
    if with_f1:
        losses, accs, precisions, recalls, f1 = zip(*evaluate)
    else:
        losses, accs, precisions, recalls = zip(*evaluate)

    print(f"Mean accuracy : {round(np.mean(accs),4)}")
    print(f"Stdev accuracy : {round(np.std(accs),4)}")
    print("\n")
    print(f"Mean loss : {round(np.mean(losses),4)}")
    print(f"Stdev loss : {round(np.std(losses),4)}")
    print("\n")
    print(f"F1-Score {labels[0]}: {round(f1_score(y, predictions, pos_label=0),4)}")
    print(f"F1-Score {labels[1]}: {round(f1_score(y, predictions, pos_label=1),4)}")
    print(f"Macro F1-Score : {round(f1_score(y, predictions, average='macro'),4)}")
Example #7


print(imerg.shape, label.shape, goes.shape, flush=True)


model = UNet()
print(model.summary(), flush=True)
model = multi_gpu_model(model, gpus=2)  # deprecated API; tf.distribute.MirroredStrategy is the modern replacement

# name this list something other than "metrics" so it does not shadow the
# imported keras.metrics module
model_metrics = [
    metrics.FalseNegatives(name="fn"),
    metrics.FalsePositives(name="fp"),
    metrics.TrueNegatives(name="tn"),
    metrics.TruePositives(name="tp"),
    metrics.Precision(name="precision"),
    metrics.Recall(name="recall"),
]


model.compile(optimizer="Adam", loss='binary_crossentropy', metrics=model_metrics)

epochs = 500
batch_size = 16
earlystopper = EarlyStopping(patience=50, verbose=1, monitor='val_loss')
checkpointer = ModelCheckpoint('model_ck_mse_new.h5', save_best_only=True, verbose=1)

history = model.fit([goes, imerg], label, epochs=epochs, batch_size=batch_size,
                    validation_split=0.3, callbacks=[earlystopper, checkpointer], verbose=2)

results = pd.DataFrame(history.history)
Example #8

def f1_score(y_true, y_pred):
    # metrics.Precision / metrics.Recall are classes, not functions: build
    # stateful metric objects, update them, then combine their results
    precision = metrics.Precision()
    recall = metrics.Recall()
    precision.update_state(y_true, y_pred)
    recall.update_state(y_true, y_pred)
    p, r = precision.result(), recall.result()
    return 2 * (p * r) / (p + r + K.epsilon())  # epsilon guards the division
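A quick sanity check of the corrected metric on a toy batch, assuming the same keras backend (K) and keras.metrics imports as the snippets above (values chosen so precision = recall = 0.5):

y_true = K.constant([0, 1, 1, 0], shape=(1, 4))
y_pred = K.constant([1, 0, 1, 0], shape=(1, 4))
print(K.eval(f1_score(y_true, y_pred)))  # 2 * 0.25 / 1.0 -> ~0.5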
Example #9

def train_model(dataset, model):
    epochs = 50
    #     epochs = 0
    lr = 1e-3
    size = 300
    wd = 1e-2
    bs = 4  # reduce this if you are running out of GPU memory
    pretrained = True

    config = {
        'epochs': epochs,
        'lr': lr,
        'size': size,
        'wd': wd,
        'bs': bs,
        'pretrained': pretrained,
    }

    wandb.config.update(config)
    checkpointer = ModelCheckpoint('model-resnet50.h5',
                                   verbose=1,
                                   save_best_only=True)
    # # Define IoU metric
    # def mean_iou(y_true, y_pred):
    #   prec = []
    #   for t in np.arange(0.5, 1.0, 0.05):
    #     y_pred_ = tf.to_int32(y_pred > t)
    #     score, up_opt = tf.metrics.mean_iou(y_true, y_pred_, 2)
    #     K.get_session().run(tf.local_variables_initializer())
    #     with tf.control_dependencies([up_opt]):
    #         score = tf.identity(score)
    #     prec.append(score)
    # return K.mean(K.stack(prec), axis=0)

    earlystopper = EarlyStopping(patience=5, verbose=1)
    learning_rate_reduction = ReduceLROnPlateau(monitor='val_loss',
                                                patience=3,
                                                verbose=1,
                                                factor=0.5,
                                                min_lr=0.00001)

    model.compile(optimizer=optimizers.Adam(lr=lr),
                  loss='categorical_crossentropy',
                  metrics=[
                      metrics.Precision(top_k=1, name='precision'),
                      metrics.Recall(top_k=1, name='recall'),
                      FBeta(name='f_beta')
                  ])

    train_data, valid_data = datasets_keras.load_dataset(dataset, bs)
    _, ex_data = datasets_keras.load_dataset(dataset, 10)
    model.fit_generator(train_data,
                        validation_data=valid_data,
                        epochs=epochs,
                        callbacks=[
                            earlystopper, learning_rate_reduction,
                            checkpointer,
                            WandbCallback(input_type='image',
                                          output_type='segmentation_mask',
                                          validation_data=ex_data[0])
                        ])
Example #10
def test_unweighted_top_k(self):
    p_obj = metrics.Precision(top_k=3)
    y_pred = K.constant([0.2, 0.1, 0.5, 0, 0.2], shape=(1, 5))
    y_true = K.constant([0, 1, 1, 0, 0], shape=(1, 5))
    result = p_obj(y_true, y_pred)
    assert np.isclose(1. / 3, K.eval(result))
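The top_k masking can be reproduced with NumPy (a sketch; assumes ties among equal scores are broken by index order, as with a stable sort):

import numpy as np

y_pred = np.array([0.2, 0.1, 0.5, 0, 0.2])
y_true = np.array([0, 1, 1, 0, 0])
top3 = np.argsort(y_pred, kind="stable")[-3:]  # indices of the 3 largest scores
pred_pos = np.zeros_like(y_true)
pred_pos[top3] = 1
tp = (pred_pos * y_true).sum()                 # 1 true positive (index 2)
print(tp / pred_pos.sum())                     # -> 1/3, matching the assertion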