Example 1
def compute_all_metrics(execution_id, path_input, path_output, formula,
                        append):
    """
    Computes all metrics and persists them in a CSV file.

    Args:
        execution_id (int): identifier of the execution
        path_input (string): path of the file that contains the classifications
        path_output (string): path of the file that will persist the metrics
        formula (string): mean_max | mean_mean
        append (boolean): true | false
    """
    import csv
    import json
    from metrics import accuracy, precision, recall, f1, specificity

    # loading results
    with open(path_input) as data_file:
        data = json.load(data_file)

    # computing metrics
    tp = tn = fp = fn = 0
    for item in data:
        positive = item['values'][formula]['positive']
        negative = item['values'][formula]['negative']
        if positive > negative:
            if item['values']['label'] == 'positive':
                tp += 1
            else:
                fp += 1
        elif positive < negative:
            if item['values']['label'] == 'negative':
                tn += 1
            else:
                fn += 1
        else:
            # equal similarities cannot be classified either way
            raise Exception(
                "Positive similarity equals negative similarity for news " +
                str(item['id']))

    # avoid shadowing the imported metric functions with the computed values
    accuracy_val = accuracy(tp, tn, fp, fn)
    recall_val = recall(tp, fn)
    precision_val = precision(tp, fp)
    f1_val = f1(precision_val, recall_val)
    specificity_val = specificity(tn, fp)

    # persisting the results
    with open(path_output, 'a' if append else 'w') as csvfile:
        writer = csv.writer(csvfile, delimiter=',')
        if not append:
            writer.writerow([
                'execution_id', 'tp', 'tn', 'fp', 'fn', 'accuracy',
                'precision', 'recall', 'f1', 'specificity'
            ])
        writer.writerow([
            execution_id, tp, tn, fp, fn, accuracy_val, precision_val,
            recall_val, f1_val, specificity_val
        ])
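Example 1 assumes a local metrics module exposing accuracy, precision, recall, f1 and specificity built from confusion-matrix counts. That module is not shown on this page; a minimal sketch consistent with the call signatures above (the zero-division guards are an assumption) could be:

# metrics.py -- hypothetical sketch matching the signatures used in Example 1
def accuracy(tp, tn, fp, fn):
    total = tp + tn + fp + fn
    return (tp + tn) / total if total else 0.0

def precision(tp, fp):
    return tp / (tp + fp) if (tp + fp) else 0.0

def recall(tp, fn):
    return tp / (tp + fn) if (tp + fn) else 0.0

def f1(precision_val, recall_val):
    denom = precision_val + recall_val
    return 2 * precision_val * recall_val / denom if denom else 0.0

def specificity(tn, fp):
    return tn / (tn + fp) if (tn + fp) else 0.0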
Example 2
def test(model, test_inputs, test_labels):
    """
    :param model: tf.keras.Model inherited data type
        model being evaluated
    :param test_inputs: Numpy Array - shape (num_images, imsize, imsize, channels)
        input images to test on
    :param test_labels: Numpy Array - shape (num_images, 2)
        ground truth labels, one-hot encoded
    :return: float, float, float, float
        returns dice score, sensitivity value (0.5 threshold), specificity value
        (0.5 threshold), and precision value, all of which are in the range [0, 1]
    """
    BATCH_SZ = model.batch_size
    indices = np.arange(test_inputs.shape[0]).tolist()
    all_logits = None
    for i in range(0, test_labels.shape[0], BATCH_SZ):
        images = test_inputs[indices[i:i + BATCH_SZ]]
        logits = model(images)
        if all_logits is None:
            all_logits = logits
        else:
            all_logits = np.concatenate([all_logits, logits], axis=0)
    # NOTE: if the dataset size is not divisible by the batch size, the final
    # slice produces a partial batch, which can fail for models that require a
    # fixed batch size.
    sensitivity_val1 = sensitivity(test_labels, all_logits, threshold=0.15)
    sensitivity_val2 = sensitivity(test_labels, all_logits, threshold=0.3)
    sensitivity_val3 = sensitivity(test_labels, all_logits, threshold=0.5)
    specificity_val1 = specificity(test_labels, all_logits, threshold=0.15)
    specificity_val2 = specificity(test_labels, all_logits, threshold=0.3)
    specificity_val3 = specificity(test_labels, all_logits, threshold=0.5)

    dice = dice_coef(test_labels, all_logits)
    precision_val = precision(test_labels, all_logits)
    print("Sensitivity 0.15: {}, Sensitivity 0.3: {}, Sensitivity 0.5: {}".format(
        sensitivity_val1, sensitivity_val2, sensitivity_val3))
    print("Specificity 0.15: {}, Specificity 0.3: {}, Specificity 0.5: {}".format(
        specificity_val1, specificity_val2, specificity_val3))
    print("DICE: {}, Precision: {}".format(dice, precision_val))

    return dice.numpy(), sensitivity_val3, specificity_val3, precision_val
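The sensitivity and specificity helpers called in Example 2 take one-hot labels, predicted scores, and a threshold keyword; their definitions are not shown here. A minimal NumPy sketch consistent with that signature, assuming column 1 of the one-hot encoding is the positive class, might be:

import numpy as np

# Hypothetical helpers matching sensitivity(labels, logits, threshold=...) as
# called in Example 2; column 1 is assumed to be the positive class.
def sensitivity(labels, scores, threshold=0.5):
    y_true = np.asarray(labels)[:, 1] == 1
    y_pred = np.asarray(scores)[:, 1] >= threshold
    tp = np.sum(y_true & y_pred)
    fn = np.sum(y_true & ~y_pred)
    return tp / (tp + fn) if (tp + fn) else 0.0

def specificity(labels, scores, threshold=0.5):
    y_true = np.asarray(labels)[:, 1] == 1
    y_pred = np.asarray(scores)[:, 1] >= threshold
    tn = np.sum(~y_true & ~y_pred)
    fp = np.sum(~y_true & y_pred)
    return tn / (tn + fp) if (tn + fp) else 0.0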
Example 4
def train(model, generator, verbose=False):
    """trains the model for one epoch

    :param model: tf.keras.Model inherited data type
        model being trained 
    :param generator: BalancedDataGenerator
        a data generator which runs preprocessing and returns batches accessed
        by integer indexing (i.e. generator[0] returns the first batch of inputs
        and labels)
    :param verbose: boolean
        whether to output the dice score every batch
    :return: list
        list of losses from every batch of training
    """
    BATCH_SZ = model.batch_size
    train_steps = generator.steps_per_epoch
    loss_list = []
    for i in range(train_steps):
        images, labels = generator[i]
        with tf.GradientTape() as tape:
            logits = model(images)
            loss = model.loss_function(labels, logits)
        if i % 4 == 0 and verbose:
            sensitivity_val = sensitivity(labels, logits)
            specificity_val = specificity(labels, logits)
            precision_val = precision(labels, logits)
            train_dice = dice_coef(labels, logits)
            print("Scores on training batch after {} training steps".format(i))
            print("Sensitivity1: {}, Specificity: {}".format(
                sensitivity_val, specificity_val))
            print("Precision: {}, DICE: {}\n".format(precision_val,
                                                     train_dice))

        loss_list.append(loss)
        gradients = tape.gradient(loss, model.trainable_variables)
        model.optimizer.apply_gradients(
            zip(gradients, model.trainable_variables))

    return loss_list
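dice_coef appears throughout these examples (and Example 2 calls .numpy() on its result, suggesting a TensorFlow implementation), but its definition is not shown. A common soft-Dice formulation, given here as a sketch rather than the exact function these snippets use, is:

import tensorflow as tf

def dice_coef(y_true, y_pred, smooth=1.0):
    """Soft Dice coefficient; `smooth` avoids division by zero."""
    y_true_f = tf.reshape(tf.cast(y_true, tf.float32), [-1])
    y_pred_f = tf.reshape(tf.cast(y_pred, tf.float32), [-1])
    intersection = tf.reduce_sum(y_true_f * y_pred_f)
    return (2.0 * intersection + smooth) / (
        tf.reduce_sum(y_true_f) + tf.reduce_sum(y_pred_f) + smooth)

def dice_loss(y_true, y_pred):
    # a Dice-based loss is one plausible choice for model.loss_function in Example 4
    return 1.0 - dice_coef(y_true, y_pred)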
Example 5
    transform_path0 = os.path.join(result_path, 'TransformParameters.0.txt')
    transform_path1 = os.path.join(result_path, 'TransformParameters.1.txt')
    final_transform_path = os.path.join(result_path, 'transform_pathfinal.txt')

    # Change FinalBSplineInterpolationOrder to 0 for binary mask transformation
    TransformParameterFileEditor(transform_path1, transform_path0,
                                 final_transform_path).modify_transform_parameter_file()

    # Make a new transformix object tr with the CORRECT PATH to transformix
    tr = elastix.TransformixInterface(parameters=final_transform_path,
                                      transformix_path=TRANSFORMIX_PATH)

    transformed_pr_path = tr.transform_image(pr_image_path, output_dir=result_path)
    image_array_tpr = sitk.GetArrayFromImage(sitk.ReadImage(transformed_pr_path))

    log_path = os.path.join(result_path, 'IterationInfo.1.R3.txt')
    log = elastix.logfile(log_path)

    DSC.append(dice_coef(image_array_opr, image_array_tpr))
    SNS.append(sensitivity(image_array_opr, image_array_tpr))
    SPC.append(specificity(image_array_opr, image_array_tpr))
    finalMI.append(statistics.mean(log['metric'][-50:-1]))

fig, (ax1, ax2, ax3) = plt.subplots(1, 3, figsize=(15, 5))
ax1.scatter(finalMI, DSC)
ax1.set_title("DSC")
ax2.scatter(finalMI, SNS)
ax2.set_title("SNS")
ax3.scatter(finalMI, SPC)
ax3.set_title("SPC")
plt.show()
def get_scores(y_true, y_predict):
    return (dice_coef(y_true, y_predict), sensitivity(y_true, y_predict),
            specificity(y_true, y_predict), MeanSurfaceDistance(y_true, y_predict),
            mutual_information(y_true, y_predict), rmse(y_true, y_predict))
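get_scores also calls mutual_information and rmse, which are not defined on this page. A common histogram-based mutual information sketch (not necessarily the implementation used here; the bin count is an assumption) is:

import numpy as np

def mutual_information(a, b, bins=32):
    """Hypothetical sketch: mutual information from a joint intensity histogram."""
    joint_hist, _, _ = np.histogram2d(np.ravel(a), np.ravel(b), bins=bins)
    pxy = joint_hist / np.sum(joint_hist)
    px = np.sum(pxy, axis=1)
    py = np.sum(pxy, axis=0)
    px_py = np.outer(px, py)
    nonzero = pxy > 0
    return float(np.sum(pxy[nonzero] * np.log(pxy[nonzero] / px_py[nonzero])))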
Example 7
# fig, (ax6, ax7) = plt.subplots(1,2)
# ScrollView(J_binarized).plot(ax6, vmin=0, vmax=1)
# plt.title('Jacobian\ndeterminant')
#
# ScrollView(image_array_J).plot(ax7)
# ax7.set_title('Jacobian\ndeterminant')

fig, (ax8, ax9, ax10) = plt.subplots(1, 3)

ScrollView(image_array_opr).plot(ax8, vmin=0, vmax=1)
ax8.set_title("Unseen segmentation")

transformed_pr_path = tr.transform_image(pr_image_path, output_dir=r'results')
itk_image_tpr = sitk.ReadImage(transformed_pr_path)
image_array_tpr = sitk.GetArrayFromImage(itk_image_tpr)

ScrollView(image_array_tpr).plot(ax9, vmin=0, vmax=1)
ax9.set_title("Transformed patient segmentation")

segmentation_abs = abs(image_array_tpr - image_array_opr)
ScrollView(segmentation_abs).plot(ax10, vmin=0, vmax=1)
ax10.set_title("Absolute differences")

DSC = dice_coef(image_array_opr, image_array_tpr)
SNS = sensitivity(image_array_opr, image_array_tpr)
SPC = specificity(image_array_opr, image_array_tpr)

print("Dice coefficient is %.2f, sensitivity is %.2f, specificity is %.2f" %
      (DSC, SNS, SPC))

plt.show()
Example 8
def main():
    # Set hyperparameters
    num_folds = 100
    label_name = "1"

    # Specify data location
    data_path = "Data/test_data.csv"

    # Load data to table
    df = pd.read_csv(data_path, sep=";", index_col=0)

    # Check if any labels are missing
    print("Number of missing values:\n", df.isnull().sum())
    print()

    # Only keep first instance if multiple instances have the same key
    num_instances_before = len(df)
    df = df[~df.index.duplicated(keep="first")]
    num_instances_diff = num_instances_before - len(df)
    if num_instances_diff > 0:
        print(
            "Warning: {} instances removed due to duplicate keys - only keeping first occurrence!"
            .format(num_instances_diff))

    # Perform standardized preprocessing
    preprocessor = TabularPreprocessor()
    df = preprocessor.fit_transform(df)

    # Display bar chart with number of samples per class
    # seaborn.countplot(x=label_name, data=df)
    # plt.title("Original class frequencies")
    # plt.savefig("Results/original_class_frequencies.png")
    # plt.close()

    # Separate data into training and test
    y = df[label_name]
    x = df.drop(label_name, axis="columns")

    # Get samples per class
    print("Samples per class")
    for (label, count) in zip(*np.unique(y, return_counts=True)):
        print("{}: {}".format(label, count))
    print()

    # Get number of classes
    num_classes = len(np.unique(df[label_name].values))

    # Setup classifiers
    knn = KNeighborsClassifier(weights="distance")
    knn_param_grid = {
        "n_neighbors":
            [int(val) for val in np.round(np.sqrt(x.shape[1])) + np.arange(5) + 1] +
            [int(val) for val in np.round(np.sqrt(x.shape[1])) - np.arange(5) if val >= 1],
        "p": np.arange(1, 5)
    }

    dt = DecisionTreeClassifier()
    dt_param_grid = {
        "criterion": ["gini", "entropy"],
        "splitter": ["best", "random"],
        "max_depth": np.arange(1, 20),
        "min_samples_split": [2, 4, 6],
        "min_samples_leaf": [1, 3, 5, 6],
        "max_features": ["auto", "sqrt", "log2"]
    }

    rf = RandomForestClassifier(n_estimators=100,
                                criterion="entropy",
                                max_depth=5,
                                min_samples_split=5,
                                min_samples_leaf=2)
    rf_param_grid = {}

    nn = MLPClassifier(hidden_layer_sizes=(32, 64, 32), activation="relu")
    nn_param_grid = {}

    clfs = {
        "knn": {
            "classifier": knn,
            "parameters": knn_param_grid
        },
        "dt": {
            "classifier": dt,
            "parameters": dt_param_grid
        },
        "rf": {
            "classifier": rf,
            "parameters": rf_param_grid
        },
        "nn": {
            "classifier": nn,
            "parameters": nn_param_grid
        }
    }

    clfs_performance = {"acc": [], "sns": [], "spc": [], "auc": []}

    # Initialize result table
    results = pd.DataFrame(index=list(clfs.keys()))

    # Iterate over classifiers
    for clf in clfs:

        # Initialize cumulative confusion matrix and fold-wise performance containers
        cms = np.zeros((num_classes, num_classes))
        performance_foldwise = {"acc": [], "sns": [], "spc": [], "auc": []}

        # Iterate over MCCV
        for fold_index in np.arange(num_folds):

            # Split into training and test data
            x_train, x_test, y_train, y_test = train_test_split(
                x, y, test_size=0.15, stratify=y, random_state=fold_index)

            # Perform standardization and feature imputation
            intra_fold_preprocessor = TabularIntraFoldPreprocessor(
                k="automated", normalization="standardize")
            intra_fold_preprocessor = intra_fold_preprocessor.fit(x_train)
            x_train = intra_fold_preprocessor.transform(x_train)
            x_test = intra_fold_preprocessor.transform(x_test)

            # Perform (ANOVA) feature selection
            selected_indices, x_train, x_test = univariate_feature_selection(
                x_train.values,
                y_train.values,
                x_test.values,
                score_func=f_classif,
                num_features="log2n")

            # # Random undersampling
            # rus = RandomUnderSampler(random_state=fold_index, sampling_strategy=0.3)
            # x_train, y_train = rus.fit_resample(x_train, y_train)

            # SMOTE
            smote = SMOTE(random_state=fold_index, sampling_strategy=1)
            x_train, y_train = smote.fit_resample(x_train, y_train)

            # Setup model
            model = clfs[clf]["classifier"]
            model.random_state = fold_index

            # Tune hyperparameters and keep the model trained with the best set of hyperparameters
            optimized_model = RandomizedSearchCV(
                model,
                param_distributions=clfs[clf]["parameters"],
                cv=5,
                random_state=fold_index)
            optimized_model.fit(x_train, y_train)

            # Predict test data using trained model
            y_pred = optimized_model.predict(x_test)

            # Compute performance
            cm = confusion_matrix(y_test, y_pred)
            acc = accuracy_score(y_test, y_pred)
            sns = metrics.sensitivity(y_test, y_pred)
            spc = metrics.specificity(y_test, y_pred)
            auc = metrics.roc_auc(y_test, y_pred)

            # Append performance to fold-wise and overall containers
            cms += cm
            performance_foldwise["acc"].append(acc)
            performance_foldwise["sns"].append(sns)
            performance_foldwise["spc"].append(spc)
            performance_foldwise["auc"].append(auc)

        # Calculate overall performance
        for metric in performance_foldwise:
            avg_metric = np.round(np.mean(performance_foldwise[metric]), 2)
            clfs_performance[metric].append(avg_metric)

        # Display overall performances
        print("== {} ==".format(clf))
        print("Cumulative CM:\n", cms)
        for metric in clfs_performance:
            print("Avg {}: {}".format(metric, clfs_performance[metric][-1]))
        print()

        # Display confusion matrix
        # sns.heatmap(cms, annot=True, cmap="Blues", fmt="g")
        # plt.xlabel("Predicted")
        # plt.ylabel("Actual")
        # plt.title("{} - Confusion matrix".format(clf))
        # plt.savefig("Results/confusion_matrix-{}.png".format(clf))
        # plt.close()

    # Append performance to result table
    for metric in clfs_performance:
        results[metric] = clfs_performance[metric]

    # Save result table
    results.to_csv("performances.csv", sep=";")
    results.plot.bar(rot=45).legend(loc="upper right")
    plt.savefig("performance.png".format(clf))
    plt.show()
    plt.close()
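main() relies on several project-specific helpers (TabularPreprocessor, TabularIntraFoldPreprocessor, univariate_feature_selection, and the metrics module) that are not shown on this page. As one example, a minimal sketch of what univariate_feature_selection could look like, built on scikit-learn's SelectKBest and assuming num_features="log2n" means log2 of the feature count, is:

import numpy as np
from sklearn.feature_selection import SelectKBest, f_classif

def univariate_feature_selection(x_train, y_train, x_test,
                                 score_func=f_classif, num_features="log2n"):
    """Hypothetical sketch: select the top-k features on the training fold only."""
    if num_features == "log2n":
        k = max(1, int(np.round(np.log2(x_train.shape[1]))))
    else:
        k = int(num_features)
    selector = SelectKBest(score_func=score_func, k=k)
    x_train_selected = selector.fit_transform(x_train, y_train)
    x_test_selected = selector.transform(x_test)
    selected_indices = selector.get_support(indices=True)
    return selected_indices, x_train_selected, x_test_selected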
masks = [sitk.GetArrayFromImage(sitk.ReadImage(os.path.join(data_path, patient, "prostaat.mhd")))
         for patient in patients]
images = [sitk.GetArrayFromImage(sitk.ReadImage(os.path.join(data_path, patient, "mr_bffe.mhd")))
          for patient in patients if patient.find("p1") > -1]

# specify unknown image & mask
unknown_mask = masks.pop()
unknown_image = images.pop()

# calculate mean of masks (only used to visualize the mean mask)
mask_mean = np.sum(masks, axis=0) / np.shape(masks)[0]

# calculate majority voting combination of masks
st = time()
m1 = majority_voting(masks, 0.5)
d_m1 = time() - st  # elapsed time for majority voting

DSC_m1, SNS_m1, SPC_m1 = (dice_coef(unknown_mask, m1),
                          sensitivity(unknown_mask, m1),
                          specificity(unknown_mask, m1))

# calculate global weighted voting combination of masks
st = time()
w1 = global_weighted_voting(images, masks, unknown_image, 0.5)
d_w1 = time() - st  # elapsed time for global weighted voting

DSC_w1, SNS_w1, SPC_w1 = (dice_coef(unknown_mask, w1),
                          sensitivity(unknown_mask, w1),
                          specificity(unknown_mask, w1))

# calculate local weighted voting combination of masks
st = time()
g1 = local_weighted_voting(images, masks, unknown_image, 0.5, max_idx=50000)
d_g1 = time() - st  # elapsed time for local weighted voting

DSC_g1, SNS_g1, SPC_g1 = (dice_coef(unknown_mask, g1),
                          sensitivity(unknown_mask, g1),
                          specificity(unknown_mask, g1))
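The label-fusion helpers called above (majority_voting, global_weighted_voting, local_weighted_voting) are defined elsewhere. A plausible sketch of the simplest one, majority_voting, which this snippet calls with a 0.5 threshold, is given below as an assumption rather than the original implementation:

import numpy as np

def majority_voting(masks, threshold=0.5):
    """Hypothetical sketch: fuse binary masks by voxel-wise mean and threshold."""
    mean_mask = np.mean(np.stack(masks, axis=0), axis=0)
    return (mean_mask >= threshold).astype(np.uint8)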