def test_classifier_chain_vs_independent_models():
    # Verify that an ensemble of classifier chains (each of length
    # N) can achieve a higher Jaccard similarity score than N independent
    # models
    X, Y = generate_multilabel_dataset_with_correlations()
    X_train = X[:600, :]
    X_test = X[600:, :]
    Y_train = Y[:600, :]
    Y_test = Y[600:, :]

    ovr = OneVsRestClassifier(LogisticRegression())
    ovr.fit(X_train, Y_train)
    Y_pred_ovr = ovr.predict(X_test)

    chain = ClassifierChain(LogisticRegression())
    chain.fit(X_train, Y_train)
    Y_pred_chain = chain.predict(X_test)

    assert_greater(jaccard_score(Y_test, Y_pred_chain, average='samples'),
                   jaccard_score(Y_test, Y_pred_ovr, average='samples'))


def test_base_chain_crossval_fit_and_predict():
    # Fit chain with cross_val_predict and verify predict
    # performance
    X, Y = generate_multilabel_dataset_with_correlations()

    for chain in [ClassifierChain(LogisticRegression()),
                  RegressorChain(Ridge())]:
        chain.fit(X, Y)
        chain_cv = clone(chain).set_params(cv=3)
        chain_cv.fit(X, Y)
        Y_pred_cv = chain_cv.predict(X)
        Y_pred = chain.predict(X)

        assert Y_pred_cv.shape == Y_pred.shape
        assert not np.all(Y_pred == Y_pred_cv)
        if isinstance(chain, ClassifierChain):
            assert jaccard_score(Y, Y_pred_cv, average='samples') > .4
        else:
            assert mean_squared_error(Y, Y_pred_cv) < .25
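The two tests above rely on a helper, generate_multilabel_dataset_with_correlations, defined elsewhere in the test module. A minimal, hypothetical stand-in (not the actual scikit-learn fixture) that produces correlated multilabel targets could look like this:

# Hypothetical stand-in for the test helper above -- a sketch only; the real
# scikit-learn fixture lives in the test module and may differ.
import numpy as np
from sklearn.datasets import make_multilabel_classification


def generate_multilabel_dataset_with_correlations(random_state=0):
    # Build a multilabel problem, then append a label that is a function of
    # the others so the targets are strongly correlated.
    X, Y = make_multilabel_classification(n_samples=1000, n_features=20,
                                          n_classes=5, n_labels=2,
                                          random_state=random_state)
    extra = (Y.sum(axis=1) % 2).reshape(-1, 1)  # parity of the other labels
    return X, np.hstack([Y, extra])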
Example #3
def get_jaccard_score(y_true, y_pred, params=None):
    if params:
        return jaccard_score(y_true, y_pred, **params)
    return round(jaccard_score(y_true, y_pred), 3)
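A quick usage sketch for the helper above, assuming binary 0/1 label vectors (the inputs below are made up):

# Usage sketch (hypothetical inputs). With params=None the score uses
# jaccard_score's defaults and is rounded to three decimals.
from sklearn.metrics import jaccard_score

y_true = [1, 0, 1, 1, 0, 1]
y_pred = [1, 0, 0, 1, 0, 1]
print(get_jaccard_score(y_true, y_pred))                        # 0.75
print(get_jaccard_score(y_true, y_pred, {'average': 'macro'}))  # per-class mean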
Example #4
ys_t = Split(n_targets, axis=1)(y_t)
ys_p = []
for j, k in enumerate(order):
    x_stacked = ColumnStack()(inputs=[x, *ys_p[:j]])
    ys_t[k] = Lambda(np.squeeze, axis=1)(ys_t[k])
    ys_p.append(LogisticRegression(solver="lbfgs")(x_stacked, ys_t[k]))

ys_p = [ys_p[order.index(j)] for j in range(n_targets)]
y_p = ColumnStack()(ys_p)

model = Model(inputs=x, outputs=y_p, targets=y_t)
plot_model(model, filename="classifier_chain.png",
           dpi=96)  # This might take a few seconds

# ------- Train model
model.fit(X_train, Y_train)

# ------- Evaluate model
Y_train_pred = model.predict(X_train)
Y_test_pred = model.predict(X_test)

print(
    "Jaccard score on train data:",
    jaccard_score(Y_train, Y_train_pred, average="samples"),
)
print(
    "Jaccard score on test data:",
    jaccard_score(Y_test, Y_test_pred, average="samples"),
)
Example #5
def jaccard_distance(KCA,KCs):
    distance = [1-jaccard_score(KCA,np.array(KCs[odor].values)) for odor in KCs.columns]
    return distance
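A small usage sketch, assuming KCA is a binary Kenyon-cell activation vector and KCs is a pandas DataFrame with one binary column per odor (the toy data below is made up):

# Toy usage (hypothetical data): one binary response vector per odor column.
import numpy as np
import pandas as pd
from sklearn.metrics import jaccard_score

KCA = np.array([1, 0, 1, 1, 0])
KCs = pd.DataFrame({'odor_a': [1, 0, 1, 0, 0],
                    'odor_b': [0, 1, 1, 1, 0]})
print(jaccard_distance(KCA, KCs))  # one Jaccard distance per odor column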
Example #6
from sklearn.linear_model import LogisticRegression

if __name__ == "__main__":
	# Load a multi-label dataset from https://www.openml.org/d/40597
	X, Y = fetch_openml('yeast', version=4, return_X_y=True)
	Y = Y == 'TRUE'
	X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=.2,
														random_state=0)

	# Fit an independent logistic regression model for each class using the
	# OneVsRestClassifier wrapper.
	base_lr = LogisticRegression(solver='lbfgs')
	ovr = OneVsRestClassifier(base_lr)
	ovr.fit(X_train, Y_train)
	Y_pred_ovr = ovr.predict(X_test)
	ovr_jaccard_score = jaccard_score(Y_test, Y_pred_ovr, average='samples')

	# Fit an ensemble of logistic regression classifier chains and take the
	# average prediction of all the chains.
	chains = [ClassifierChain(base_lr, order='random', random_state=i)
			  for i in range(10)]
	for chain in chains:
		chain.fit(X_train, Y_train)

	Y_pred_chains = np.array([chain.predict(X_test) for chain in
							  chains])
	chain_jaccard_scores = [jaccard_score(Y_test, Y_pred_chain >= .5,
										  average='samples')
							for Y_pred_chain in Y_pred_chains]

	Y_pred_ensemble = Y_pred_chains.mean(axis=0)
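The averaged chain outputs above are probabilities rather than hard labels, so the ensemble needs to be thresholded before it can be scored; a sketch of that final step (0.5 threshold assumed), consistent with how the individual chains were scored:

# Threshold the averaged per-chain probabilities at 0.5 and score the ensemble
# with the same samples-averaged Jaccard used for the other models.
ensemble_jaccard_score = jaccard_score(Y_test, Y_pred_ensemble >= .5,
                                       average='samples')
model_scores = [ovr_jaccard_score] + chain_jaccard_scores + [ensemble_jaccard_score]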
Example #7
    nist_fps = csr_matrix(
        nist_fps)[:, fpkeep].todense()  # fingerprints of nist compounds

    output = pd.DataFrame(columns=['smiles', 'mass', 'fp_score', 'rank'])
    for i in tqdm(range(len(smiles))):
        smi = smiles[i]
        std_smi = Chem.MolToSmiles(Chem.MolFromSmiles(smi))
        mass = molwt[i]
        pred_fp = pred_fps[i]
        try:
            true_fp = np.array(get_cdk_fingerprints(
                std_smi))  # true fingerprint of the "unknown"
        except Exception:
            continue
        true_fp = true_fp[fpkeep]
        true_score = jaccard_score(pred_fp,
                                   true_fp)  # score of the true compound

        candidate = np.where(
            np.abs(nist_masses - mass) < 5)[0]  # candidate NIST compounds (mass within 5)
        cand_smi = nist_smiles[candidate]
        rep_ind = np.where(
            cand_smi == std_smi)[0]  # if the compound is in NIST, remove it
        candidate = np.delete(candidate, rep_ind)

        fp_scores = get_fp_score(
            pred_fp, nist_fps[candidate, :])  # scores of all candidates
        rank = len(np.where(fp_scores > true_score)[0]) + 1

        output.loc[len(output)] = [smi, mass, true_score, rank]
    output.to_csv('Discussion/MassBank_test/results/DeepEI_massbank.csv')
Example #8
y_pred = classifier.predict(X_test)

# Convert the y_test object into a plain Python list
yt = []
for ele in y_test:
    yt.append(ele)

# Calculate the efficiency
count = 0
for i in range(len(yt)):
    if y_pred[i] == yt[i]:
        count += 1
eff = count / len(yt)
print('Efficiency: {}'.format(eff))
print('F1 score: {}'.format(f1_score(yt, y_pred, average='macro')))
print('Precision score: {}'.format(precision_score(yt, y_pred,
                                                   average='macro')))
print('Jaccard score: {}'.format(jaccard_score(yt, y_pred, average=None)))

# Plot the efficiency data onto a graph
plt.title('HRV model 3 class')
plt.plot(y_pred)
plt.plot(yt)
plt.show()

# Construct a confusion matrix
from sklearn.metrics import confusion_matrix
cm = confusion_matrix(y_test, y_pred)

# End of model
print('End of model')
Example #9
 def metrics_for_whole_test(self):
     ground_truth_array = np.load(self.ground_truth_array_path)
     predicted_images_array = np.load(self.predicted_images_one_hot)
     ground_truth_array_reshaped = np.reshape(
         ground_truth_array,
         (ground_truth_array.shape[0] * ground_truth_array.shape[1]**2,
          self.number_of_classes))
     predicted_images_array_reshaped = np.reshape(
         predicted_images_array,
         (predicted_images_array.shape[0] *
          predicted_images_array.shape[1]**2, self.number_of_classes))
     ground_truth_array_a = np.argmax(ground_truth_array_reshaped, axis=1)
     predicted_images_array_a = np.argmax(predicted_images_array_reshaped,
                                          axis=1)
     acc = accuracy_score(ground_truth_array_a, predicted_images_array_a)
     class_report = classification_report(
         ground_truth_array_a,
         predicted_images_array_a,
         labels=self.labels,
         target_names=self.conf_matrix_labels)
     conf_matrix = confusion_matrix(ground_truth_array_a,
                                    predicted_images_array_a,
                                    labels=self.labels)
     jacc_score = jaccard_score(ground_truth_array_a,
                                predicted_images_array_a,
                                labels=self.labels,
                                average=None)
     # dice_score = prediction.dice_coef(ground_truth_array_reshaped, predicted_images_array_reshaped)
     # print ('***************FOR ALL IMAGES METRICS ARE***************')
     # print ('Confusion matrix')
     # print (conf_matrix)
     # print ('Accuracy')
     # print (acc)
     # print ('Classification report')
     # print (class_report)
     # print ('Dice score (F1_score): {:.4f}'.format(dice_score))
     # print ('Jaccard index (IoU): {:.4f}'.format(jacc_score))
     class_report_dict = classification_report(
         ground_truth_array_a,
         predicted_images_array_a,
         labels=self.labels,
         target_names=self.conf_matrix_labels,
         output_dict=True)
     class_report_df = pd.DataFrame(class_report_dict).transpose().round(4)
     jacc_list = np.around(jacc_score, decimals=4).tolist()
     jacc_list = jacc_list + ['NaN', 'NaN', 'NaN']
     if self.number_of_classes == 2:
         acc_list = [acc.round(4), 'NaN', 'NaN', 'NaN', 'NaN']
     else:
         acc_list = [acc.round(4), 'NaN', 'NaN', 'NaN', 'NaN', 'NaN']
     class_report_df['Accuracy'], class_report_df['Jaccard'] = [
         acc_list, jacc_list
     ]
     confusion_matrix_df = pd.DataFrame(data=conf_matrix,
                                        index=self.conf_matrix_labels,
                                        columns=self.conf_matrix_labels)
     # df_combined = pd.concat([class_report_df, confusion_matrix_df], axis =0, ignore_index=True)
     class_report_csv_path = self.model_stat_folder + '/' + 'class_report.csv'
     class_report_df.to_csv(class_report_csv_path, index=True, header=True)
     confusion_matrix_csv_path = self.model_stat_folder + '/' + 'confusion_matrix.csv'
     confusion_matrix_df.to_csv(confusion_matrix_csv_path,
                                index=True,
                                header=True)
Example #10
for i in tqdm(range(len(valid_dataset))):
  im , lbl = valid_dataset[i]
  im = Variable(im.unsqueeze(0)).to(device)
  out = model(im)
  pred = out.max(1)[1].squeeze().cpu().data.numpy()
  true_labels.extend(lbl.flatten())
  predictions.extend(pred.flatten())


# Sensitivity (Recall), F1 Score, and Precision
print("Recall, F1 Score, and Precision:")
print(classification_report(true_labels,predictions,digits=3))

#Jaccard Similarity
print("Jaccard Score:")
print(jaccard_score(true_labels, predictions, average=None))
print(jaccard_score(true_labels, predictions, average="macro"))

#Accuracy
print("Accuracy Score:")
print(accuracy_score(true_labels, predictions, normalize=True))

#Specificity
CM = confusion_matrix(true_labels,predictions)

false_positives = []
true_negatives = []

for l in range(len(CM)):
    class_true_negative = 0
    class_false_positive = 0
Example #11
def _sgdclassifier(*,
                   train,
                   test,
                   x_predict=None,
                   metrics,
                   loss='hinge',
                   penalty='l2',
                   alpha=0.0001,
                   l1_ratio=0.15,
                   fit_intercept=True,
                   max_iter=1000,
                   tol=0.001,
                   shuffle=True,
                   verbose=0,
                   epsilon=0.1,
                   n_jobs=None,
                   random_state=None,
                   learning_rate='optimal',
                   eta0=0.0,
                   power_t=0.5,
                   early_stopping=False,
                   validation_fraction=0.1,
                   n_iter_no_change=5,
                   class_weight=None,
                   warm_start=False,
                   average=False):
    """For for info visit :
        https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.SGDClassifier.html#sklearn.linear_model.SGDClassifier
    """

    model = SGDClassifier(loss=loss,
                          penalty=penalty,
                          alpha=alpha,
                          l1_ratio=l1_ratio,
                          fit_intercept=fit_intercept,
                          max_iter=max_iter,
                          tol=tol,
                          shuffle=shuffle,
                          verbose=verbose,
                          epsilon=epsilon,
                          n_jobs=n_jobs,
                          random_state=random_state,
                          learning_rate=learning_rate,
                          eta0=eta0,
                          power_t=power_t,
                          early_stopping=early_stopping,
                          validation_fraction=validation_fraction,
                          n_iter_no_change=n_iter_no_change,
                          class_weight=class_weight,
                          warm_start=warm_start,
                          average=average)
    model.fit(train[0], train[1])
    model_name = 'SGDClassifier'
    y_hat = model.predict(test[0])

    if metrics == 'f1_score':
        accuracy = f1_score(test[1], y_hat)
    if metrics == 'jaccard_score':
        accuracy = jaccard_score(test[1], y_hat)
    if metrics == 'accuracy_score':
        accuracy = accuracy_score(test[1], y_hat)

    if x_predict is None:
        return (model_name, accuracy, None)

    y_predict = model.predict(x_predict)
    return (model_name, accuracy, y_predict)
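A minimal usage sketch for the wrapper above, with synthetic data; the (X, y) tuples follow the convention implied by model.fit(train[0], train[1]), and the sklearn imports used by the snippet are assumed to be in scope:

# Usage sketch (hypothetical data, for illustration only).
from sklearn.datasets import make_classification
from sklearn.model_selection import train_test_split

X, y = make_classification(n_samples=300, n_features=10, random_state=0)
X_tr, X_te, y_tr, y_te = train_test_split(X, y, test_size=0.3, random_state=0)
name, score, _ = _sgdclassifier(train=(X_tr, y_tr), test=(X_te, y_te),
                                metrics='jaccard_score')
print(name, score)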
Example #12
def jackass(str1, str2):
    asc1 = [ord(i) for i in str1]
    asc2 = [ord(i) for i in str2]
    result = jaccard_score(asc1, asc2, average='macro')
    return 100 - (result * 100)
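Note that jaccard_score here compares the two strings position by position through their character codes, so both strings must have the same length; a quick sketch:

# Quick sketch: equal-length strings compared as sequences of character codes.
print(jackass("hello", "hello"))  # identical strings -> 0.0
print(jackass("hello", "hellp"))  # nonzero distance; differs in one position
# jackass("hi", "hello") would raise, since jaccard_score needs equal lengths.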
Example #13
    pred = np.array(pred)
    indices = pred > 0
    comp = np.zeros(pred.shape)
    comp[indices] = 1

    for x in range(1, len(data)):
        pred = sess.run(prediction,
                        feed_dict={
                            text:
                            [bv.sequenceTranslate(data[0][0].split(), wdfull)]
                        })

        pred = np.array(pred)

        indices = pred > 0

        tmp = np.zeros(pred.shape)

        tmp[indices] = 1

        comp = np.concatenate((comp, tmp))

    truths = np.array([bv.translate(b[1], labelDict) for b in data])

    print('Hamming Loss:', hamming_loss(comp, truths))
    print('Zero One Loss:', zero_one_loss(comp, truths))
    print('Jaccard Score:', jaccard_score(comp, truths, average='samples'))
    print('F1-Score Micro:', f1_score(comp, truths, average='micro'))
    print('F1-Score Macro:', f1_score(comp, truths, average='macro'))
    print('Accuracy :', accuracy_score(comp, truths))
Example #14
def evaluateNetwork(dataset, dataloader, loss_to_use, CEloss, w_for_GDL, tversky_loss_alpha, tversky_loss_beta,
                    focal_tversky_gamma, epoch, epochs_switch, epochs_transition, nclasses, net,
                    flag_compute_mIoU=False, savefolder=""):
    """
    It evaluates the network on the validation set.  
    :param dataloader: Pytorch DataLoader to load the dataset for the evaluation.
    :param net: Network to evaluate.
    :param savefolder: if a folder is given the classification results are saved into this folder. 
    :return: all the computed metrics.
    """""

    ##### SETUP THE NETWORK #####

    USE_CUDA = torch.cuda.is_available()

    if USE_CUDA:
        device = torch.device("cuda")
        net.to(device)
        torch.cuda.synchronize()

    ##### EVALUATION #####

    net.eval()  # set the network in evaluation mode

    batch_size = dataloader.batch_size

    CM = np.zeros((nclasses, nclasses), dtype=int)
    class_indices = list(range(nclasses))

    ypred_list = []
    ytrue_list = []
    loss_values = []
    with torch.no_grad():
        for k, data in enumerate(dataloader):

            batch_images, labels_batch, names = data['image'], data['labels'], data['name']

            txt = "Evaluation running.. {:.2f} % \r".format(((100.0 * k) / len(dataloader)))
            sys.stdout.write(txt)

            if USE_CUDA:
                batch_images = batch_images.to(device)
                labels_batch = labels_batch.to(device)

            # N x K x H x W --> N: batch size, K: number of classes, H: height, W: width
            outputs = net(batch_images)

            # predictions size --> N x H x W
            values, predictions_t = torch.max(outputs, 1)

            if loss_to_use == "NONE":
                loss_values.append(0.0)
            else:
                loss = computeLoss(loss_to_use, CEloss, w_for_GDL, tversky_loss_alpha, tversky_loss_beta,
                                   focal_tversky_gamma, epoch, epochs_switch, epochs_transition, labels_batch, outputs)

                loss_values.append(loss.item())

            pred_cpu = predictions_t.cpu()
            labels_cpu = labels_batch.cpu()

            if flag_compute_mIoU:
                ypred_list.extend(pred_cpu.numpy().ravel())
                ytrue_list.extend(labels_cpu.numpy().ravel())

            # CONFUSION MATRIX, PREDICTIONS ARE PER-COLUMN, GROUND TRUTH CLASSES ARE PER-ROW
            for i in range(batch_size):
                pred_index = pred_cpu[i].numpy().ravel()
                true_index = labels_cpu[i].numpy().ravel()
                confmat = confusion_matrix(true_index, pred_index, labels=class_indices)
                CM += confmat

            # SAVE THE OUTPUT OF THE NETWORK
            for i in range(batch_size):

                if savefolder:
                    imgfilename = os.path.join(savefolder, names[i])
                    dataset.saveClassificationResult(batch_images[i].cpu(), outputs[i].cpu(), imgfilename)

    mean_loss = sum(loss_values) / len(loss_values)

    jaccard_s = 0.0

    if flag_compute_mIoU:
        ypred = np.array(ypred_list)
        del ypred_list
        ytrue = np.array(ytrue_list)
        del ytrue_list
        jaccard_s = jaccard_score(ytrue, ypred, average='weighted')

    # NORMALIZED CONFUSION MATRIX
    sum_row = CM.sum(axis=1)
    sum_row = sum_row.reshape((nclasses, 1))   # transform into column vector
    sum_row = sum_row + 1
    CMnorm = CM / sum_row    # divide each row using broadcasting


    # FINAL ACCURACY
    pixels_total = CM.sum()
    pixels_correct = np.sum(np.diag(CM))
    accuracy = float(pixels_correct) / float(pixels_total)

    metrics = {'ConfMatrix': CM, 'NormConfMatrix': CMnorm, 'Accuracy': accuracy, 'JaccardScore': jaccard_s}

    return metrics, mean_loss
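Because the function above already accumulates a confusion matrix with ground truth on the rows and predictions on the columns, the per-class IoU can also be recovered directly from CM; a small self-contained sketch of that computation (equivalent to jaccard_score(..., average=None) on the flattened labels):

# Sketch: per-class IoU (Jaccard) straight from a confusion matrix.
import numpy as np


def iou_from_confusion_matrix(CM):
    CM = np.asarray(CM, dtype=np.float64)
    tp = np.diag(CM)
    fp = CM.sum(axis=0) - tp      # predicted as the class, but wrong
    fn = CM.sum(axis=1) - tp      # belonging to the class, but missed
    denom = tp + fp + fn
    return np.divide(tp, denom, out=np.zeros_like(tp), where=denom > 0)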
Example #15
print(LR)

# Now we can predict using our test set:
yhat = LR.predict(X_test)
print(yhat)

# predict_proba returns estimates for all classes, ordered by the label of classes
# So, the first column is the probability of class 0, P(Y=0|X),
# and the second column is the probability of class 1, P(Y=1|X)
yhat_prob = LR.predict_proba(X_test)
print(yhat_prob)

# Evaluation
# jaccard index
from sklearn.metrics import jaccard_score
print(jaccard_score(y_test, yhat, pos_label=0))
# confusion matrix
from sklearn.metrics import classification_report, confusion_matrix
import itertools


def plot_confusion_matrix(cm,
                          classes,
                          normalize=False,
                          title='Confusion matrix',
                          cmap=plt.cm.Blues):
    """
    This function prints and plots the confusion matrix.
    Normalization can be applied by setting `normalize=True`.
    """
    if normalize:
Example #16
def get_iou(pred, gt):
    pred = np.asarray(pred.detach().cpu()).flatten()
    pred = np.around(pred)
    gt = np.asarray(gt.detach().cpu()).flatten()
    iou = jaccard_score(y_pred=pred, y_true=gt)
    return iou
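A toy call of get_iou, assuming pred holds per-pixel probabilities and gt a binary mask, both as torch tensors (numpy and jaccard_score are assumed to be imported as in the snippet):

# Toy usage (hypothetical tensors): probabilities are rounded to {0, 1}
# before the binary Jaccard/IoU is computed.
import torch

pred = torch.tensor([[0.9, 0.2], [0.7, 0.1]])  # model probabilities
gt = torch.tensor([[1.0, 0.0], [1.0, 1.0]])    # ground-truth mask
print(get_iou(pred, gt))  # intersection 2, union 3 -> ~0.667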
print("confusion_matrix \n", cm)

print("cm.diagonal() ", cm.diagonal())
print("cm.sum(axis=0) ", cm.sum(axis=0))

accuracy = cm.diagonal() / cm.sum(axis=0)  # note: TP / predicted count per class, i.e. per-class precision
print("accuracy ", accuracy)

print("accuracy_score ", metrics.accuracy_score(y_test, predicted))
print("macro metrics.f1_score ",
      metrics.f1_score(y_test, predicted, average="macro"))
print("micro metrics.f1_score ",
      metrics.f1_score(y_test, predicted, average="micro"))
print(
    "macro metrics.jaccard_score ",
    metrics.jaccard_score(y_test, predicted, average="macro"),
)
print(
    "micro metrics.jaccard_score ",
    metrics.jaccard_score(y_test, predicted, average="micro"),
)
print("metrics.classification_report \n",
      metrics.classification_report(y_test, predicted))
print(
    "metrics.precision_score ",
    metrics.precision_score(y_test, predicted, average="macro"),
)
print("metrics.recall_score ",
      metrics.recall_score(y_test, predicted, average="micro"))
print(
    "metrics.fbeta_score ",
Example #18
def evaluate(model,
             test_x,
             test_y,
             output_folder,
             title,
             class_specific=False,
             all_labels=None,
             weight_vector=None):
    # Ensure that labels is a plain list (or None)
    if all_labels is None:
        labels = None
    else:
        labels = list(all_labels)

    if weight_vector is None:
        y_predicted = model.predict(test_x)
        y_predicted_max = np.argmax(y_predicted, axis=1)
    else:
        # Variant Output Shift
        y_predicted = model.predict(test_x)
        predicted_shift = list()
        for e in y_predicted:
            predicted_shift.append(shift_output(e, weight_vector))
        y_predicted_max = np.argmax(predicted_shift, axis=1)

    y_test_max = np.argmax(test_y, axis=1)

    # Print classification report
    report = classification_report(y_test_max,
                                   y_predicted_max,
                                   labels=labels,
                                   output_dict=True,
                                   digits=5)
    report_df = pd.DataFrame(report)
    report_df.to_csv(os.path.join(output_folder, 'report_' + title + '.csv'),
                     sep=' ',
                     header=True,
                     mode='a')

    # Print confusion matrix
    cm = confusion_matrix(y_test_max, y_predicted_max, labels=labels)
    cm_df = pd.DataFrame(cm)
    cm_df.to_csv(os.path.join(output_folder, 'cm_' + title + '.csv'),
                 sep=' ',
                 header=True,
                 mode='a')

    metrics = dict()

    # Evaluate further metrics
    # =============================================================================
    #    Balanced Accuracy Score
    # =============================================================================
    metrics['Balanced Accuracy Score'] = balanced_accuracy_score(
        y_test_max, y_predicted_max)

    # =============================================================================
    #    Cohen Kappa Score
    # =============================================================================
    metrics['Cohen Kappa Score (No weighted)'] = cohen_kappa_score(
        y_predicted_max, y_test_max, weights=None)
    metrics['Cohen Kappa Score (Linear weighted)'] = cohen_kappa_score(
        y_predicted_max, y_test_max, weights='linear')
    metrics['Cohen Kappa Score (Quadratic weighted)'] = cohen_kappa_score(
        y_predicted_max, y_test_max, weights='quadratic')

    # =============================================================================
    #    Hinge Loss
    # =============================================================================
    metrics['Hinge Loss'] = hinge_loss(y_test_max, y_predicted, labels=labels)

    # =============================================================================
    #    Matthews Correlation Coefficient
    # =============================================================================
    metrics['Matthews Correlation Coefficient'] = matthews_corrcoef(
        y_test_max, y_predicted_max)

    # =============================================================================
    #    Top k Accuracy Score (does not work, To DO)
    # =============================================================================
    # print("\n Top k Accuracy: ")
    # print(top_k_accuracy_score(y_test_max, y_predicted_max, k=5))

    # =============================================================================
    #    The following also work in the multi label case
    # =============================================================================

    # =============================================================================
    #    Accuracy Score
    # =============================================================================
    metrics['Accuracy Score'] = accuracy_score(y_test_max, y_predicted_max)

    # =============================================================================
    #    F1 Score
    # =============================================================================
    metrics['F Score (Micro)'] = f1_score(y_test_max,
                                          y_predicted_max,
                                          average='micro')
    metrics['F Score (Macro)'] = f1_score(y_test_max,
                                          y_predicted_max,
                                          average='macro')
    metrics['F Score (Weighted)'] = f1_score(y_test_max,
                                             y_predicted_max,
                                             average='weighted')
    if class_specific:
        metrics['F Score (None, i.e. for each class)'] = f1_score(
            y_test_max, y_predicted_max, average=None)

    # =============================================================================
    #    ROC AUC Score (for multiclass problems sklearn only supports macro and weighted averages)
    # =============================================================================
    # ROC AUC only works if each label occurs at least once. Hence, we need to catch an exception
    print(y_test_max)
    try:
        metrics['ROC AUC Score (OVR) Macro'] = roc_auc_score(y_test_max,
                                                             y_predicted,
                                                             multi_class='ovr',
                                                             average='macro',
                                                             labels=labels)
        metrics['ROC AUC Score (OVR) Weighted'] = roc_auc_score(
            y_test_max,
            y_predicted,
            multi_class='ovr',
            average='weighted',
            labels=labels)
        metrics['ROC AUC Score (OVO) Macro'] = roc_auc_score(y_test_max,
                                                             y_predicted,
                                                             multi_class='ovo',
                                                             average='macro',
                                                             labels=labels)
        metrics['ROC AUC Score (OVO) Weighted'] = roc_auc_score(
            y_test_max,
            y_predicted,
            multi_class='ovo',
            average='weighted',
            labels=labels)
    except Exception:
        print("Cannot calculate ROC AUC Score!")
        pass

    # =============================================================================
    #    F Beta Score
    # =============================================================================
    metrics['F Beta Score (Micro) b=0.5'] = fbeta_score(y_test_max,
                                                        y_predicted_max,
                                                        average='micro',
                                                        beta=0.5)
    metrics['F Beta Score (Macro) b=0.5'] = fbeta_score(y_test_max,
                                                        y_predicted_max,
                                                        average='macro',
                                                        beta=0.5)
    metrics['F Beta Score (Weighted) b=0.5'] = fbeta_score(y_test_max,
                                                           y_predicted_max,
                                                           average='weighted',
                                                           beta=0.5)
    if class_specific:
        metrics[
            'F Beta Score (None, i.e. for each class) b=0.5'] = fbeta_score(
                y_test_max, y_predicted_max, average=None, beta=0.5)

    metrics['F Beta Score (Micro) b=1.5'] = fbeta_score(y_test_max,
                                                        y_predicted_max,
                                                        average='micro',
                                                        beta=1.5)
    metrics['F Beta Score (Macro) b=1.5'] = fbeta_score(y_test_max,
                                                        y_predicted_max,
                                                        average='macro',
                                                        beta=1.5)
    metrics['F Beta Score (Weighted) b=1.5'] = fbeta_score(y_test_max,
                                                           y_predicted_max,
                                                           average='weighted',
                                                           beta=1.5)
    if class_specific:
        metrics[
            'F Beta Score (None, i.e. for each class) b=1.5'] = fbeta_score(
                y_test_max, y_predicted_max, average=None, beta=1.5)

    # =============================================================================
    #    Hamming Loss
    # =============================================================================
    metrics['Hamming Loss'] = hamming_loss(y_test_max, y_predicted_max)

    # =============================================================================
    #    Jaccard Score
    # =============================================================================
    metrics['Jaccard Score (Micro)'] = jaccard_score(y_test_max,
                                                     y_predicted_max,
                                                     average='micro')
    metrics['Jaccard Score (Macro)'] = jaccard_score(y_test_max,
                                                     y_predicted_max,
                                                     average='macro')
    metrics['Jaccard Score (Weighted)'] = jaccard_score(y_test_max,
                                                        y_predicted_max,
                                                        average='weighted')
    if class_specific:
        metrics['Jaccard Score (None, i.e. for each class)'] = jaccard_score(
            y_test_max, y_predicted_max, average=None)

    # =============================================================================
    #    Log Loss
    # =============================================================================
    metrics['Log Loss'] = log_loss(y_test_max, y_predicted, labels=labels)

    # =============================================================================
    #    Precision Score
    # =============================================================================
    metrics['Precision Score (Micro)'] = precision_score(y_test_max,
                                                         y_predicted_max,
                                                         average='micro')
    metrics['Precision Score (Macro)'] = precision_score(y_test_max,
                                                         y_predicted_max,
                                                         average='macro')
    metrics['Precision Score (Weighted)'] = precision_score(y_test_max,
                                                            y_predicted_max,
                                                            average='weighted')
    if class_specific:
        metrics[
            'Precision Score (None, i.e. for each class)'] = precision_score(
                y_test_max, y_predicted_max, average=None)

    # =============================================================================
    #    Specificity Score
    # =============================================================================
    metrics['Specificity Score (Micro)'] = specificity_score(y_test_max,
                                                             y_predicted_max,
                                                             average='micro')
    metrics['Specificity Score (Macro)'] = specificity_score(y_test_max,
                                                             y_predicted_max,
                                                             average='macro')
    metrics['Specificity Score (Weighted)'] = specificity_score(
        y_test_max, y_predicted_max, average='weighted')
    if class_specific:
        metrics[
            'Specificity Score (None, i.e. for each class)'] = specificity_score(
                y_test_max, y_predicted_max, average=None)

    # =============================================================================
    #    Recall Score (also named Sensitivity Score). Hence, the Sensitivity Score values
    #   should be the same as the Recall Score values
    # =============================================================================
    metrics['Recall Score (Micro)'] = recall_score(y_test_max,
                                                   y_predicted_max,
                                                   average='micro')
    metrics['Recall Score (Macro)'] = recall_score(y_test_max,
                                                   y_predicted_max,
                                                   average='macro')
    metrics['Recall Score (Weighted)'] = recall_score(y_test_max,
                                                      y_predicted_max,
                                                      average='weighted')
    if class_specific:
        metrics['Recall Score (None, i.e. for each class)'] = recall_score(
            y_test_max, y_predicted_max, average=None)

    metrics['Sensitivity Score (Micro)'] = sensitivity_score(y_test_max,
                                                             y_predicted_max,
                                                             average='micro')
    metrics['Sensitivity Score (Macro)'] = sensitivity_score(y_test_max,
                                                             y_predicted_max,
                                                             average='macro')
    metrics['Sensitivity Score (Weighted)'] = sensitivity_score(
        y_test_max, y_predicted_max, average='weighted')
    if class_specific:
        metrics[
            'Sensitivity Score (None, i.e. for each class)'] = sensitivity_score(
                y_test_max, y_predicted_max, average=None)

    # =============================================================================
    #    Geometric Mean Score
    # =============================================================================
    metrics['Geometric Mean Score (Normal)'] = geometric_mean_score(
        y_test_max, y_predicted_max)
    metrics['Geometric Mean Score (Micro)'] = geometric_mean_score(
        y_test_max, y_predicted_max, average='micro')
    metrics['Geometric Mean Score (Macro)'] = geometric_mean_score(
        y_test_max, y_predicted_max, average='macro')
    metrics['Geometric Mean Score (Weighted)'] = geometric_mean_score(
        y_test_max, y_predicted_max, average='weighted')
    if class_specific:
        metrics[
            'Geometric Mean Score (None, i.e. for each class)'] = geometric_mean_score(
                y_test_max, y_predicted_max, average=None)

    # =============================================================================
    #    Zero one Loss
    # =============================================================================
    metrics['Zero One Loss'] = zero_one_loss(y_test_max, y_predicted_max)

    # =============================================================================
    #    Make Index Balanced Accuracy with
    # =============================================================================
    # print("\n MIBA with Matthews")
    # geo_mean = make_index_balanced_accuracy(alpha=0.5, squared=True)(hamming_loss)
    # print(geo_mean(y_test_max, y_predicted_max))
    return metrics
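For reference, a tiny self-contained example of how the micro, macro, and weighted Jaccard averages collected above can differ on an imbalanced problem (the labels below are made up):

# Minimal sketch: the three averaging modes on a small imbalanced example.
from sklearn.metrics import jaccard_score

y_true = [0, 0, 0, 0, 1, 2]
y_pred = [0, 0, 0, 1, 1, 1]
print(jaccard_score(y_true, y_pred, average=None))        # per-class scores
print(jaccard_score(y_true, y_pred, average='micro'))     # pools all classes
print(jaccard_score(y_true, y_pred, average='macro'))     # unweighted class mean
print(jaccard_score(y_true, y_pred, average='weighted'))  # weighted by support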
Example #19
X_train, X_test, y_train, y_test = train_test_split(X,
                                                    y,
                                                    test_size=0.2,
                                                    random_state=0)

# Build model
LR = LogisticRegression(C=0.01, solver='liblinear').fit(X_train, y_train)

# Using model to predict
yhat = LR.predict(X_test)
yhat_prob = LR.predict_proba(X_test)
print(yhat)
print(yhat_prob)

# Evaluation using Jaccard Index
print('average=None', metrics.jaccard_score(y_test, yhat, average=None))
print('micro', metrics.jaccard_score(y_test, yhat, average='micro'))
print('macro', metrics.jaccard_score(y_test, yhat, average='macro'))
print('weighted', metrics.jaccard_score(y_test, yhat, average='weighted'))

# Evaluation using confusion matrix
cm = metrics.confusion_matrix(y_test, yhat)
disp = metrics.ConfusionMatrixDisplay(
    confusion_matrix=cm,
    display_labels=["Iris-setosa", "Iris-versicolor", "Iris-virginica"])
disp.plot()

# Evaluation using confusion matrix (normalize=true -> return probability over true label (row))
cm_true = metrics.confusion_matrix(y_test, yhat, normalize='true')
disp_true = metrics.ConfusionMatrixDisplay(
    confusion_matrix=cm_true,
Example #20
    def score_estimators(self):
        """
        Get F1 scores of self.r_estimators and self.f_estimators on the fake and real data, respectively.

        :return: dataframe with the results for each estimator on each data test set.
        """
        if self.target_type == 'class':
            rows = []
            for r_classifier, f_classifier, estimator_name in zip(
                    self.r_estimators, self.f_estimators,
                    self.estimator_names):
                for dataset, target, dataset_name in zip(
                    [self.real_x_test, self.fake_x_test],
                    [self.real_y_test, self.fake_y_test], ['real', 'fake']):
                    predictions_classifier_real = r_classifier.predict(dataset)
                    predictions_classifier_fake = f_classifier.predict(dataset)
                    f1_r = f1_score(target,
                                    predictions_classifier_real,
                                    average="micro")
                    f1_f = f1_score(target,
                                    predictions_classifier_fake,
                                    average="micro")
                    jac_sim = jaccard_score(predictions_classifier_real,
                                            predictions_classifier_fake,
                                            average='micro')
                    row = {
                        'index': f'{estimator_name}_{dataset_name}',
                        'f1_real': f1_r,
                        'f1_fake': f1_f,
                        'jaccard_similarity': jac_sim
                    }
                    rows.append(row)
            results = pd.DataFrame(rows).set_index('index')

        elif self.target_type == 'regr':
            r2r = [
                rmse(self.real_y_test, clf.predict(self.real_x_test))
                for clf in self.r_estimators
            ]
            f2f = [
                rmse(self.fake_y_test, clf.predict(self.fake_x_test))
                for clf in self.f_estimators
            ]

            # Calculate test set accuracies on the other dataset
            r2f = [
                rmse(self.fake_y_test, clf.predict(self.fake_x_test))
                for clf in self.r_estimators
            ]
            f2r = [
                rmse(self.real_y_test, clf.predict(self.real_x_test))
                for clf in self.f_estimators
            ]
            index = [f'real_data_{classifier}' for classifier in self.estimator_names] + \
                    [f'fake_data_{classifier}' for classifier in self.estimator_names]
            results = pd.DataFrame({
                'real': r2r + f2r,
                'fake': r2f + f2f
            },
                                   index=index)
        else:
            raise Exception(
                f'self.target_type should be either \'class\' or \'regr\', but is {self.target_type}.'
            )
        return results
Example #21
    def iteration(self, node_status=True):

        '''
        Execute a single model iteration

        :return: Iteration_id, Incremental node status (dictionary code -> status)
        '''
        # An iteration changes the opinion of the selected agent 'i' using the following procedure:
        # if i is stubborn, its status doesn't change; otherwise:
        # - select all its neighbors
        # - if the distance between the pair of agents is smaller than epsilon, the agent's
        #   state changes, becoming the sum of its initial opinion and the average of the
        #   weighted opinions of its neighbors, multiplied by a factor that depends on the
        #   sign of agent i's opinion.

        self.clean_initial_status(None)

        actual_status = {node: nstatus for node, nstatus in future.utils.iteritems(self.status)}

        if self.actual_iteration == 0:
            use_stubborn_node = False
            negatives = []
            positives = []
            for node in self.graph.nodes:
                if self.params['model']['similarity'] == 1:
                    # use the similarity vector
                    # create binary vector for the agent character
                    i = 0
                    if len(self.params['nodes']['vector'][node]) == 0:
                    #self.params['nodes']['vector'][node] = []
                        while i < 5:
                            self.params['nodes']['vector'][node].append(np.random.randint(2))
                            i += 1
                if self.params['nodes']['stubborn'][node] == 1:
                    # use the stubborn nodes
                    use_stubborn_node = True
                # if stubborns have intermediate negative opinions
                '''if actual_status[node]>=-0.6 and actual_status[node]<=-0.4:'''
                if actual_status[node] <= -0.8:
                    if node not in negatives:
                        negatives.append(node)
                # if stubborns have intermediate positive opinions
                '''if actual_status[node] >= 0.4 and actual_status[node]<=0.6:'''
                if actual_status[node] >= 0.8:
                    if node not in positives:
                        positives.append(node)

            join_list = negatives + positives
            num_stubborns = 0
            if use_stubborn_node == False:
                # based on the value of option_for_stubbornness, compute num_stubborns from only the negatives, only the positives, or the union of the two
                if self.params['model']['option_for_stubbornness'] == -1 and len(negatives) != 0:
                    num_stubborns = int(float(len(negatives)) * float(self.params['model']['perc_stubborness']))
                elif self.params['model']['option_for_stubbornness'] == 1 and len(positives) != 0:
                    num_stubborns = int(float(len(positives)) * float(self.params['model']['perc_stubborness']))
                elif self.params['model']['option_for_stubbornness'] == 0 and len(join_list) != 0:
                    num_stubborns = int(float(len(join_list)) * float(self.params['model']['perc_stubborness']))

                count_stub = 0

                while count_stub < num_stubborns:
                    if self.params['model']['option_for_stubbornness'] == -1:
                        n = random.choice(negatives)
                    elif self.params['model']['option_for_stubbornness'] == 1:
                        n = random.choice(positives)
                    elif self.params['model']['option_for_stubbornness'] == 0:
                        n = random.choice(join_list)

                    if self.params['nodes']['stubborn'][n] == 0:
                        self.params['nodes']['stubborn'][n] = 1
                        count_stub += 1

                # if num_stubborns is computed with a uniform distribution over the entire population (option_for_stubbornness is not used in this case)
                '''
                num_stubborns = 0
                if setting == False:
                    num_stubborns = int(float(self.graph.number_of_nodes())*float(self.params['model']['perc_stubborness']))
                    count_stub = 0

                    while count_stub < num_stubborns:
                        n = list(self.graph.nodes)[np.random.randint(0,self.graph.number_of_nodes())]
                        if self.params['nodes']['stubborn'][n] == 0:
                            self.params['nodes']['stubborn'][n] = 1
                            count_stub += 1
                '''

            self.actual_iteration += 1
            # delta, node_count, status_delta = self.status_delta(self.status)
            if node_status:
                return {"iteration": 0, "status": self.status.copy(),
                        "node_count": len(self.status), "status_delta": self.status.copy()}
            else:
                return {"iteration": 0, "status": {},
                        "node_count": len(self.status), "status_delta": self.status.copy()}

        '''
        - select a random agent n1
        - if it is stubborn:
            its status doesn't change
        - else:
            - select all its neighbors
            - for each neighbor, diff_opinion is computed
            - if diff_opinion < epsilon then:
                add the weighted opinion of the neighbor to sum_op
            - compute new_op (the updated opinion of n1)
        '''
        for i in range(0, self.graph.number_of_nodes()):
            # select a random node
            n1 = list(self.graph.nodes)[np.random.randint(0, self.graph.number_of_nodes())]

            # if n1 isn't stubborn
            if self.params['nodes']['stubborn'][n1] == 0:
                # select neighbors of n1
                neighbours = list(self.graph.neighbors(n1))
                sum_op = 0
                count_in_eps = 0

                if len(neighbours) == 0:
                    continue

                for neigh in neighbours:
                    key = (n1, neigh)

                    # compute the difference between opinions
                    diff_opinion = np.abs((actual_status[n1]) - (actual_status[neigh]))
                    if diff_opinion < self.params['model']['epsilon']:
                        jaccard_sim = 0
                        if self.params['model']['similarity'] == 1:
                            # compute similarity between n1 and neigh using jaccard score
                            jaccard_sim = jaccard_score(self.params['nodes']['vector'][n1],
                                                        self.params['nodes']['vector'][neigh])
                        weight = 0
                        if not self.graph.has_edge(key[0], key[1]):
                            e = list(key)
                            reverse = [e[1], e[0]]
                            link = tuple(reverse)
                            if link in self.params['edges']['weight']:
                                weight = (self.params['edges']['weight'][link])
                            elif not self.graph.directed:
                                weight = (self.params['edges']['weight'][key])
                        else:
                            if key in self.params['edges']['weight']:
                                weight = (self.params['edges']['weight'][key])
                            elif not self.graph.directed:
                                weight = (self.params['edges']['weight'][(key[1], key[0])])

                        if self.params['model']['similarity'] == 1:
                            sum_op += (actual_status[neigh] * weight) * jaccard_sim
                        else:
                            sum_op += (actual_status[neigh] * weight)
                        # count_in_eps is the number of neighbors in epsilon
                        count_in_eps += 1

                if (count_in_eps > 0):
                    if actual_status[n1] > 0:
                        new_op = actual_status[n1] + ((sum_op / count_in_eps) * (1 - actual_status[n1]))
                    elif actual_status[n1] <= 0:
                        new_op = actual_status[n1] + ((sum_op / count_in_eps) * (1 + actual_status[n1]))

                else:
                    # if there aren't neighbors in epsilon, the status of n1 doesn't change
                    new_op = actual_status[n1]
            # if n1 is stubborn
            else:
                # opinion doesn't change
                new_op = actual_status[n1]

            actual_status[n1] = new_op

        # delta, node_count, status_delta = self.status_delta(actual_status)
        self.status = actual_status
        self.actual_iteration += 1
        if node_status:
            return {"iteration": self.actual_iteration - 1, "status": self.status.copy(), "node_count": len(actual_status),
                    "status_delta": self.status.copy()}
        else:
            return {"iteration": self.actual_iteration - 1, "status": {}, "node_count": len(actual_status),
                    "status_delta": self.status.copy()}
Example #22
    AnnList.append(ann)
    PredList.append(out2)

PredList = np.array(PredList)
AnnList = np.array(AnnList)
# print(PredList.shape)
# print(AnnList.shape)

PredList = PredList.reshape((-1, ))
AnnList = AnnList.reshape((-1, ))
# print(PredList.shape)
# print(AnnList.shape)

Acc = accuracy_score(AnnList, PredList)
F1 = f1_score(AnnList, PredList, average='macro')
av_iou = jaccard_score(AnnList, PredList, average='macro')

print('======================================')
print('precision_score=', precision_score(AnnList, PredList, average='macro'))
print('recall_score=', recall_score(AnnList, PredList, average='macro'))
print('av_iou=', av_iou)
print('F1_score=', F1)
print('accuracy_score=', Acc)
print('======================================')

f = open('results/' + Modelname.split('.')[0] + '.txt', "w+")
f.write(Modelname.split('.')[0] + '\n')
f.write('===============' + '\n')
f.write('precision_score= ' +
        str(precision_score(AnnList, PredList, average='macro')) + '\n')
f.write('recall_score= ' +
Example #23
    )
    for method in methods:
        sep = ','
        if method == 'raw':
            infile = '../experiment/dataset-1000/' + dataset + '.' + method + '.csv'
        elif method == 'dca':
            infile = '../experiment/dataset-1000-result/' + \
                method + '/' + dataset + '.' + method + '.tsv'
            sep = '\t'
        else:
            infile = '../experiment/dataset-1000-result/' + \
                method + '/' + dataset + '.' + method + '.csv'

        print('reading {0} data...'.format(method))
        print(infile)
        data = pd.read_csv(infile, index_col=0, sep=sep)
        data = data.fillna(0)

        clustering = KMeans(n_clusters=5).fit(data.T)
        labels = clustering.labels_
        print(labels.shape)
        output.write(','.join([
            method,
            str(adjusted_rand_score(groups, labels)),
            str(jaccard_score(groups, labels, average='macro')),
            str(
                normalized_mutual_info_score(
                    groups, labels, average_method='min')),
            str(silhouette_score(data.T, groups))
        ]) + '\n')
Example #24
        bestScore = score
        best_clf = clf_knn
        bestK = k

print("Best K is :", bestK, "| Cross validation Accuracy :", bestScore)
clf_knn = best_clf

plt.plot(range(3, 12), accList)
plt.xlabel('K')
plt.ylabel('CV Accuracy')
plt.show()

clf_knn.fit(x_train, y_train)
y_pred = best_clf.predict(x_train)

trainScores['KNN-jaccard'] = jaccard_score(y_train, y_pred)
trainScores['KNN-f1-score'] = f1_score(y_train, y_pred, average='weighted')

trainScores
"""# Decision Tree"""

from sklearn import tree

clf_tree = tree.DecisionTreeClassifier()
clf_tree = clf_tree.fit(x_train, y_train)

y_pred = clf_tree.predict(x_train)

trainScores['Tree-jaccard'] = jaccard_score(y_train, y_pred)
trainScores['Tree-f1-score'] = f1_score(y_train, y_pred, average='weighted')
Example #25
print('Train set:', X_train.shape, y_train.shape)
print('Test set:', X_test.shape, y_test.shape)

from sklearn.linear_model import LogisticRegression
from sklearn.metrics import confusion_matrix

LR = LogisticRegression(C=0.01, solver='liblinear').fit(X_train, y_train)
print(LR)

yhat = LR.predict(X_test)

yhat_prob = LR.predict_proba(X_test)

from sklearn.metrics import jaccard_score

print(jaccard_score(y_test, yhat))

from sklearn.metrics import classification_report, confusion_matrix
import itertools


def plot_confusion_matrix(cm,
                          classes,
                          normalize=False,
                          title='Confusion matrix',
                          cmap=plt.cm.Blues):
    """
    This function prints and plots the confusion matrix.
    Normalization can be applied by setting `normalize=True`.
    """
    if normalize:
Example #26
def jaccard_similarity(list1, list2):
    return jaccard_score(list1, list2, average='binary')
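A quick usage sketch; with average='binary' both lists are expected to hold 0/1 labels of equal length:

# Usage sketch (hypothetical inputs).
print(jaccard_similarity([1, 1, 0, 1], [1, 0, 0, 1]))  # 2 shared 1s / 3 in union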
Example #27
# Load a multi-label dataset from https://www.openml.org/d/40597
X, Y = fetch_openml("yeast", version=4, return_X_y=True)
Y = Y == "TRUE"
X_train, X_test, Y_train, Y_test = train_test_split(X,
                                                    Y,
                                                    test_size=0.2,
                                                    random_state=0)

# Fit an independent logistic regression model for each class using the
# OneVsRestClassifier wrapper.
base_lr = LogisticRegression()
ovr = OneVsRestClassifier(base_lr)
ovr.fit(X_train, Y_train)
Y_pred_ovr = ovr.predict(X_test)
ovr_jaccard_score = jaccard_score(Y_test, Y_pred_ovr, average="samples")

# Fit an ensemble of logistic regression classifier chains and take the
# average prediction of all the chains.
chains = [
    ClassifierChain(base_lr, order="random", random_state=i) for i in range(10)
]
for chain in chains:
    chain.fit(X_train, Y_train)

Y_pred_chains = np.array([chain.predict(X_test) for chain in chains])
chain_jaccard_scores = [
    jaccard_score(Y_test, Y_pred_chain >= 0.5, average="samples")
    for Y_pred_chain in Y_pred_chains
]
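The per-chain scores above can be compared against the independent models and an ensemble of the chains; a sketch of that last step, averaging the chain probabilities and thresholding at 0.5 as done for the individual chains:

# Average the chain probabilities, threshold at 0.5, and gather all the
# scores side by side for comparison.
Y_pred_ensemble = Y_pred_chains.mean(axis=0)
ensemble_jaccard_score = jaccard_score(Y_test, Y_pred_ensemble >= 0.5,
                                       average="samples")
model_scores = [ovr_jaccard_score] + chain_jaccard_scores + [ensemble_jaccard_score]
model_names = ["Independent"] + ["Chain %d" % (i + 1) for i in range(10)] + ["Ensemble"]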
Example #28
def save_class_results(y_groundtruth, y_predicted, file_names, fold):
    metrics = np.zeros((3, 9))
    metrics_names = [
        'Bal_Acc', 'Prec_Mic', 'Prec_Mac', 'Prec_Wgt', 'Rec_Mic', 'Rec_Mac',
        'Rec_Wgt', 'Jac_Mac', 'Jac_Wgt'
    ]
    preds_df = None
    report_df = None
    for i, grade in enumerate(['BE', 'ICM', 'TE']):
        y_preds = np.argmax(y_predicted[:, :3], axis=1)
        y_truth = y_groundtruth[:, i].round()

        metrics[i, 0] = balanced_accuracy_score(y_truth, y_preds)
        metrics[i, 1] = precision_score(y_truth, y_preds, average='micro')
        metrics[i, 2] = precision_score(y_truth, y_preds, average='macro')
        metrics[i, 3] = precision_score(y_truth, y_preds, average='weighted')
        metrics[i, 4] = recall_score(y_truth, y_preds, average='micro')
        metrics[i, 5] = recall_score(y_truth, y_preds, average='macro')
        metrics[i, 6] = recall_score(y_truth, y_preds, average='weighted')
        metrics[i, 7] = jaccard_score(y_truth, y_preds, average='macro')
        metrics[i, 8] = jaccard_score(y_truth, y_preds, average='weighted')

        if grade == 'BE':
            y_truth = [REBMUN.get(item, item) for item in y_truth]
            y_preds = [REBMUN.get(item, item) for item in y_preds]
            classes = [4, 3, 2]
        else:
            y_truth = [RETTEL.get(item, item) for item in y_truth]
            y_preds = [RETTEL.get(item, item) for item in y_preds]
            classes = ['A', 'B', 'C']
        class_report_index = pd.Index(
            [classes[0], classes[1], classes[2],\
            'micro avg', 'macro avg', 'weighted avg'])

        cmat = confusion_matrix(y_truth, y_preds)
        out_name = OUT_PATH + 'CM_{}fold{}.png'.format(grade, fold)
        plot_confusion_matrix(cmat=cmat, classes=classes, out_name=out_name)

        pred_results = pd.DataFrame({
            "Filenames": file_names,
            "Labels" + grade: y_truth,
            "Preds" + grade: y_preds
        })

        preds_df = pred_results.set_index("Filenames") if preds_df is None else \
                   preds_df.join(pred_results.set_index("Filenames"))

        class_report = classification_report(y_truth,
                                             y_preds,
                                             output_dict=True)
        class_report = pd.DataFrame(class_report).transpose()
        class_report = class_report.set_index(class_report_index)
        report_df = class_report if report_df is None else \
                    report_df.append(class_report)

    preds_df.to_csv(OUT_PATH + "Preds-fold{}.csv".format(fold), index=True)
    prerec_results = pd.DataFrame.from_dict({
        metrics_names[i]: [metrics[0, i], metrics[1, i], metrics[2, i]]
        for i in range(9)
    }).set_index(pd.Index(['BE', 'ICM', 'TE']))
    prerec_results.to_csv(OUT_PATH + "PrecRec-fold{}.csv".format(fold),
                          index=True)
    report_df.to_csv(OUT_PATH + "ClassificationReport-fold{}.csv".format(fold),
                     index=True)
Example #29
    def compute_metrics_for_all_cubes(self, inference_full_image=True):

        cubes_to_use = []

        dump_tensors()
        torch.cuda.ipc_collect()
        torch.cuda.empty_cache()
        torch.cuda.ipc_collect()
        torch.cuda.empty_cache()
        dump_tensors()
        torch.cuda.empty_cache()

        if "lidc" in self.dataset_name:
            return

        if hasattr(self.trainer, "model"):
            del self.trainer.model
            del self.trainer
            sleep(20)
            self.trainer = Trainer(config=self.config, dataset=None)
            dump_tensors()
            torch.cuda.ipc_collect()
            torch.cuda.empty_cache()
            dump_tensors()

        dump_tensors()
        torch.cuda.ipc_collect()
        torch.cuda.empty_cache()
        dump_tensors()

        self.trainer.load_model(from_path=True, path=self.model_path, phase="sup", ensure_sup_is_completed=True)

        if inference_full_image is False:
            print("PATCHING Will be Done")

        full_cubes_used_for_testing = self.get_all_cubes_which_were_used_for_testing()
        full_cubes_used_for_training = self.get_all_cubes_which_were_used_for_training()
        cubes_to_use.extend(full_cubes_used_for_testing)
        cubes_to_use.extend(full_cubes_used_for_training)

        cubes_to_use_path = [os.path.join(self.dataset_dir, i) for i in cubes_to_use]
        label_cubes_of_cubes_to_use_path = [os.path.join(self.dataset_labels_dir, i) for i in cubes_to_use]

        metric_dict = dict()

        (
            dice_logits_test,
            dice_logits_train,
            dice_binary_test,
            dice_binary_train,
            jaccard_test,
            jaccard_train,
            hausdorff_test,
            hausdorff_train,
        ) = ([], [], [], [], [], [], [], [])

        for idx, cube_path in enumerate(cubes_to_use_path):
            np_array = self._load_cube_to_np_array(cube_path)  # (x,y,z)
            self.original_cube_dimensions = np_array.shape
            if sum(np_array.shape) > 550 and self.two_dim is False:
                inference_full_image = False

            if self.dataset_name.lower() in ("task04_sup", "task01_sup", "cellari_heart_sup_10_192", "cellari_heart_sup"):
                if self.tried is False:
                    inference_full_image = True
                else:
                    inference_full_image = False

            if inference_full_image is False:
                print("CUBE TOO BIG, PATCHING")

                patcher = Patcher(np_array, two_dim=self.two_dim)

                with torch.no_grad():
                    self.trainer.model.eval()
                    for patch_idx, patch in patcher:

                        patch = torch.unsqueeze(patch, 0)  # (1,C,H,W or 1) -> (1,1,C,H,W or 1)
                        if self.config.model.lower() in (
                            "vnet_mg",
                            "unet_3d",
                            "unet_acs",
                            "unet_acs_axis_aware_decoder",
                            "unet_acs_with_cls",
                        ):
                            patch, pad_tuple = pad_if_necessary_one_array(patch, return_pad_tuple=True)

                        pred = self.trainer.model(patch)
                        assert pred.shape == patch.shape, "{} vs {}".format(pred.shape, patch.shape)
                        # need to then unpad to reconstruct
                        if self.two_dim is True:
                            raise RuntimeError("SHOULD  NOT BE USED HERE")

                        pred = self._unpad_3d_array(pred, pad_tuple)
                        pred = torch.squeeze(pred, dim=0)  # (1, 1, C,H,W) -> (1,C,H,W)
                        # pred_mask = self._make_pred_mask_from_pred(pred)
                        patcher.predicitons_to_reconstruct_from[
                            :, patch_idx
                        ] = pred  # update array in patcher that will construct full cube predicted mask
                        del pred
                        dump_tensors()
                        torch.cuda.ipc_collect()
                        torch.cuda.empty_cache()
                        dump_tensors()

                pred_mask_full_cube = patcher.get_pred_mask_full_cube()

            else:

                full_cube_tensor = torch.Tensor(np_array)
                full_cube_tensor = torch.unsqueeze(full_cube_tensor, 0)  # (C,H,W) -> (1,C,H,W)
                full_cube_tensor = torch.unsqueeze(full_cube_tensor, 0)  # (1,C,H,W) -> (1,1,C,H,W)

                with torch.no_grad():
                    self.trainer.model.eval()
                    if self.two_dim is False:
                        if self.config.model.lower() in (
                            "vnet_mg",
                            "unet_3d",
                            "unet_acs",
                            "unet_acs_axis_aware_decoder",
                            "unet_acs_with_cls",
                        ):
                            full_cube_tensor, pad_tuple = pad_if_necessary_one_array(full_cube_tensor, return_pad_tuple=True)
                            try:
                                p = self.trainer.model(full_cube_tensor)
                                p.to("cpu")
                                pred = p
                                del p
                                dump_tensors()
                                torch.cuda.ipc_collect()
                                torch.cuda.empty_cache()
                                dump_tensors()
                                torch.cuda.empty_cache()
                                pred = self._unpad_3d_array(pred, pad_tuple)
                                pred = torch.squeeze(pred, dim=0)  # (1, 1, C,H,W) -> (1,C,H,W)
                                pred = torch.squeeze(pred, dim=0)
                                pred_mask_full_cube = pred  # self._make_pred_mask_from_pred(pred)
                                torch.cuda.ipc_collect()
                                torch.cuda.empty_cache()
                                del pred

                            except RuntimeError as e:
                                if "out of memory" in str(e) or "cuDNN error: CUDNN_STATUS_NOT_SUPPORTED" in str(e):
                                    print("TOO BIG FOR MEMORY, DEFAULTING TO PATCHING")
                                    # exit(0)
                                    dump_tensors()
                                    torch.cuda.ipc_collect()
                                    torch.cuda.empty_cache()
                                    dump_tensors()
                                    self.tried = True
                                    res = self.compute_metrics_for_all_cubes(inference_full_image=False)
                                    return res

                    else:
                        pred_mask_full_cube = torch.zeros(self.original_cube_dimensions)
                        for z_idx in range(full_cube_tensor.size()[-1]):
                            tensor_slice = full_cube_tensor[..., z_idx]  # SLICE : (1,1,C,H,W) -> (1,1,C,H)
                            assert tensor_slice.shape == (1, 1, self.original_cube_dimensions[0], self.original_cube_dimensions[1])
                            pred = self.trainer.model(tensor_slice)
                            pred = torch.squeeze(pred, dim=0)  # (1, 1, C,H) -> (1,C,H)
                            pred = torch.squeeze(pred, dim=0)  # (1,C,H) -> (C,H)
                            pred_mask_slice = pred  # self._make_pred_mask_from_pred(pred)
                            pred_mask_full_cube[..., z_idx] = pred_mask_slice

            full_cube_label_tensor = torch.Tensor(self._load_cube_to_np_array(label_cubes_of_cubes_to_use_path[idx]))
            full_cube_label_tensor = self.adjust_label_cube_acording_to_dataset(full_cube_label_tensor)

            pred_mask_full_cube = pred_mask_full_cube.to("cpu")
            threshold = self._set_threshold(pred_mask_full_cube, full_cube_label_tensor)
            pred_mask_full_cube_binary = self._make_pred_mask_from_pred(pred_mask_full_cube, threshold=threshold)

            dice_score_soft = float(DiceLoss.dice_loss(pred_mask_full_cube, full_cube_label_tensor, return_loss=False))
            dice_score_binary = float(DiceLoss.dice_loss(pred_mask_full_cube_binary, full_cube_label_tensor, return_loss=False))
            hausdorff = hausdorff_distance(np.array(pred_mask_full_cube_binary), np.array(full_cube_label_tensor))
            x_flat = pred_mask_full_cube_binary.contiguous().view(-1)
            y_flat = full_cube_label_tensor.contiguous().view(-1)
            x_flat = x_flat.cpu()
            y_flat = y_flat.cpu()
            jac_score = jaccard_score(y_flat, x_flat)

            if idx < len(full_cubes_used_for_testing):
                dice_logits_test.append(dice_score_soft)
                dice_binary_test.append(dice_score_binary)
                jaccard_test.append(jac_score)
                hausdorff_test.append(hausdorff)
            else:
                dice_logits_train.append(dice_score_soft)
                dice_binary_train.append(dice_score_binary)
                jaccard_train.append(jac_score)
                hausdorff_train.append(hausdorff)

            dump_tensors()
            torch.cuda.ipc_collect()
            torch.cuda.empty_cache()
            dump_tensors()
            sleep(10)
            print(idx)

        avg_jaccard_test = sum(jaccard_test) / len(jaccard_test)
        avg_jaccard_train = sum(jaccard_train) / len(jaccard_train)

        avg_dice_test_soft = sum(dice_logits_test) / len(dice_logits_test)
        avg_dice_test_binary = sum(dice_binary_test) / len(dice_binary_test)

        avg_dice_train_soft = sum(dice_logits_train) / len(dice_logits_train)
        avg_dice_train_binary = sum(dice_binary_train) / len(dice_binary_train)

        avg_hausdorff_train = sum(hausdorff_train) / len(hausdorff_train)
        avg_hausdorff_test = sum(hausdorff_test) / len(hausdorff_test)

        metric_dict["dice_test_soft"] = avg_dice_test_soft
        metric_dict["dice_test_binary"] = avg_dice_test_binary
        metric_dict["dice_train_soft"] = avg_dice_train_soft
        metric_dict["dice_train_binary"] = avg_dice_train_binary
        metric_dict["jaccard_test"] = avg_jaccard_test
        metric_dict["jaccard_train"] = avg_jaccard_train
        metric_dict["hausdorff_test"] = avg_hausdorff_test
        metric_dict["hausdorff_train"] = avg_hausdorff_train

        return metric_dict
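
# Hedged illustration (not from the original source): the per-cube Jaccard above
# is a plain binary IoU over flattened masks; the same value can be computed
# directly from two small binary arrays (demo names are invented).
import numpy as np
from sklearn.metrics import jaccard_score

pred_mask_demo = np.array([[1, 0], [1, 1]])
label_mask_demo = np.array([[1, 0], [0, 1]])
iou_demo = jaccard_score(label_mask_demo.reshape(-1), pred_mask_demo.reshape(-1))
print(iou_demo)  # intersection 2 / union 3
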
print(__doc__)

# Load a multi-label dataset from https://www.openml.org/d/40597
X, Y = fetch_openml('yeast', version=4, return_X_y=True)
Y = Y == 'TRUE'
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=.2,
                                                    random_state=0)

# Fit an independent logistic regression model for each class using the
# OneVsRestClassifier wrapper.
base_lr = LogisticRegression()
ovr = OneVsRestClassifier(base_lr)
ovr.fit(X_train, Y_train)
Y_pred_ovr = ovr.predict(X_test)
ovr_jaccard_score = jaccard_score(Y_test, Y_pred_ovr, average='samples')

# Fit an ensemble of logistic regression classifier chains and take the
# average prediction of all the chains.
chains = [ClassifierChain(base_lr, order='random', random_state=i)
          for i in range(10)]
for chain in chains:
    chain.fit(X_train, Y_train)

Y_pred_chains = np.array([chain.predict(X_test) for chain in
                          chains])
chain_jaccard_scores = [jaccard_score(Y_test, Y_pred_chain >= .5,
                                      average='samples')
                        for Y_pred_chain in Y_pred_chains]

Y_pred_ensemble = Y_pred_chains.mean(axis=0)
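
# Hedged continuation, assumed from the scikit-learn classifier-chain example
# this snippet follows: score the averaged ensemble with the same
# samples-averaged Jaccard metric used above.
ensemble_jaccard_score = jaccard_score(Y_test,
                                       Y_pred_ensemble >= .5,
                                       average='samples')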
    def save_segmentation_examples(self, nr_cubes=3, inference_full_image=True):

        # deal with recursion when defaulting to patching

        if "lidc" in self.dataset_name:
            return

        torch.cuda.ipc_collect()
        torch.cuda.empty_cache()
        dump_tensors()

        if hasattr(self.trainer, "model"):
            del self.trainer.model
            del self.trainer
            sleep(15)
            self.trainer = Trainer(config=self.config, dataset=None)
            dump_tensors()
            torch.cuda.ipc_collect()
            torch.cuda.empty_cache()
            dump_tensors()
            sleep(5)
        if inference_full_image is False:
            print("PATCHING Will be Done")

        dump_tensors()
        torch.cuda.ipc_collect()
        torch.cuda.empty_cache()
        dump_tensors()

        self.trainer.load_model(from_path=True, path=self.model_path, phase="sup", ensure_sup_is_completed=True)

        cubes_to_use = []
        cubes_to_use.extend(self.sample_k_full_cubes_which_were_used_for_testing(nr_cubes))
        cubes_to_use.extend(self.sample_k_full_cubes_which_were_used_for_training(nr_cubes))

        cubes_to_use_path = [os.path.join(self.dataset_dir, i) for i in cubes_to_use]
        label_cubes_of_cubes_to_use_path = [os.path.join(self.dataset_labels_dir, i) for i in cubes_to_use]

        for cube_idx, cube_path in enumerate(cubes_to_use_path):
            np_array = self._load_cube_to_np_array(cube_path)  # (x,y,z)
            self.original_cube_dimensions = np_array.shape
            if sum(np_array.shape) > 550 and self.two_dim is False:
                inference_full_image = False

            if self.dataset_name.lower() in ("task04_sup", "task01_sup", "cellari_heart_sup_10_192", "cellari_heart_sup"):
                if self.tried is False:
                    inference_full_image = True
                else:
                    inference_full_image = False

            if inference_full_image is False:
                print("CUBE TOO BIG, PATCHING")
                patcher = Patcher(np_array, two_dim=self.two_dim)

                with torch.no_grad():
                    self.trainer.model.eval()
                    for idx, patch in patcher:

                        patch = torch.unsqueeze(patch, 0)  # (1,C,H,W or 1) -> (1,1,C,H,W or 1)
                        if self.config.model.lower() in (
                            "vnet_mg",
                            "unet_3d",
                            "unet_acs",
                            "unet_acs_axis_aware_decoder",
                            "unet_acs_with_cls",
                        ):
                            patch, pad_tuple = pad_if_necessary_one_array(patch, return_pad_tuple=True)

                        pred = self.trainer.model(patch)
                        assert pred.shape == patch.shape, "{} vs {}".format(pred.shape, patch.shape)
                        # need to then unpad to reconstruct
                        if self.two_dim is True:
                            raise RuntimeError("SHOULD  NOT BE USED HERE")

                        pred = self._unpad_3d_array(pred, pad_tuple)
                        pred = torch.squeeze(pred, dim=0)  # (1, 1, C,H,W) -> (1,C,H,W)
                        pred_mask = pred  # self._make_pred_mask_from_pred(pred)
                        del pred

                        patcher.predicitons_to_reconstruct_from[
                            :, idx
                        ] = pred_mask  # update array in patcher that will construct full cube predicted mask

                        dump_tensors()
                        torch.cuda.ipc_collect()
                        torch.cuda.empty_cache()
                        dump_tensors()

                pred_mask_full_cube = patcher.get_pred_mask_full_cube()
                # segmentations.append(patcher.get_pred_mask_full_cube())
            else:

                full_cube_tensor = torch.Tensor(np_array)
                full_cube_tensor = torch.unsqueeze(full_cube_tensor, 0)  # (C,H,W) -> (1,C,H,W)
                full_cube_tensor = torch.unsqueeze(full_cube_tensor, 0)  # (1,C,H,W) -> (1,1,C,H,W)

                with torch.no_grad():
                    self.trainer.model.eval()
                    if self.two_dim is False:
                        if self.config.model.lower() in (
                            "vnet_mg",
                            "unet_3d",
                            "unet_acs",
                            "unet_acs_axis_aware_decoder",
                            "unet_acs_with_cls",
                        ):
                            full_cube_tensor, pad_tuple = pad_if_necessary_one_array(full_cube_tensor, return_pad_tuple=True)
                            try:
                                p = self.trainer.model(full_cube_tensor)
                                p.to("cpu")
                                pred = p
                                del p
                                dump_tensors()
                                torch.cuda.ipc_collect()
                                torch.cuda.empty_cache()
                                dump_tensors()
                                torch.cuda.empty_cache()
                                pred = self._unpad_3d_array(pred, pad_tuple)
                                pred = torch.squeeze(pred, dim=0)  # (1, 1, C,H,W) -> (1,C,H,W)
                                pred = torch.squeeze(pred, dim=0)
                                pred_mask_full_cube = pred  # self._make_pred_mask_from_pred(pred)
                                torch.cuda.ipc_collect()
                                torch.cuda.empty_cache()
                                del pred

                            except RuntimeError as e:
                                if "out of memory" in str(e) or "cuDNN error: CUDNN_STATUS_NOT_SUPPORTED" in str(e):
                                    print("TOO BIG FOR MEMORY, DEFAULTING TO PATCHING")
                                    # exit(0)
                                    dump_tensors()
                                    torch.cuda.ipc_collect()
                                    torch.cuda.empty_cache()
                                    dump_tensors()
                                    self.tried = True
                                    self.save_segmentation_examples(inference_full_image=False)
                                    return

                            # segmentations.append(pred_mask_full_cube)
                    else:
                        pred_mask_full_cube = torch.zeros(self.original_cube_dimensions)
                        for z_idx in range(full_cube_tensor.size()[-1]):
                            tensor_slice = full_cube_tensor[..., z_idx]  # SLICE : (1,1,C,H,W) -> (1,1,C,H)
                            assert tensor_slice.shape == (1, 1, self.original_cube_dimensions[0], self.original_cube_dimensions[1])
                            pred = self.trainer.model(tensor_slice)
                            pred = torch.squeeze(pred, dim=0)  # (1, 1, C,H) -> (1,C,H)
                            pred = torch.squeeze(pred, dim=0)  # (1,C,H) -> (C,H)
                            pred_mask_slice = pred  # self._make_pred_mask_from_pred(pred)
                            pred_mask_full_cube[..., z_idx] = pred_mask_slice

                        # segmentations.append(pred_mask_full_cube)

            # for idx, pred_mask_full_cube in enumerate(segmentations):

            print(cube_idx)

            if cube_idx < nr_cubes:
                if inference_full_image is True:
                    save_dir = os.path.join(self.save_dir, self.dataset_name, "testing_examples_full/", cubes_to_use[cube_idx][:-4])
                else:
                    save_dir = os.path.join(
                        self.save_dir, self.dataset_name, "testing_examples_full/", cubes_to_use[cube_idx][:-4] + "_with_patcher"
                    )
            else:
                if inference_full_image is True:
                    save_dir = os.path.join(self.save_dir, self.dataset_name, "training_examples_full/", cubes_to_use[cube_idx][:-4])
                else:
                    save_dir = os.path.join(
                        self.save_dir, self.dataset_name, "training_examples_full/", cubes_to_use[cube_idx][:-4] + "_with_patcher"
                    )

            make_dir(save_dir)

            # save nii of segmentation
            pred_mask_full_cube = pred_mask_full_cube.cpu()  # logits mask
            pred_mask_full_cube_binary = self._make_pred_mask_from_pred(pred_mask_full_cube)  # binary mask

            nifty_img = nibabel.Nifti1Image(np.array(pred_mask_full_cube).astype(np.float32), np.eye(4))
            nibabel.save(nifty_img, os.path.join(save_dir, cubes_to_use[cube_idx][:-4] + "_logits_mask.nii.gz"))

            nifty_img = nibabel.Nifti1Image(np.array(pred_mask_full_cube_binary).astype(np.float32), np.eye(4))
            nibabel.save(nifty_img, os.path.join(save_dir, cubes_to_use[cube_idx][:-4] + "_binary_mask.nii.gz"))

            # save .nii.gz of cube if is npy original full cube file
            if ".npy" in cube_path:
                nifty_img = nibabel.Nifti1Image(np_array.astype(np.float32), np.eye(4))
                nibabel.save(nifty_img, os.path.join(save_dir, cubes_to_use[cube_idx][:-4] + "_cube.nii.gz"))

            # self.save_3d_plot(np.array(pred_mask_full_cube), os.path.join(save_dir, "{}_plt3d.png".format(cubes_to_use[idx])))

            label_tensor_of_cube = torch.Tensor(self._load_cube_to_np_array(label_cubes_of_cubes_to_use_path[cube_idx]))
            label_tensor_of_cube = self.adjust_label_cube_acording_to_dataset(label_tensor_of_cube)
            label_tensor_of_cube_masked = np.array(label_tensor_of_cube)
            label_tensor_of_cube_masked = np.ma.masked_where(
                label_tensor_of_cube_masked < 0.5, label_tensor_of_cube_masked
            )  # it's binary anyway

            pred_mask_full_cube_binary_masked = np.array(pred_mask_full_cube_binary)
            pred_mask_full_cube_binary_masked = np.ma.masked_where(
                pred_mask_full_cube_binary_masked < 0.5, pred_mask_full_cube_binary_masked
            )  # it's binary anyway

            pred_mask_full_cube_logits_masked = np.array(pred_mask_full_cube)
            pred_mask_full_cube_logits_masked = np.ma.masked_where(
                pred_mask_full_cube_logits_masked < 0.3, pred_mask_full_cube_logits_masked
            )  # mask low-confidence logits so only confident regions show in the overlay

            make_dir(os.path.join(save_dir, "slices/"))

            for z_idx in range(pred_mask_full_cube.shape[-1]):

                # binary
                fig = plt.figure(figsize=(10, 5))
                plt.imshow(np_array[:, :, z_idx], cmap=cm.Greys_r)
                plt.imshow(pred_mask_full_cube_binary_masked[:, :, z_idx], cmap="Accent")
                plt.axis("off")
                fig.savefig(
                    os.path.join(save_dir, "slices/", "slice_{}_binary.jpg".format(z_idx + 1)),
                    bbox_inches="tight",
                    dpi=150,
                )
                plt.close(fig=fig)

                # logits
                fig = plt.figure(figsize=(10, 5))
                plt.imshow(np_array[:, :, z_idx], cmap=cm.Greys_r)
                plt.imshow(pred_mask_full_cube_logits_masked[:, :, z_idx], cmap="Blues", alpha=0.5)
                plt.axis("off")
                fig.savefig(
                    os.path.join(save_dir, "slices/", "slice_{}_logits.jpg".format(z_idx + 1)),
                    bbox_inches="tight",
                    dpi=150,
                )
                plt.close(fig=fig)

                # dist of logits histogram
                distribution_logits = np.array(pred_mask_full_cube[:, :, z_idx].contiguous().view(-1))
                fig = plt.figure(figsize=(10, 5))
                plt.hist(distribution_logits, bins=np.arange(min(distribution_logits), max(distribution_logits) + 0.05, 0.05))
                fig.savefig(
                    os.path.join(save_dir, "slices/", "slice_{}_logits_histogram.jpg".format(z_idx + 1)),
                    bbox_inches="tight",
                    dpi=150,
                )
                plt.close(fig=fig)

                # save ground truth as well, overlaid on the original
                fig = plt.figure(figsize=(10, 5))
                plt.imshow(np_array[:, :, z_idx], cmap=cm.Greys_r)
                plt.imshow(label_tensor_of_cube_masked[:, :, z_idx], cmap="jet")
                plt.axis("off")
                fig.savefig(
                    os.path.join(save_dir, "slices/", "slice_{}_gt.jpg".format(z_idx + 1)),
                    bbox_inches="tight",
                    dpi=150,
                )
                plt.close(fig=fig)

            dice_score_soft = float(DiceLoss.dice_loss(pred_mask_full_cube, label_tensor_of_cube, return_loss=False))
            dice_score_binary = float(DiceLoss.dice_loss(pred_mask_full_cube_binary, label_tensor_of_cube, return_loss=False))
            x_flat = pred_mask_full_cube_binary.contiguous().view(-1)
            y_flat = label_tensor_of_cube.contiguous().view(-1)
            x_flat = x_flat.cpu()
            y_flat = y_flat.cpu()
            jaccard_scr = float(jaccard_score(y_flat, x_flat))
            metrics = {"dice_logits": dice_score_soft, "dice_binary": dice_score_binary, "jaccard": jaccard_scr}
            # print(dice)
            with open(os.path.join(save_dir, "dice.json"), "w") as f:
                json.dump(metrics, f)

            dump_tensors()
            torch.cuda.ipc_collect()
            torch.cuda.empty_cache()
            dump_tensors()
            dump_tensors()
            torch.cuda.ipc_collect()
            torch.cuda.empty_cache()
            dump_tensors()
            sleep(10)
 def metrics_for_predicted_items(self):
     '''CALCULATING METRICS BASED ON EACH PREDICTED ITEM (ARRAY OF AN IMAGE)'''
     if self.number_of_classes == 2:
         print(
             'Confusion matrix - rows = actual, columns = predicted: 0 - {}, 1 - {}'
             .format(self.conf_matrix_labels[0],
                     self.conf_matrix_labels[1]))
     else:
         print(
             'Confusion matrix - rows = actual, columns = predicted: 0 - {}, 1 - {}, 2 - {}'
             .format(self.conf_matrix_labels[0], self.conf_matrix_labels[1],
                     self.conf_matrix_labels[2]))
     # _, _, files = next(os.walk(ground_truth_folder))
     statistics_per_image_path = self.model_stat_folder + '/for_each_image'
     os.makedirs(statistics_per_image_path)
     filenames = np.load(self.filenames_array_path)
     ground_truth_array = np.load(self.ground_truth_array_path)
     predicted_images_array = np.load(self.predicted_images_one_hot)
     filenames = filenames.tolist()
     image_counter = 0
     for filename in filenames:
         ground_truth_item = ground_truth_array[image_counter]
         ground_truth_item_reshaped = np.reshape(
             ground_truth_item,
             (ground_truth_item.shape[0]**2, self.number_of_classes))
         predicted_item = predicted_images_array[image_counter]
         predicted_item_reshaped = np.reshape(
             predicted_item,
             (predicted_item.shape[0]**2, self.number_of_classes))
         ground_truth_item_a = np.argmax(ground_truth_item_reshaped, axis=1)
         predicted_item_a = np.argmax(predicted_item_reshaped, axis=1)
         acc = accuracy_score(ground_truth_item_a, predicted_item_a)
         class_report = classification_report(
             ground_truth_item_a,
             predicted_item_a,
             labels=self.labels,
             target_names=self.conf_matrix_labels)
         conf_matrix = confusion_matrix(ground_truth_item_a,
                                        predicted_item_a,
                                        labels=self.labels)
         jacc_score = jaccard_score(ground_truth_item_a,
                                    predicted_item_a,
                                    labels=self.labels,
                                    average=None)
         # dice_score = prediction.dice_coef(ground_truth_item_reshaped, predicted_item_reshaped)
         print('FOR IMAGE *************** {} ***************'.format(
             filename))
         print('Confusion matrix')
         print(conf_matrix)
         print('Accuracy')
         print(acc)
         print('Classification report')
         print(class_report)
         # print ('Dice score (F1_score): {:.4f}'.format(dice_score))
         # print ('Jaccard score (IoU): {:.4f}'.format(jacc_score))
         class_report_dict = classification_report(
             ground_truth_item_a,
             predicted_item_a,
             labels=self.labels,
             target_names=self.conf_matrix_labels,
             output_dict=True)
         class_report_df = pd.DataFrame(
             class_report_dict).transpose().round(4)
         jacc_list = np.around(jacc_score, decimals=4).tolist()
         jacc_list = jacc_list + ['NaN', 'NaN', 'NaN']
         if self.number_of_classes == 2:
             acc_list = [acc.round(4), 'NaN', 'NaN', 'NaN', 'NaN']
         else:
             acc_list = [acc.round(4), 'NaN', 'NaN', 'NaN', 'NaN', 'NaN']
         class_report_df['Accuracy'], class_report_df['Jaccard'] = [
             acc_list, jacc_list
         ]
         confusion_matrix_df = pd.DataFrame(data=conf_matrix,
                                            index=self.conf_matrix_labels,
                                            columns=self.conf_matrix_labels)
         # df_combined = pd.concat([class_report_df, confusion_matrix_df], axis =0, ignore_index=True)
         # ipdb.set_trace()
         class_report_csv_path = statistics_per_image_path + '/' + 'class_report_' + str(
             filename[:-4]) + '.csv'
         class_report_df.to_csv(class_report_csv_path,
                                index=True,
                                header=True)
         confusion_matrix_csv_path = statistics_per_image_path + '/' + 'confusion_matrix_' + str(
             filename[:-4]) + '.csv'
         confusion_matrix_df.to_csv(confusion_matrix_csv_path,
                                    index=True,
                                    header=True)
         image_counter += 1
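
# Hedged illustration (not part of the original source): with average=None and an
# explicit `labels` list, jaccard_score returns one IoU per class, which is how
# the per-image scores above are produced (demo names are invented).
import numpy as np
from sklearn.metrics import jaccard_score

gt_demo = np.array([0, 0, 1, 1, 2, 2])
pred_demo = np.array([0, 1, 1, 1, 2, 0])
per_class_iou = jaccard_score(gt_demo, pred_demo, labels=[0, 1, 2], average=None)
print(per_class_iou)  # one Jaccard/IoU value per class, ordered as in `labels`
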
Example #33
def print_predict(ground_truth, prediction, hyper_params):
    rounded = 4
    print(ground_truth.shape, prediction.shape)
    try:
        AUC_macro = round(
            roc_auc_score(ground_truth, prediction, average='macro'), rounded)
        AUC_micro = round(
            roc_auc_score(ground_truth, prediction, average='micro'), rounded)
    except ValueError:
        # only one class present in ground_truth
        AUC_macro, AUC_micro = 0.0, 0.0
    # print(ground_truth[:10])
    # print(prediction[:10])
    try:
        Coverage_error = round(
            (coverage_error(ground_truth, prediction)) / ground_truth.shape[1],
            rounded)
        rankloss = round(label_ranking_loss(ground_truth, prediction), rounded)
        One_error = round(one_error(ground_truth, prediction), rounded)
        Precision_at_ks = precision_at_ks(ground_truth, prediction)
        Log_loss = round(log_loss(ground_truth, prediction), rounded)
        Average_precision_score = round(
            average_precision_score(ground_truth, prediction), rounded)
    except Exception:
        Coverage_error, rankloss, One_error, Precision_at_ks, Log_loss, Average_precision_score = 0, 0, 0, 0, 0, 0

    # prediction = np.round(prediction)
    thresh = 0.5
    print(f"Threshold at {thresh}")
    prediction = np.where(prediction > thresh, 1, 0)  # binarize predictions at the threshold
    # print(ground_truth)
    # print(prediction)
    F1_Micro = round(f1_score(ground_truth, prediction, average='micro'),
                     rounded)
    F1_Macro = round(f1_score(ground_truth, prediction, average='macro'),
                     rounded)
    Hamming_loss = round(hamming_loss(ground_truth, prediction), rounded)
    Accuracy = round(accuracy_score(ground_truth, prediction), rounded)
    Recall_score_macro = round(
        recall_score(ground_truth, prediction, average='macro'), rounded)
    Recall_score_micro = round(
        recall_score(ground_truth, prediction, average='micro'), rounded)
    Precision_score_macro = round(
        precision_score(ground_truth, prediction, average='macro'), rounded)
    Precision_score_micro = round(
        precision_score(ground_truth, prediction, average='micro'), rounded)
    Jaccard_score_macro = round(
        jaccard_score(ground_truth, prediction, average='macro'), rounded)
    Jaccard_score_micro = round(
        jaccard_score(ground_truth, prediction, average='micro'), rounded)

    print('Recall_score_macro:   ', Recall_score_macro)
    print('Recall_score_micro:   ', Recall_score_micro)
    print('Precision_score_macro:   ', Precision_score_macro)
    print('Precision_score_micro:   ', Precision_score_micro)
    print('F1_Micro ', F1_Micro)
    print('F1_Macro ', F1_Macro)
    print('Hamming_loss: ', Hamming_loss)
    print("Accuracy = ", Accuracy)
    print('Jaccard_score_macro:   ', Jaccard_score_macro)
    print('Jaccard_score_micro:   ', Jaccard_score_micro)

    print('precision_at_ks: ', Precision_at_ks)

    print('Log_loss:  ', Log_loss)
    print('Average_precision_score: ', Average_precision_score)

    print('One_error: ', One_error)
    print('Ranking loss: ', rankloss)
    print('coverage: ', Coverage_error)
    print('AUC-micro:   ', AUC_micro)
    print('AUC-macro:   ', AUC_macro)

    print('\n')
    return [F1_Micro, F1_Macro, AUC_micro, AUC_macro, Recall_score_micro]
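
# Hedged illustration (not part of the original source): micro vs. macro Jaccard
# on a small multilabel indicator array after the same 0.5 thresholding used in
# print_predict above (demo names are invented).
import numpy as np
from sklearn.metrics import jaccard_score

truth_demo = np.array([[1, 0, 1],
                       [0, 1, 0]])
scores_demo = np.array([[0.9, 0.2, 0.4],
                        [0.1, 0.8, 0.7]])
binary_demo = np.where(scores_demo > 0.5, 1, 0)
print(jaccard_score(truth_demo, binary_demo, average='micro'))  # pools all label decisions
print(jaccard_score(truth_demo, binary_demo, average='macro'))  # averages per-label IoU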