Example #1
def _multidim_multiclass_prob_sk_metric(preds, target, normalize=None):
    sk_preds = torch.argmax(preds, dim=len(preds.shape) - 2).view(-1).numpy()
    sk_target = target.view(-1).numpy()

    return sk_confusion_matrix(y_true=sk_target,
                               y_pred=sk_preds,
                               normalize=normalize)
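A minimal usage sketch (not from the original project), assuming preds holds per-class probabilities with the class dimension second-to-last, e.g. shape (N, C, X); the imports the snippet leaves implicit are added:

import numpy as np
import torch
from sklearn.metrics import confusion_matrix as sk_confusion_matrix

preds = torch.rand(4, 3, 5)           # probabilities, shape (batch, classes, extra dim)
target = torch.randint(0, 3, (4, 5))  # integer class labels, shape (batch, extra dim)
print(_multidim_multiclass_prob_sk_metric(preds, target))  # 3x3 matrix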
Example #2
def _multidim_multiclass_sk_metric(preds, target, normalize=None):
    sk_preds = preds.view(-1).numpy()
    sk_target = target.view(-1).numpy()

    return sk_confusion_matrix(y_true=sk_target,
                               y_pred=sk_preds,
                               normalize=normalize)
Example #3
def _sk_cm_multilabel(preds, target, normalize=None):
    sk_preds = preds.view(-1).numpy()
    sk_target = target.view(-1).numpy()

    return sk_confusion_matrix(y_true=sk_target,
                               y_pred=sk_preds,
                               normalize=normalize)
Example #4
def _multilabel_prob_sk_metric(preds, target, normalize=None):
    sk_preds = (preds.view(-1).numpy() >= THRESHOLD).astype(np.uint8)
    sk_target = target.view(-1).numpy()

    return sk_confusion_matrix(y_true=sk_target,
                               y_pred=sk_preds,
                               normalize=normalize)
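A usage sketch under the assumption that THRESHOLD is a module-level constant; 0.5 here is a stand-in value, since the original constant is defined elsewhere:

import numpy as np
import torch
from sklearn.metrics import confusion_matrix as sk_confusion_matrix

THRESHOLD = 0.5  # assumed value; not shown in the excerpt
preds = torch.rand(4, 3)              # multilabel probabilities
target = torch.randint(0, 2, (4, 3))  # binary ground-truth labels
print(_multilabel_prob_sk_metric(preds, target))  # 2x2 matrix over flattened labels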
Example #5
def confusion_matrix(y_true, y_predicted, labels):
    df = pd.DataFrame(data=sk_confusion_matrix(y_true, y_predicted),
                      index=labels,
                      columns=labels)
    df.index.name = 'true classes'
    df.columns.name = 'predicted classes'
    return df
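One caveat worth showing: the helper does not forward labels to sklearn, so the DataFrame axis names must be given in sklearn's sorted label order. A minimal sketch:

import pandas as pd
from sklearn.metrics import confusion_matrix as sk_confusion_matrix

y_true = ['cat', 'dog', 'cat', 'bird']
y_pred = ['cat', 'cat', 'cat', 'bird']
# labels must be sorted to match sk_confusion_matrix's row/column order
print(confusion_matrix(y_true, y_pred, labels=['bird', 'cat', 'dog']))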
Example #6
def confusion_matrix(y_test, y_pred):
    cm = sk_confusion_matrix(y_test, y_pred)
    cm = pd.DataFrame(data=cm, columns=[-1, 1], index=[-1, 1])
    cm.columns.name = 'Predicted label'
    cm.index.name = 'True label'
    error_rate = (y_pred != y_test).mean()
    print('error rate: %.2f' % error_rate)
    return cm
Example #7
def draw_confusion_matrix(y_test, y_pred, labels):
    cm = sk_confusion_matrix(y_test, y_pred)
    cm = pd.DataFrame(data=cm, columns=labels, index=labels)
    cm.columns.name = 'Predicted label'
    cm.index.name = 'True label'
    error_rate = (y_pred != y_test).mean()
    print('mean error rate: %.2f' % error_rate)
    return cm
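A usage sketch; inputs are NumPy arrays so that (y_pred != y_test).mean() works:

import numpy as np
import pandas as pd
from sklearn.metrics import confusion_matrix as sk_confusion_matrix

y_test = np.array([0, 1, 1, 0])
y_pred = np.array([0, 1, 0, 0])
print(draw_confusion_matrix(y_test, y_pred, labels=[0, 1]))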
Example #10
def test_confusion_matrix_random(n_samples, dtype, problem_type):
    upper_range = 2 if problem_type == 'binary' else 1000

    y_true, y_pred, _, _ = generate_random_labels(
        lambda rng: rng.randint(0, upper_range, n_samples).astype(dtype))
    cm = confusion_matrix(y_true, y_pred)
    ref = sk_confusion_matrix(y_true, y_pred)
    cp.testing.assert_array_almost_equal(ref, cm, decimal=4)
Example #11
def test_confusion_matrix_multiclass_subset_labels(labels):
    y_true, y_pred, _, _ = generate_random_labels(
        lambda rng: rng.randint(0, 3, 10).astype(np.int32))

    ref = sk_confusion_matrix(y_true, y_pred, labels=labels)
    labels = cp.array(labels, dtype=np.int32)
    cm = confusion_matrix(y_true, y_pred, labels=labels)
    cp.testing.assert_array_almost_equal(ref, cm, decimal=4)
Example #12
def test_confusion_matrix_multiclass_subset_labels(labels, client):
    y_true, y_pred, np_y_true, np_y_pred = generate_random_labels(
        lambda rng: rng.randint(0, 3, 10).astype(np.int32), as_cupy=True)
    y_true, y_pred = da.from_array(y_true), da.from_array(y_pred)

    ref = sk_confusion_matrix(np_y_true, np_y_pred, labels=labels)
    labels = cp.array(labels, dtype=np.int32)
    cm = confusion_matrix(y_true, y_pred, labels=labels)
    cp.testing.assert_array_almost_equal(ref, cm, decimal=4)
Example #13
def confusion_matrix(y_test, y_pred):
    cm = sk_confusion_matrix(y_test, y_pred)
    # axis labels are hardcoded for this project's binary (0/1) and
    # three-class (1-3) cases
    if cm.shape[1] == 2:
        cm = pd.DataFrame(data=cm, columns=[0, 1], index=[0, 1])
    else:
        cm = pd.DataFrame(data=cm, columns=[1, 2, 3], index=[1, 2, 3])
    cm.columns.name = 'Predicted label'
    cm.index.name = 'True label'
    return cm
Example #14
def make_confusion_matrix_report(clf, x, y):
    assert len(clf.classes_) == 2
    assert clf.classes_[0] == 0
    assert clf.classes_[1] == 1
    y_pred = clf.predict(x)
    cm = sk_confusion_matrix(y, y_pred)
    cm = pd.DataFrame(data=cm, columns=[0, 1], index=[0, 1])
    cm.columns.name = 'Predicted label'
    cm.index.name = 'True label'
    return "\n%s\n" % str(cm)
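A hedged sketch of driving the report helper with a scikit-learn classifier on synthetic data:

import numpy as np
import pandas as pd
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import confusion_matrix as sk_confusion_matrix

rng = np.random.RandomState(0)
x = rng.rand(50, 3)
y = (x[:, 0] > 0.5).astype(int)  # both classes present, so clf.classes_ == [0, 1]
clf = LogisticRegression().fit(x, y)
print(make_confusion_matrix_report(clf, x, y))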
Example #15
def test_confusion_matrix_random_weights(n_samples, dtype, weights_dtype):
    y_true, y_pred, _, _ = generate_random_labels(
        lambda rng: rng.randint(0, 10, n_samples).astype(dtype))

    if weights_dtype == 'int':
        sample_weight = np.random.RandomState(0).randint(0, 10, n_samples)
    else:
        sample_weight = np.random.RandomState(0).rand(n_samples)

    cm = confusion_matrix(y_true, y_pred, sample_weight=sample_weight)
    ref = sk_confusion_matrix(y_true, y_pred, sample_weight=sample_weight)
    cp.testing.assert_array_almost_equal(ref, cm, decimal=4)
Example #16
def test_confusion_matrix_random(n_samples, dtype, problem_type, cluster):
    client = Client(cluster)
    upper_range = 2 if problem_type == 'binary' else 1000

    y_true, y_pred, np_y_true, np_y_pred = generate_random_labels(
        lambda rng: rng.randint(0, upper_range, n_samples).astype(dtype),
        as_cupy=True)
    y_true, y_pred = da.from_array(y_true), da.from_array(y_pred)

    cm = confusion_matrix(y_true, y_pred)
    ref = sk_confusion_matrix(np_y_true, np_y_pred)
    cp.testing.assert_array_almost_equal(ref, cm, decimal=4)
    client.close()
Example #17
def confusion_matrix(learn: Learner,
                     dl: DeviceDataLoader,
                     thres: float = None) -> [np.ndarray, np.ndarray]:
    """ Compute confusion matrix.

    Args:
        learn: trained model
        dl: dataloader with images and ground truth masks
        thres: threshold under which to reject predicted label and set to class-id 0 instead.

    Return:
        The un-normalized and the normalized confusion matrices.
    """
    y_gts = []
    y_preds = []

    # Loop over all images
    for im_path, gt_path in zip(dl.x.items, dl.y.items):
        pred_mask, _ = predict(im_path, learn, thres)

        # load ground truth and resize to be same size as predicted mask
        gt_mask = PIL.Image.open(gt_path)
        gt_mask = gt_mask.resize(pred_mask.shape[::-1],
                                 resample=PIL.Image.NEAREST)
        gt_mask = np.asarray(gt_mask)

        # Store predicted and ground truth labels
        assert len(gt_mask.flatten()) == len(pred_mask.flatten())

        y_gts.extend(gt_mask.flatten())
        y_preds.extend(pred_mask.flatten())

    # Compute confusion matrices
    cmat = sk_confusion_matrix(y_gts, y_preds)
    cmat_norm = sk_confusion_matrix(y_gts, y_preds, normalize="true")

    return cmat, cmat_norm
Example #18
def _confusion_matrix(targets, predictions):
    """Helper function to produce a confusion matrix after properly formatting `predictions`

    Parameters
    ----------
    targets: Array-like
        The target/expected output labels for each of the given `predictions`
    predictions: Array-like
        The predicted values corresponding to each of the elements in `targets`

    Returns
    -------
    Array-like
        A confusion matrix for the given `targets` and `predictions`"""
    return sk_confusion_matrix(targets, get_clean_prediction(targets, predictions))
Example #19
def confusion_matrix(y_true, y_pred, target_names, normalize=False, title='Confusion matrix', cmap=plt.cm.Blues):
    cm = sk_confusion_matrix(y_true, y_pred)
    if normalize:
        cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
    np.set_printoptions(precision=2)
    fig = Figure()
    canvas = FigureCanvas(fig)
    ax = fig.add_subplot(111)
    im = ax.imshow(cm, interpolation='nearest', cmap=cmap)
    fig.colorbar(im)
    tick_marks = np.arange(len(target_names))
    ax.set_xticks(tick_marks)
    ax.set_xticklabels(target_names, rotation=45)
    ax.set_yticks(tick_marks)
    ax.set_yticklabels(target_names)
    fig.tight_layout()
    ax.set_title(title)
    ax.set_ylabel('True label')
    ax.set_xlabel('Predicted label')
    return fig
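A sketch with the matplotlib imports the snippet assumes; attaching the Agg canvas lets savefig run headlessly:

import numpy as np
import matplotlib.pyplot as plt
from matplotlib.figure import Figure
from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
from sklearn.metrics import confusion_matrix as sk_confusion_matrix

fig = confusion_matrix([0, 1, 1, 0], [0, 1, 0, 0], target_names=['neg', 'pos'])
fig.savefig('confusion_matrix.png')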
Example #20
def test_confusion_matrix_random_weights(n_samples, dtype, weights_dtype,
                                         client):
    y_true, y_pred, np_y_true, np_y_pred = generate_random_labels(
        lambda rng: rng.randint(0, 10, n_samples).astype(dtype), as_cupy=True)
    y_true, y_pred = da.from_array(y_true), da.from_array(y_pred)

    if weights_dtype == 'int':
        sample_weight = np.random.RandomState(0).randint(0, 10, n_samples)
    else:
        sample_weight = np.random.RandomState(0).rand(n_samples)

    ref = sk_confusion_matrix(np_y_true,
                              np_y_pred,
                              sample_weight=sample_weight)

    sample_weight = cp.array(sample_weight)
    sample_weight = da.from_array(sample_weight)

    cm = confusion_matrix(y_true, y_pred, sample_weight=sample_weight)
    cp.testing.assert_array_almost_equal(ref, cm, decimal=4)
Example #21
def confusion_matrix(model, dataset_loader):
    targets = []
    outputs = []

    for stft, y in dataset_loader:

        target_class = np.argmax(y, axis=1)
        targets = np.append(targets, target_class)
        with torch.no_grad():
            logits = model(stft)
        predicted_class = np.argmax(logits.cpu().detach().numpy(), axis=1)
        outputs = np.append(outputs, predicted_class)

    Confusion_matrix = sk_confusion_matrix(targets.tolist(), outputs.tolist())
    print('Confusion_matrix:')
    print(Confusion_matrix)
    # Se: recall of class 0; Sq: pooled recall over classes 1-3
    Se = Confusion_matrix[0][0] / (sum(Confusion_matrix[0]))
    Sq = (Confusion_matrix[1][1] + Confusion_matrix[2][2] +
          Confusion_matrix[3][3]) / (sum(Confusion_matrix[1]) + sum(
              Confusion_matrix[2]) + sum(Confusion_matrix[3]))
    return Se, Sq, (Se + Sq) / 2
Example #22
def error_rate(predictions, labels):
  """Return the error rate based on dense predictions and sparse labels."""
  # NOTE: predictions go into the y_true slot and labels into y_pred, so the
  # matrix is transposed relative to sklearn's convention and its rows index
  # predicted classes.
  Confusion_matrix = sk_confusion_matrix(numpy.argmax(predictions, 1).tolist(),
                                         labels.tolist())
  print('Confusion_matrix:')
  print(Confusion_matrix)

  # Pool the diagonal mass of rows 1-3 (Se) and of row 0 (Sp)
  Se = Confusion_matrix[1:, 1:].trace() / Confusion_matrix[1:, :].sum()
  Sp = Confusion_matrix[0, 0] / Confusion_matrix[0, :].sum()
  Acc = (Se + Sp) * 100 / 2

  error = 100.0 - (100.0 *
                   numpy.sum(numpy.argmax(predictions, 1) == labels) /
                   predictions.shape[0])
  return error, Acc
Example #23
def confusion_matrix(model, dataset_loader):
    targets = []
    outputs = []

    for stft, mfcc, y in dataset_loader:
        stft = stft.to(device)
        mfcc = mfcc.to(device)
        y = one_hot(np.array(y.numpy()), 4)

        target_class = np.argmax(y, axis=1)
        targets = np.append(targets, target_class)
        predicted_class = np.argmax(model(stft, mfcc).cpu().detach().numpy(),
                                    axis=1)
        outputs = np.append(outputs, predicted_class)

    Confusion_matrix = sk_confusion_matrix(targets.tolist(), outputs.tolist())
    print('Confusion_matrix:')
    print(Confusion_matrix)
    target_names = ['class 0', 'class 1', 'class 2', 'class 3']
    print('classification_report:')
    print(
        classification_report(targets.tolist(),
                              outputs.tolist(),
                              target_names=target_names))
Example #24
def __str__(self):
    cmx = sk_confusion_matrix(*self._fix_label_prediction_representation())
    return f"{cmx}"
Example #25
def confusion_matrix(y_true,
                     y_pred,
                     target_names=None,
                     normalize=False,
                     cmap=None,
                     ax=None):
    """
    Plot confusion matrix.

    Parameters
    ----------
    y_true : array-like, shape = [n_samples]
        Correct target values (ground truth).
    y_pred : array-like, shape = [n_samples]
        Target predicted classes (estimator predictions).
    target_names : list
        List containing the names of the target classes. List must be in order
        e.g. ``['Label for class 0', 'Label for class 1']``. If ``None``,
        generic labels will be generated e.g. ``['Class 0', 'Class 1']``
    ax: matplotlib Axes
        Axes object to draw the plot onto, otherwise uses current Axes
    normalize : bool
        Normalize the confusion matrix
    cmap : matplotlib Colormap
        If ``None`` uses a modified version of matplotlib's OrRd colormap.

    Notes
    -----
    http://scikit-learn.org/stable/auto_examples/model_selection/plot_confusion_matrix.html


    Returns
    -------
    ax: matplotlib Axes
        Axes containing the plot

    Examples
    --------
    .. plot:: ../../examples/confusion_matrix.py

    """
    if any((val is None for val in (y_true, y_pred))):
        raise ValueError("y_true and y_pred are needed to plot confusion "
                         "matrix")

    # calculate how many names you expect
    values = set(y_true).union(set(y_pred))
    expected_len = len(values)

    if target_names and (expected_len != len(target_names)):
        raise ValueError(
            ('Data contains {} different values, but target'
             ' names contains {} values.'.format(expected_len,
                                                 len(target_names))))

    # if the user didn't pass target_names, create generic ones
    if not target_names:
        values = list(values)
        values.sort()
        target_names = ['Class {}'.format(v) for v in values]

    cm = sk_confusion_matrix(y_true, y_pred)

    if normalize:
        cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]

    np.set_printoptions(precision=2)

    if ax is None:
        ax = plt.gca()

    # this (y, x) may sound counterintuitive. The reason is that
    # in a matrix cell (i, j) is in row=i and col=j, translating that
    # to an x, y plane (which matplotlib uses to plot), we need to use
    # i as the y coordinate (how many steps down) and j as the x coordinate
    # how many steps to the right.
    for (y, x), v in np.ndenumerate(cm):
        try:
            label = '{:.2}'.format(v)
        except (TypeError, ValueError):  # '.2' precision is rejected for integers
            label = v
        ax.text(x,
                y,
                label,
                horizontalalignment='center',
                verticalalignment='center')

    if cmap is None:
        cmap = default_heatmap()

    im = ax.imshow(cm, interpolation='nearest', cmap=cmap)
    plt.colorbar(im, ax=ax)
    tick_marks = np.arange(len(target_names))
    ax.set_xticks(tick_marks)
    ax.set_xticklabels(target_names)
    ax.set_yticks(tick_marks)
    ax.set_yticklabels(target_names)

    title = 'Confusion matrix'
    if normalize:
        title += ' (normalized)'
    ax.set_title(title)

    ax.set_ylabel('True label')
    ax.set_xlabel('Predicted label')
    return ax
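A usage sketch; default_heatmap is an undefined helper in this excerpt, so an explicit cmap is passed to sidestep it:

import numpy as np
import matplotlib.pyplot as plt
from sklearn.metrics import confusion_matrix as sk_confusion_matrix

ax = confusion_matrix([0, 0, 1, 1, 2], [0, 1, 1, 1, 2], cmap=plt.cm.OrRd)
plt.show()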
Example #26
    for filename_csv in [
            result_prefix + '_results_train.csv',
            result_prefix + '_results_valid.csv'
    ]:
        print('compute confusion matrix:', filename_csv + '\n')

        df = pd.read_csv(filename_csv, delimiter=',')
        gt_label = df['labels'].tolist()

        for i in range(0, NUM_CLASSES):
            y_true = encoding_labels(gt_label, i)
            list_prob = df['class' + str(i)].tolist()
            y_pred = convert_prob_to_label(list_prob,
                                           i,
                                           list_threshold=LIST_THRESHOLD)
            cf1 = sk_confusion_matrix(y_true, y_pred)

            print(str(i) + ':')
            print(cf1)
            # sk_roc_auc_score(y_true, y_score)

#endregion

if COMPUTE_EXCLUSION_NUM:
    num_exclusion = 0

    for filename_csv in [
            result_prefix + 'results_train.csv',
            result_prefix + 'results_valid.csv'
    ]:
        print('compute confusion matrix:', filename_csv + '\n')
Example #27
    if pat_id not in dict_patient_predict:
        dict_patient_predict[pat_id] = 0
    elif dict_patient_predict[pat_id] == 0 and np.argmax(probs[i]) == 1:
        dict_patient_predict[pat_id] = 1

list_label_gt = []
list_label_predict = []

for (k, v) in dict_patient_gt.items():
    list_label_gt.append(v)
    list_label_predict.append(dict_patient_predict[k])

from sklearn.metrics import confusion_matrix as sk_confusion_matrix

labels = [x for x in range(0, 2)]
confusion_matrix = sk_confusion_matrix(list_label_gt,
                                       list_label_predict,
                                       labels=labels)

print('OK')
'''
train
0 = [1256, 43]
1 = [9, 249]

validation
0 = [150, 8]
1 = [1, 34]

'''
Example #28
        def on_epoch_end(self, epoch, logs=None):
            try:
                with open(config_file_realtime, 'r') as json_file:
                    data = json.load(json_file)

                    if data['epoch_compute_cf_train'] == 1:
                        compute_cf_train = True
                    else:
                        compute_cf_train = False

                    if data['epoch_compute_cf_valid'] == 1:
                        compute_cf_valid = True
                    else:
                        compute_cf_valid = False

                    if data['epoch_compute_cf_test'] == 1:
                        compute_cf_test = True
                    else:
                        compute_cf_test = False
            except Exception:
                print('read realtime helper file error!')
                compute_cf_train = True
                compute_cf_valid = True
                compute_cf_test = True

            if compute_cf_train:
                print('calculate confusion matrix of training dataset...')
                generator_cf_train = My_images_generator_2d(
                    train_files,
                    train_labels,
                    num_output=num_classes,
                    batch_size=batch_size_train,
                    image_shape=input_shape)
                i = 0
                for x_train, y_train in generator_cf_train.gen():
                    probabilities = self.model.predict(x_train)
                    if i == 0:
                        probs = probabilities
                    else:
                        probs = np.vstack((probs, probabilities))

                    i += 1
                    if i == math.ceil(len(train_files) / batch_size_train):
                        break

                y_preds = probs.argmax(axis=-1)
                y_preds = y_preds.tolist()

                from sklearn.metrics import confusion_matrix as sk_confusion_matrix
                labels = [x for x in range(0, num_classes)]
                confusion_matrix_train = sk_confusion_matrix(train_labels,
                                                             y_preds,
                                                             labels=labels)

                print(confusion_matrix_train)

            if compute_cf_valid:
                print('calculate confusion matrix of validation dataset...')
                generator_cf_valid = My_images_generator_2d(
                    valid_files,
                    valid_labels,
                    num_output=num_classes,
                    batch_size=batch_size_valid,
                    image_shape=input_shape)
                i = 0
                for x_valid, y_valid in generator_cf_valid.gen():
                    probabilities = self.model.predict(x_valid)
                    if i == 0:
                        probs = probabilities
                    else:
                        probs = np.vstack((probs, probabilities))

                    i += 1
                    if i == math.ceil(len(valid_files) / batch_size_valid):
                        break

                y_preds = probs.argmax(axis=-1)
                y_preds = y_preds.tolist()

                from sklearn.metrics import confusion_matrix as sk_confusion_matrix
                labels = [x for x in range(0, num_classes)]
                confusion_matrix_valid = sk_confusion_matrix(valid_labels,
                                                             y_preds,
                                                             labels=labels)
                print(confusion_matrix_valid)

            if compute_cf_test:
                print('calculate confusion matrix of test dataset...')
                generator_cf_test = My_images_generator_2d(
                    test_files,
                    test_labels,
                    num_output=num_classes,
                    batch_size=batch_size_valid,
                    image_shape=input_shape)
                i = 0
                for x_test, y_test in generator_cf_test.gen():
                    probabilities = self.model.predict(x_test)
                    if i == 0:
                        probs = probabilities
                    else:
                        probs = np.vstack((probs, probabilities))

                    i += 1
                    if i == math.ceil(len(test_files) / batch_size_valid):
                        break

                y_preds = probs.argmax(axis=-1)
                y_preds = y_preds.tolist()

                from sklearn.metrics import confusion_matrix as sk_confusion_matrix
                labels = [x for x in range(0, num_classes)]
                confusion_matrix_test = sk_confusion_matrix(test_labels,
                                                            y_preds,
                                                            labels=labels)
                print(confusion_matrix_test)
Example #29
def compute_confusion_matrix(probs_list, dir_dest,
                             all_files, all_labels,
                             dir_preprocess='', dir_original='', ):
    cf_list = []
    not_match_list = []

    # every model's confusion matrix and not match files
    for probs in probs_list:

        y_preds = probs.argmax(axis=-1)
        y_preds = y_preds.tolist()

        NUM_CLASSES = probs[0].shape[0]  #len(probs[0])

        labels = [x for x in range(0, NUM_CLASSES)]
        cf1 = sk_confusion_matrix(all_labels, y_preds, labels=labels)
        cf_list.append(cf1)

        not_match1 = []
        for i in range(len(all_files)):
            if all_labels[i] != y_preds[i]:
                dict_predict = {'filename': all_files[i], 'pred_level': y_preds[i], 'label_level': all_labels[i]}
                not_match1.append(dict_predict)
        not_match_list.append(not_match1)


    # region total confusion matrix and not match files
    for i, probs in enumerate(probs_list):
        if i == 0:
            prob_total = probs
        else:
            prob_total += probs

    prob_total /= len(probs_list)
    y_pred_total = prob_total.argmax(axis=-1)
    cf_total = sk_confusion_matrix(all_labels, y_pred_total, labels=labels)

    not_match_total = []
    for i in range(len(all_files)):
        if all_labels[i] != y_pred_total[i]:
            prob_max = np.max(prob_total[i]) * 100  # find maximum probability value
            dict_predict = {'filename': all_files[i], 'pred_level': y_pred_total[i],
                            'prob_max': prob_max,
                            'label_level': all_labels[i]}
            not_match_total.append(dict_predict)

    # endregion

    #  export confusion files
    if dir_dest != '':
        for dict1 in not_match_total:

            img_file_source = str(dict1['filename']).strip()

            # some files are deleted for some reasons.
            if not os.path.exists(img_file_source):
                raise RuntimeError(img_file_source + ' not found!')

            _, filename = os.path.split(img_file_source)
            file_dest = os.path.join(dir_dest, str(dict1['label_level']) + '_' + str(dict1['pred_level'])
                                     , str(int(dict1['prob_max'])) + '__' + filename)

            #copy original files instead of preprocessed files
            if dir_preprocess != '' and dir_original != '':
                if not dir_preprocess.endswith('/'):
                    dir_preprocess = dir_preprocess + '/'
                if not dir_original.endswith('/'):
                    dir_original = dir_original + '/'

                img_file_source = img_file_source.replace(dir_preprocess, dir_original)

            if not os.path.exists(img_file_source):
                raise RuntimeError(img_file_source + ' not found!')

            if not os.path.exists(os.path.dirname(file_dest)):
                os.makedirs(os.path.dirname(file_dest))

            shutil.copyfile(img_file_source, file_dest)
            print('copy file:', file_dest)


    return cf_list, not_match_list, cf_total, not_match_total
Example #30
    def confusion_matrix(Y_true, Y_pred, label_len=6):
        """
        Generate confusion matrix in a string format
        Parameters
        ----------
        Y_true : list
            The true labels
        Y_pred : list
            The test labels
        label_len : int
            The maximum label text length displayed (minimum length: 6)
        Returns
        -------
        cfmat : str
            The confusion matrix in str format (X-axis: prediction, Y-axis: ground truth)
        acc : float
            The accuracy
        """
        import numpy as np
        from sklearn.metrics import confusion_matrix as sk_confusion_matrix

        # find labels
        if type(Y_true) == np.ndarray:
            Y_labels = np.unique(Y_true)
        else:
            Y_labels = list(set(Y_true))

        # Check the provided label name length
        if label_len < 6:
            label_len = 6
            logger.warning('label_len < 6. Setting to 6.')
        label_tpl = '%' + '-%ds' % label_len
        col_tpl = '%' + '-%d.2f' % label_len

        # sanity check
        if len(Y_pred) > len(Y_true):
            raise RuntimeError('Y_pred has more items than Y_true')
        elif len(Y_pred) < len(Y_true):
            Y_true = Y_true[:len(Y_pred)]

        cm = sk_confusion_matrix(Y_true, Y_pred, labels=Y_labels)

        # compute confusion matrix
        cm_rate = cm.copy().astype('float')
        cm_sum = np.sum(cm, axis=1)

        # Fill confusion string
        for r, s in zip(cm_rate, cm_sum):
            if s > 0:
                r /= s
        cm_txt = label_tpl % 'gt\\dt'
        for l in Y_labels:
            cm_txt += label_tpl % str(l)[:label_len]
        cm_txt += '\n'
        for l, r in zip(Y_labels, cm_rate):
            cm_txt += label_tpl % str(l)[:label_len]
            for c in r:
                cm_txt += col_tpl % c
            cm_txt += '\n'

        # compute accuracy
        correct = 0.0
        for c in range(cm.shape[0]):
            correct += cm[c][c]
        cm_sum = cm.sum()
        if cm_sum > 0:
            acc = correct / cm.sum()
        else:
            acc = 0.0

        return cm_txt, acc
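Since the method takes no self, it can be exercised as a plain function; a small sketch:

cm_txt, acc = confusion_matrix(['a', 'b', 'a', 'b'],
                               ['a', 'b', 'b', 'b'])
print(cm_txt)
print('accuracy: %.2f' % acc)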
Example #31
def confusion_matrix(y_true, y_pred, target_names=None, ax=None,
                     normalize=False, cmap=None):
    """
    Plot confusion matrix.

    Parameters
    ----------
    y_true : array-like, shape = [n_samples]
        Correct target values (ground truth).
    y_pred : array-like, shape = [n_samples]
        Target predicted classes (estimator predictions).
    target_names : list
        List containing the names of the target classes. List must be in order
        e.g. ['Label for class 0', 'Label for class 1']. If None, generic
        labels will be generated e.g. ['Class 0', 'Class 1']
    ax: matplotlib Axes
        Axes object to draw the plot onto, otherwise uses current Axes
    normalize : bool
        Normalize the confusion matrix
    cmap : matplotlib Colormap
        If None uses a modified version of matplotlib's OrRd colormap.


    Returns
    -------
    ax: matplotlib Axes
        Axes containing the plot

    """
    # calculate how many names you expect
    values = set(y_true).union(set(y_pred))
    expected_len = len(values)

    if target_names and (expected_len != len(target_names)):
        raise ValueError(('Data contains {} different values, but target'
                         ' names contains {} values.'.format(expected_len,
                                                             len(target_names)
                                                             )))

    # if the user didn't pass target_names, create generic ones
    if not target_names:
        values = list(values)
        values.sort()
        target_names = ['Class {}'.format(v) for v in values]

    cm = sk_confusion_matrix(y_true, y_pred)
    if normalize:
        cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
    np.set_printoptions(precision=2)

    if ax is None:
        ax = plt.gca()

    # this (y, x) may sound counterintuitive. The reason is that
    # in a matrix cell (i, j) is in row=i and col=j, translating that
    # to an x, y plane (which matplotlib uses to plot), we need to use
    # i as the y coordinate (how many steps down) and j as the x coordinate
    # how many steps to the right.
    for (y, x), v in np.ndenumerate(cm):
        try:
            label = '{:.2}'.format(v)
        except (TypeError, ValueError):  # '.2' precision is rejected for integers
            label = v
        ax.text(x, y, label, horizontalalignment='center',
                verticalalignment='center')

    if cmap is None:
        cmap = default_heatmap()

    im = ax.imshow(cm, interpolation='nearest', cmap=cmap)
    plt.colorbar(im, ax=ax)
    tick_marks = np.arange(len(target_names))
    ax.set_xticks(tick_marks)
    ax.set_xticklabels(target_names)
    ax.set_yticks(tick_marks)
    ax.set_yticklabels(target_names)

    title = 'Confusion matrix'
    if normalize:
        title += ' (normalized)'
    ax.set_title(title)

    ax.set_ylabel('True label')
    ax.set_xlabel('Predicted label')
    return ax