Example #1
def test_3d_energy_decrease():

    a_black = np.zeros((5, 5, 5)).astype(np.uint8)
    a_black[2, 2, 2] = 255
    a_white = invert(a_black)

    assert_array_less(
        meijering(a_black, black_ridges=True).std(), a_black.std())
    assert_array_less(
        meijering(a_white, black_ridges=False).std(), a_white.std())

    assert_array_less(
        sato(a_black, black_ridges=True, mode='reflect').std(), a_black.std())
    assert_array_less(
        sato(a_white, black_ridges=False, mode='reflect').std(), a_white.std())

    assert_array_less(frangi(a_black, black_ridges=True).std(), a_black.std())
    assert_array_less(frangi(a_white, black_ridges=False).std(), a_white.std())

    assert_array_less(
        hessian(a_black, black_ridges=True, mode='reflect').std(),
        a_black.std())
    assert_array_less(
        hessian(a_white, black_ridges=False, mode='reflect').std(),
        a_white.std())
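
These test snippets omit their import block; a minimal set of imports they appear to rely on (a sketch, not the original test module's header) would be:

# Assumed imports for the test snippets in these examples (not shown in the originals).
import numpy as np
from numpy.testing import assert_allclose, assert_array_less, assert_equal

from skimage.data import camera
from skimage.filters import frangi, hessian, meijering, sato
from skimage.util import crop, invert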
Example #2
def test_2d_linearity():

    a_black = np.ones((3, 3)).astype(np.uint8)
    a_white = invert(a_black)

    assert_allclose(meijering(1 * a_black, black_ridges=True),
                    meijering(10 * a_black, black_ridges=True),
                    atol=1e-3)
    assert_allclose(meijering(1 * a_white, black_ridges=False),
                    meijering(10 * a_white, black_ridges=False),
                    atol=1e-3)

    assert_allclose(sato(1 * a_black, black_ridges=True),
                    sato(10 * a_black, black_ridges=True),
                    atol=1e-3)
    assert_allclose(sato(1 * a_white, black_ridges=False),
                    sato(10 * a_white, black_ridges=False),
                    atol=1e-3)

    assert_allclose(frangi(1 * a_black, black_ridges=True),
                    frangi(10 * a_black, black_ridges=True),
                    atol=1e-3)
    assert_allclose(frangi(1 * a_white, black_ridges=False),
                    frangi(10 * a_white, black_ridges=False),
                    atol=1e-3)

    assert_allclose(hessian(1 * a_black, black_ridges=True),
                    hessian(10 * a_black, black_ridges=True),
                    atol=1e-3)
    assert_allclose(hessian(1 * a_white, black_ridges=False),
                    hessian(10 * a_white, black_ridges=False),
                    atol=1e-3)
Example #3
def test_3d_linearity():

    # Note: last axis intentionally not size 3 to avoid 2D+RGB autodetection
    #       warning from an internal call to `skimage.filters.gaussian`.
    a_black = np.ones((3, 3, 5)).astype(np.uint8)
    a_white = invert(a_black)

    assert_allclose(meijering(1 * a_black, black_ridges=True),
                    meijering(10 * a_black, black_ridges=True),
                    atol=1e-3)
    assert_allclose(meijering(1 * a_white, black_ridges=False),
                    meijering(10 * a_white, black_ridges=False),
                    atol=1e-3)

    assert_allclose(sato(1 * a_black, black_ridges=True, mode='reflect'),
                    sato(10 * a_black, black_ridges=True, mode='reflect'),
                    atol=1e-3)
    assert_allclose(sato(1 * a_white, black_ridges=False, mode='reflect'),
                    sato(10 * a_white, black_ridges=False, mode='reflect'),
                    atol=1e-3)

    assert_allclose(frangi(1 * a_black, black_ridges=True),
                    frangi(10 * a_black, black_ridges=True),
                    atol=1e-3)
    assert_allclose(frangi(1 * a_white, black_ridges=False),
                    frangi(10 * a_white, black_ridges=False),
                    atol=1e-3)

    assert_allclose(hessian(1 * a_black, black_ridges=True, mode='reflect'),
                    hessian(10 * a_black, black_ridges=True, mode='reflect'),
                    atol=1e-3)
    assert_allclose(hessian(1 * a_white, black_ridges=False, mode='reflect'),
                    hessian(10 * a_white, black_ridges=False, mode='reflect'),
                    atol=1e-3)
Example #4
def sato(image):
    # Note: the `image` argument is immediately replaced by the sample coins image.
    image = data.coins()
    image = filters.sato(image, sigmas=range(1, 10, 2), black_ridges=True, mode=None, cval=0)
    cmap = plt.cm.gray
    fig, axes = plt.subplots(1, 1, squeeze=False)
    axes[0, 0].imshow(image, cmap=cmap, aspect='auto')
    plt.tight_layout()
    plt.show()
Example #5
    def apply(self, c):
        if self.filter_name == "frangi":
            return frangi(c)
        if self.filter_name == "hessian":
            return hessian(c)
        if self.filter_name == "sato":
            return sato(c)
        raise ValueError("FilterName - {} - not known".format(
            self.filter_name))
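
The `apply` method above is excerpted from a larger class; a minimal, hypothetical host class (the class name and constructor are assumptions, only the `filter_name` attribute is implied by the method) could look like:

# Hypothetical minimal host class for the apply() dispatch above.
from skimage.filters import frangi, hessian, sato


class RidgeFilterStep:
    def __init__(self, filter_name):
        self.filter_name = filter_name  # "frangi", "hessian", or "sato"

    def apply(self, c):
        if self.filter_name == "frangi":
            return frangi(c)
        if self.filter_name == "hessian":
            return hessian(c)
        if self.filter_name == "sato":
            return sato(c)
        raise ValueError("FilterName - {} - not known".format(self.filter_name))

# usage sketch:
# enhanced = RidgeFilterStep("sato").apply(image)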
Example #6
def test_2d_cropped_camera_image():

    a_black = crop(camera(), ((206, 206), (206, 206)))
    a_white = invert(a_black)

    zeros = np.zeros((100, 100))
    ones = np.ones((100, 100))

    assert_allclose(meijering(a_black, black_ridges=True),
                    meijering(a_white, black_ridges=False))

    assert_allclose(sato(a_black, black_ridges=True),
                    sato(a_white, black_ridges=False))

    assert_allclose(frangi(a_black, black_ridges=True), zeros, atol=1e-3)
    assert_allclose(frangi(a_white, black_ridges=False), zeros, atol=1e-3)

    assert_allclose(hessian(a_black, black_ridges=True), ones, atol=1 - 1e-7)
    assert_allclose(hessian(a_white, black_ridges=False), ones, atol=1 - 1e-7)
Example #7
def satoFilter():
    global filterimage
    imgFilter4 = filters.sato(img,
                              sigmas=range(1, 10, 2),
                              black_ridges=True,
                              mode=None,
                              cval=0)
    filterimage = imgFilter4
    io.imshow(imgFilter4)
    io.show()
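
`satoFilter()` relies on a module-level image `img` and a `filterimage` global; a minimal setup it appears to assume (the concrete image is only an illustrative choice) could be:

# Assumed module-level setup for satoFilter() above (not part of the original snippet).
from skimage import data, filters, io

img = data.coins()   # any 2-D grayscale image works here
filterimage = None   # satoFilter() overwrites this via `global filterimage`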
Example #8
def test_2d_null_matrix():

    a_black = np.zeros((3, 3)).astype(np.uint8)
    a_white = invert(a_black)

    zeros = np.zeros((3, 3))
    ones = np.ones((3, 3))

    assert_equal(meijering(a_black, black_ridges=True), ones)
    assert_equal(meijering(a_white, black_ridges=False), ones)

    assert_equal(sato(a_black, black_ridges=True), zeros)
    assert_equal(sato(a_white, black_ridges=False), zeros)

    assert_allclose(frangi(a_black, black_ridges=True), zeros, atol=1e-3)
    assert_allclose(frangi(a_white, black_ridges=False), zeros, atol=1e-3)

    assert_equal(hessian(a_black, black_ridges=False), ones)
    assert_equal(hessian(a_white, black_ridges=True), ones)
Example #9
def test_3d_null_matrix():

    # Note: last axis intentionally not size 3 to avoid 2D+RGB autodetection
    #       warning from an internal call to `skimage.filters.gaussian`.
    a_black = np.zeros((3, 3, 5)).astype(np.uint8)
    a_white = invert(a_black)

    zeros = np.zeros((3, 3, 5))
    ones = np.ones((3, 3, 5))

    assert_allclose(meijering(a_black, black_ridges=True), zeros, atol=1e-1)
    assert_allclose(meijering(a_white, black_ridges=False), zeros, atol=1e-1)

    assert_equal(sato(a_black, black_ridges=True, mode='reflect'), zeros)
    assert_equal(sato(a_white, black_ridges=False, mode='reflect'), zeros)

    assert_allclose(frangi(a_black, black_ridges=True), zeros, atol=1e-3)
    assert_allclose(frangi(a_white, black_ridges=False), zeros, atol=1e-3)

    assert_equal(hessian(a_black, black_ridges=False, mode='reflect'), ones)
    assert_equal(hessian(a_white, black_ridges=True, mode='reflect'), ones)
Example #10
def test_3d_cropped_camera_image():

    a_black = crop(camera(), ((200, 212), (100, 312)))
    a_black = np.dstack([a_black, a_black, a_black])
    a_white = invert(a_black)

    zeros = np.zeros((100, 100, 3))
    ones = np.ones((100, 100, 3))

    assert_allclose(meijering(a_black, black_ridges=True),
                    meijering(a_white, black_ridges=False))

    assert_allclose(sato(a_black, black_ridges=True, mode='reflect'),
                    sato(a_white, black_ridges=False, mode='reflect'))

    assert_allclose(frangi(a_black, black_ridges=True), zeros, atol=1e-3)
    assert_allclose(frangi(a_white, black_ridges=False), zeros, atol=1e-3)

    assert_allclose(hessian(a_black, black_ridges=True, mode='reflect'),
                    ones, atol=1 - 1e-7)
    assert_allclose(hessian(a_white, black_ridges=False, mode='reflect'),
                    ones, atol=1 - 1e-7)
Example #11
def get_text(im=None):
    """
    Extract digits from the image using Tesseract OCR.
    """
    
    sato_res = sato(im, mode='reflect')
    im_filt = img_as_ubyte(sato_res)
    val = threshold_otsu(im_filt)
    im_filt = img_as_ubyte(im_filt >= val)
    
    custom_config = r'-c tessedit_char_whitelist=0123456789. --psm 5 -l letsgodigital'
    res = pytesseract.image_to_string(im_filt, config=custom_config)
    
    return res
Example #12
    def setup(self):
        retina = color.rgb2gray(data.retina())
        t0, _ = filters.threshold_multiotsu(retina, classes=3)
        mask = (retina > t0)
        vessels = filters.sato(retina, sigmas=range(1, 10)) * mask
        thresholded = filters.apply_hysteresis_threshold(vessels, 0.01, 0.03)
        labeled = ndi.label(thresholded)[0]
        largest_nonzero_label = np.argmax(np.bincount(labeled[labeled > 0]))
        binary = (labeled == largest_nonzero_label)
        self.skeleton = morphology.skeletonize(binary)

        labeled2 = ndi.label(thresholded[::2, ::2])[0]
        largest_nonzero_label2 = np.argmax(np.bincount(labeled2[labeled2 > 0]))
        binary2 = (labeled2 == largest_nonzero_label2)
        small_skeleton = morphology.skeletonize(binary2)
        self.g, self.n = graph.pixel_graph(small_skeleton, connectivity=2)
Example #13
    def __call__(self, img):
        """
        :param img: input image
        :returns: tubular features image
        """
        if len(img.shape) == 3:
            layers = img.shape[2]
        else:
            img = img[..., np.newaxis]
            layers = 1
        sato = np.zeros(img.shape)
        for i in range(layers):
            sato[..., i] = filters.sato(img[..., i],
                                        black_ridges=self.black,
                                        sigmas=self.sigmas,
                                        mode="reflect")
        return sato
Example #14
def retina_speed_image():
    with warnings.catch_warnings():
        warnings.simplefilter('ignore', RuntimeWarning)
        image_data = rescale(rgb2gray(retina())[260:1280, 90:800], 0.5)
        speed_data = sato(image_data)
    return speed_data
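
The helper above omits its imports; a plausible import block (a sketch) would be:

# Assumed imports for retina_speed_image() above (not shown in the original).
import warnings
from skimage.color import rgb2gray
from skimage.data import retina
from skimage.filters import sato
from skimage.transform import rescale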
Example #15
def obtenerImagenes(filename, wPath, tmpPath):
    # Convolutional networks
    Seg_model = Segmentation_model_2()
    Seg_model.load_weights(wPath, by_name=True)

    in_img = plt.imread(filename, 3) / 255
    in_img = tf.image.resize(in_img, (64*9, 64*9))
    n_img = tf.image.resize(in_img, (64*9, 64*9))
    in_img = tf.stack([in_img, n_img], axis=0)

    salida = Seg_model(in_img, training = False)

    res = salida[0,...]
    res = tf.image.resize(res, (584, 565))
    res = res.numpy()
    res = res.reshape(584,565)

    binary = res > .1
    binary_clean = morphology.remove_small_objects(binary, 3000)
    prueba = np.clip(res,0 , 1)

    cv2.imwrite(tmpPath + "/deepSeg.png", binary_clean * 255)

    # Sato filter

    image = cv2.imread(filename, 0)

    ring = np.zeros((584,565))
    rr, cc = circle(292, 282, 250, shape=(image.shape))
    ring[rr, cc] = 1 


    elevation_map = sobel(image)
    satoi = sato(image)

    thresh = threshold_li(satoi)
    binary = satoi > thresh
    frgi = frangi(image)
    threshF = threshold_li(frgi)

    bF = frgi * 100000 > .03
    bF = morphology.remove_small_objects(bF, 3000)
    bF = bF * ring  # segmented image 1

    binary_clean = morphology.remove_small_objects(binary, 3000)

    l_binary_clean = morphology.label(binary_clean, return_num=True, connectivity=1)

    binary_f = binary_clean * ring  # segmented image 2, main arteries only

    cv2.imwrite(tmpPath + "/SatoSeg.png", bF * 255)

    shape = binary_f.shape
    a = shape[0]/2 - 1
    b = (shape[1] + 1) / 2
    centro_ = (233, 291)

    x_1 = y_2 = y_3 = px = i = lado = 0
    y_1 = a
    # first white pixel on the center row
    while px != 1:
        px = binary_f[int(a)][i]
        i = i + 1
    x_1 = i

    if x_1 > shape[1] / 2:
        lado = 0
        i = shape[1] - 1
        px = 0
        while px != 1:
            px = binary_f[int(a)][i]
            i = i - 1
    else:
        lado = 1

    if lado == 0:
        b = (shape[1] + 1) / 4
        b = (b * 3) - 20
    else:
        b = ((shape[1] + 1) / 4) + 20 
    x_1 = i
    x_2 = b
    x_3 = x_2


    px, i = [0, 0]
    # first white pixel from the top
    while px != 1:
        px = binary_f[i][int(b)]
        i = i + 1
    y_2 = i
    px, i = [0, shape[0] - 1]
    # first white pixel from the bottom
    while px != 1:
        px = binary_f[i][int(b)]
        i = i - 1
    y_3 = i

    if x_1 < shape[1] / 2:
        lado = 1

    punto1 = (int(x_1), int(y_1))
    punto2 = (int(x_2), int(y_2))
    punto3 = (int(x_3), int(y_3))
    centro = [b, a]

    color = (0, 0, 255)
    # For the angle calculation: tan(x) = opposite / adjacent
    ca =  np.abs(centro[0] - x_1)
    co1 = np.abs(centro[1] - y_2)
    co2 = np.abs(centro[1] - y_3)

    ca_ = (int(centro[0]), int(y_1))  # coordinates for drawing
    co_ = (int(centro[0]), y_2)  # coordinates for drawing
    
    img2 = (binary_f * 255) + image
    l = cv2.line(img2, punto1, punto2, color, 5)
    l = cv2.line(l, punto1, punto3, color, 5)
    l = cv2.line(l, punto1, ca_, color, 2)
    l = cv2.line(l, ca_, co_, color, 2)
    l = cv2.circle(l, punto1, 30, color, 2)


    angulo1 = np.rad2deg(np.arctan(co1 / ca)) # x = arcTan((Co/Ca))
    angulo2 = np.rad2deg(np.arctan(co2 / ca)) 

    angulo = np.abs(angulo1) + np.abs(angulo2)
    cv2.putText(l, "{0:.7}".format(angulo), centro_, cv2.QT_FONT_NORMAL, 0.8, (0,0,0), 2, cv2.LINE_AA)

    cv2.imwrite(tmpPath + "/lineas.png", l)
Example #16
    def run(self, ips, snap, img, para=None):
        rst = sato(snap, range(para['start'], para['end'], para['step']),
                   black_ridges=para['bridges'])
        img[:] = scale(rst, ips.range[0], ips.range[1])
Example #17
def sat(image):
    return sato(image)
Example #18
    def run(self, ips, imgs, para=None):
        IPy.show_img(
            sato(imgs,
                 range(para['start'], para['end'], para['step']),
                 black_ridges=para['bridges']), ips.title + '-sato')
Example #19
def filter(original_images, transformation):
    """
    Apply the selected filter to a batch of images.

    :param original_images: array of shape (nb_images, img_rows, img_cols, nb_channels)
    :param transformation: one of the TRANSFORMATION.filter_* constants
    :return: array of filtered images, reshaped to the input's 4-d shape
    """
    nb_images, img_rows, img_cols, nb_channels = original_images.shape
    transformed_images = []
    if (transformation == TRANSFORMATION.filter_sobel):
        for img in original_images:
            if (nb_channels == 3):
                img = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
            img = img.reshape(img_rows, img_cols)
            img_trans = filters.sobel(img)
            if (nb_channels == 3):
                img_trans = cv2.cvtColor(img_trans, cv2.COLOR_GRAY2RGB)
            transformed_images.append(img_trans)
    elif (transformation == TRANSFORMATION.filter_median):
        for img in original_images:
            img_trans = ndimage.median_filter(img, size=3)
            transformed_images.append(img_trans)
    elif (transformation == TRANSFORMATION.filter_minimum):
        for img in original_images:
            img_trans = ndimage.minimum_filter(img, size=3)
            transformed_images.append(img_trans)
    elif (transformation == TRANSFORMATION.filter_maximum):
        for img in original_images:
            img_trans = ndimage.maximum_filter(img, size=3)
            transformed_images.append(img_trans)
    elif (transformation == TRANSFORMATION.filter_gaussian):
        for img in original_images:
            img_trans = ndimage.gaussian_filter(img, sigma=1)
            transformed_images.append(img_trans)
    elif (transformation == TRANSFORMATION.filter_rank):
        for img in original_images:
            img_trans = ndimage.rank_filter(img, rank=15, size=3)
            transformed_images.append(img_trans)
    elif (transformation == TRANSFORMATION.filter_entropy):
        for img in original_images:
            radius = 2
            if (nb_channels == 3):
                radius = 1
                img = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
            img = img.reshape(img_rows, img_cols)
            """
            requires values in range [-1., 1.]
            """
            img = (img - 0.5) * 2.
            """
            skimage-entropy function returns values in float64,
            however opencv only supports float32.
            """
            img_trans = np.float32(
                filters.rank.entropy(img, disk(radius=radius)))
            """
            rescale back into range [0., 1.]
            """
            img_trans = (img_trans / 2.) + 0.5
            if (nb_channels == 3):
                img_trans = cv2.cvtColor(img_trans, cv2.COLOR_GRAY2RGB)
            transformed_images.append(img_trans)
    elif (transformation == TRANSFORMATION.filter_roberts):
        for img in original_images:
            if (nb_channels == 3):
                img = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
            img = img.reshape(img_rows, img_cols)
            img_trans = roberts(img)
            if (nb_channels == 3):
                img_trans = cv2.cvtColor(img_trans, cv2.COLOR_GRAY2RGB)
            transformed_images.append(img_trans)
    elif (transformation == TRANSFORMATION.filter_scharr):
        for img in original_images:
            if (nb_channels == 3):
                img = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
            img = img.reshape(img_rows, img_cols)
            img_trans = scharr(img)
            if (nb_channels == 3):
                img_trans = cv2.cvtColor(img_trans, cv2.COLOR_GRAY2RGB)
            transformed_images.append(img_trans)
    elif (transformation == TRANSFORMATION.filter_prewitt):
        for img in original_images:
            if (nb_channels == 3):
                img = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
            img = img.reshape(img_rows, img_cols)
            img_trans = prewitt(img)
            if (nb_channels == 3):
                img_trans = cv2.cvtColor(img_trans, cv2.COLOR_GRAY2RGB)
            transformed_images.append(img_trans)
    elif (transformation == TRANSFORMATION.filter_meijering):
        for img in original_images:
            if nb_channels == 1:
                img = cv2.cvtColor(img, cv2.COLOR_GRAY2RGB)
            img_trans = meijering(img, sigmas=[0.01])
            if nb_channels == 1:
                img_trans = img_trans[:, :, 1]
            transformed_images.append(img_trans)
    elif (transformation == TRANSFORMATION.filter_sato):
        for img in original_images:
            img_trans = sato(img)
            transformed_images.append(img_trans)
    elif (transformation == TRANSFORMATION.filter_frangi):
        for img in original_images:
            img_trans = frangi(img)
            transformed_images.append(img_trans)
    elif (transformation == TRANSFORMATION.filter_hessian):
        for img in original_images:
            img_trans = hessian(img)
            transformed_images.append(img_trans)
    elif (transformation == TRANSFORMATION.filter_skeletonize):
        for img in original_images:
            img = invert(img)
            img = img.reshape((img_rows, img_cols))
            img = skeletonize(img)
            transformed_images.append(img)
    elif (transformation == TRANSFORMATION.filter_thin):
        for img in original_images:
            img = img.reshape(img_rows, img_cols)
            img = thin(img, max_iter=100)
            transformed_images.append(img)
    else:
        raise ValueError('{} is not supported.'.format(transformation))

    transformed_images = np.stack(transformed_images, axis=0)
    if (nb_channels == 1):
        # reshape a 3d to a 4d
        transformed_images = transformed_images.reshape(
            (nb_images, img_rows, img_cols, nb_channels))
    return transformed_images
Example #20
# Ridge detection

from skimage import io, filters, feature
import matplotlib.pyplot as plt
from skimage.color import rgb2gray
from skimage.util import invert
import cv2

from skimage.filters import meijering, sato, frangi, hessian

img = io.imread("Classified image 1.jpg")
#img = rgb2gray(invert(img))
img = rgb2gray(img)

meijering_img = meijering(img)
sato_img = sato(img)
frangi_img = frangi(img)
hessian_img = hessian(img)

fig = plt.figure(figsize=(20, 20))

#ax1 = fig.add_subplot(2,2,1)
#ax1.imshow(img, cmap="gray")
#ax1.title.set_text("Input Image")

ax1 = fig.add_subplot(2, 2, 1)
ax1.imshow(hessian_img, cmap="gray")
ax1.title.set_text("Hessian")

ax2 = fig.add_subplot(2, 2, 2)
ax2.imshow(meijering_img, cmap="gray")
Example #21
def noisy_mountains_2():
    rand_color = randomcolor.RandomColor()

    size_x = 20
    size_y = 20
    upscale_factor = 10
    n_labels = 4
    sigma = 1
    buffer = 0.0005
    hsv_index = 1
    segment_spacing = [10, 10]
    image_rgb = np.random.uniform(0, 1, (size_x, size_y, 3))
    labels = np.random.randint(n_labels + 1, size=(
        size_x, size_y)) * np.random.randint(0, 2, size=(size_x, size_y))

    # segment random image
    segments = random_walker(
        image_rgb,
        labels,
        multichannel=True,
        beta=250,
        copy=False,
        spacing=segment_spacing,
    )

    all_colors = np.array(
        rand_color.generate(hue="purple", count=n_labels,
                            format_='Array_rgb')) / 256.

    for color_index in np.unique(segments):
        color_hsv = color.rgb2hsv(all_colors[color_index - 1])
        color_hsv[2] = 0.3 + (color_index - 1) * 0.4 / n_labels
        color_rgb = color.hsv2rgb(color_hsv)
        image_rgb[segments == color_index] = color_rgb

    # transform segmented image so it is large, preserving blobs, and blurry
    image_rgb = rescale(image_rgb,
                        upscale_factor,
                        anti_aliasing=False,
                        multichannel=True)
    image_rgb = gaussian(image_rgb, sigma=sigma, multichannel=True)
    image_hsv = color.rgb2hsv(image_rgb)

    plt.figure()
    plt.imshow(image_rgb)
    plt.show()

    for pix_frac in [0.9]:
        total_pixels_switched = int(image_rgb.shape[0] * image_rgb.shape[0] *
                                    pix_frac)
        print(total_pixels_switched)
        for _ in range(total_pixels_switched):
            rand_x, rand_y = np.random.choice(
                image_hsv.shape[0]), np.random.choice(image_hsv.shape[1])
            orig_rgb = image_rgb[rand_x, rand_y]
            rand_value = image_hsv[rand_x, rand_y, hsv_index]

            x, y = np.where((rand_value *
                             (1 + 0.5 * buffer) >= image_hsv[:, :, hsv_index])
                            & (rand_value *
                               (1 - 2 * buffer) < image_hsv[:, :, hsv_index]))
            if len(x) == 0:
                continue
            idx = np.random.choice(len(x))
            update_rgb = image_rgb[x[idx], y[idx]]

            image_rgb[x[idx], y[idx]] = orig_rgb
            image_rgb[rand_x, rand_y] = update_rgb

        plt.figure()
        plt.title(pix_frac)
        plt.imshow(image_rgb)
        plt.show()

    plt.figure()
    plt.imshow(image_rgb)
    plt.show()

    # filtered_img = prewitt_v(color.rgb2hsv(image_rgb)[:, :, 0])
    # filtered_img = meijering(color.rgb2hsv(image_rgb)[:, :, 0])
    filtered_img = sato(color.rgb2hsv(image_rgb)[:, :, 0])
    # filtered_img = frangi(color.rgb2hsv(image_rgb)[:, :, 0])

    plt.figure()
    plt.imshow(filtered_img)
    plt.show()

    filtered_img = filtered_img
    filtered_img = (filtered_img + np.abs(np.min(filtered_img))
                    ) / np.max(filtered_img + np.abs(np.min(filtered_img)))
    filtered_img += 0.8

    plt.figure()
    plt.imshow(filtered_img, cmap='gray')
    plt.colorbar()
    plt.show()

    num_erosions = 5
    eroded_image = hessian(color.rgb2hsv(image_rgb)[:, :, 0])
    eroded_aug = np.zeros_like(eroded_image)
    for n in range(num_erosions):
        eroded_aug += 1 * eroded_image
        eroded_image = erosion(eroded_image)

    plt.figure()
    plt.imshow(image_rgb)
    plt.show()

    image_hsv = color.rgb2hsv(image_rgb)
    image_hsv[:, :, 2] *= filtered_img
    image_hsv[:, :, 2] *= 1 - (0.3 * eroded_aug / np.max(eroded_aug))
    image_rgb_shadow_aug = color.hsv2rgb(image_hsv)
    plt.figure()
    plt.imshow(image_rgb_shadow_aug)
    plt.show()
Example #22
def sato_filter(filename):
    img = asarray(Image.open(filename))
    img = rgb2gray(img)
    img = filters.sato(img)
    plt.imsave(filename, img, cmap="gray")
    return filename
Example #23
def run(img, **args):
    if len(img.shape) > 2 and img.shape[2] == 4:
        img = color.rgba2rgb(img)
    if len(img.shape) == 2:
        img = color.gray2rgb(img)
    return to_base64(sato(color.rgb2gray(img), **args))
Example #24
    def detectNeedle(self, magnitudevolume, phasevolume, truePhasePoint,
                     maskThreshold, ridgeOperator, slice_index):

        #magnitude volume
        magn_imageData = magnitudevolume.GetImageData()
        magn_rows, magn_cols, magn_zed = magn_imageData.GetDimensions()
        magn_scalars = magn_imageData.GetPointData().GetScalars()
        magn_imageOrigin = magnitudevolume.GetOrigin()
        magn_imageSpacing = magnitudevolume.GetSpacing()
        magn_matrix = vtk.vtkMatrix4x4()
        magnitudevolume.GetIJKToRASMatrix(magn_matrix)
        # magnitudevolume.CreateDefaultDisplayNodes()

        # phase volume
        phase_imageData = phasevolume.GetImageData()
        phase_rows, phase_cols, phase_zed = phase_imageData.GetDimensions()
        phase_scalars = phase_imageData.GetPointData().GetScalars()

        #Convert vtk to numpy
        magn_array = numpy_support.vtk_to_numpy(magn_scalars)
        numpy_magn = magn_array.reshape(magn_zed, magn_rows, magn_cols)
        phase_array = numpy_support.vtk_to_numpy(phase_scalars)
        numpy_phase = phase_array.reshape(phase_zed, phase_rows, phase_cols)

        # slice = int(slice_number)
        # slice = (slice_index)
        # maskThreshold = int(maskThreshold)

        #2D Slice Selector
        ### 3 3D values are : numpy_magn , numpy_phase, mask
        numpy_magn = numpy_magn[slice_index, :, :]
        numpy_phase = numpy_phase[slice_index, :, :]
        #mask = mask[slice,:,:]
        numpy_magn_sliced = numpy_magn.astype(np.uint8)

        #mask thresholding
        img = cv2.pyrDown(numpy_magn_sliced)
        _, threshed = cv2.threshold(numpy_magn_sliced, maskThreshold, 255,
                                    cv2.THRESH_BINARY)
        contours, _ = cv2.findContours(threshed, cv2.RETR_TREE,
                                       cv2.CHAIN_APPROX_SIMPLE)

        #find maximum contour and draw
        cmax = max(contours, key=cv2.contourArea)
        epsilon = 0.002 * cv2.arcLength(cmax, True)
        approx = cv2.approxPolyDP(cmax, epsilon, True)
        cv2.drawContours(numpy_magn_sliced, [approx], -1, (0, 255, 0), 3)

        width, height = numpy_magn_sliced.shape

        #fill maximum contour and draw
        mask = np.zeros([width, height, 3], dtype=np.uint8)
        cv2.fillPoly(mask, pts=[cmax], color=(255, 255, 255))
        mask = mask[:, :, 0]

        #phase_cropped
        phase_cropped = cv2.bitwise_and(numpy_phase, numpy_phase, mask=mask)
        phase_cropped = np.expand_dims(phase_cropped, axis=0)

        node = slicer.vtkMRMLScalarVolumeNode()
        node.SetName('phase_cropped')
        slicer.mrmlScene.AddNode(node)

        slicer.util.updateVolumeFromArray(node, phase_cropped)
        node.SetOrigin(magn_imageOrigin)
        node.SetSpacing(magn_imageSpacing)
        node.SetIJKToRASDirectionMatrix(magn_matrix)

        unwrapped_phase = slicer.vtkMRMLScalarVolumeNode()
        unwrapped_phase.SetName('unwrapped_phase')
        slicer.mrmlScene.AddNode(unwrapped_phase)

        #
        # Run phase unwrapping module
        #
        parameter_name = slicer.mrmlScene.GetNodeByID(
            'vtkMRMLCommandLineModuleNode1')

        if parameter_name is None:
            slicer.cli.createNode(slicer.modules.phaseunwrapping)
        else:
            pass
        cli_input = slicer.util.getFirstNodeByName('phase_cropped')
        cli_output = slicer.util.getNode('unwrapped_phase')

        cli_params = {
            'inputVolume': cli_input,
            'outputVolume': cli_output,
            'truePhase': truePhasePoint
        }
        self.cliParamNode = slicer.cli.runSync(slicer.modules.phaseunwrapping,
                                               node=self.cliParamNode,
                                               parameters=cli_params)

        pu_imageData = unwrapped_phase.GetImageData()
        pu_rows, pu_cols, pu_zed = pu_imageData.GetDimensions()
        pu_scalars = pu_imageData.GetPointData().GetScalars()
        pu_NumpyArray = numpy_support.vtk_to_numpy(pu_scalars)
        phaseunwrapped = pu_NumpyArray.reshape(pu_zed, pu_rows, pu_cols)

        # for debug
        self.phaseunwrapped_numpy = pu_NumpyArray.reshape(pu_cols, pu_rows)

        #Delete unwrapped_phase after I get the information from it
        # delete_unwrapped = slicer.mrmlScene.GetFirstNodeByName('Phase Unwrapping')
        # slicer.mrmlScene.RemoveNode(delete_unwrapped)

        I = phaseunwrapped.squeeze()
        A = np.fft.fft2(I)
        A1 = np.fft.fftshift(A)

        # Image size
        [M, N] = A.shape

        # filter size parameter
        R = 10

        X = np.arange(0, N, 1)
        Y = np.arange(0, M, 1)

        [X, Y] = np.meshgrid(X, Y)
        Cx = 0.5 * N
        Cy = 0.5 * M
        Lo = np.exp(-(((X - Cx)**2) + ((Y - Cy)**2)) / ((2 * R)**2))
        Hi = 1 - Lo

        J = A1 * Lo
        J1 = np.fft.ifftshift(J)
        B1 = np.fft.ifft2(J1)

        K = A1 * Hi
        K1 = np.fft.ifftshift(K)
        B2 = np.fft.ifft2(K1)
        B2 = np.real(B2)

        #Remove border  for false positive
        border_size = 20
        top, bottom, left, right = [border_size] * 4
        mask_borderless = cv2.copyMakeBorder(mask, top, bottom, left, right,
                                             cv2.BORDER_CONSTANT, (0, 0, 0))

        kernel = np.ones((5, 5), np.uint8)
        mask_borderless = cv2.erode(mask_borderless, kernel, iterations=5)
        mask_borderless = ndimage.binary_fill_holes(mask_borderless).astype(
            np.uint8)
        x, y = mask_borderless.shape
        mask_borderless = mask_borderless[0 + border_size:y - border_size,
                                          0 + border_size:x - border_size]

        B2 = cv2.bitwise_and(B2, B2, mask=mask_borderless)

        # for debug
        self.mask_borderless = mask_borderless

        # ridgeOperator = int(ridgeOperator)
        meiji = sato(B2,
                     sigmas=(ridgeOperator, ridgeOperator),
                     black_ridges=True)

        #(minVal, maxVal, minLoc, maxLoc) = cv2.minMaxLoc(meiji)

        result2 = np.reshape(meiji, meiji.shape[0] * meiji.shape[1])

        ids = np.argpartition(result2, -51)[-51:]
        sort = ids[np.argsort(result2[ids])[::-1]]

        (y1, x1) = np.unravel_index(sort[0], meiji.shape)  # best match

        self.meiji = meiji

        point = (x1, y1)
        coords = [x1, y1, slice_index]
        circle1 = plt.Circle(point, 2, color='red')

        self.x1 = x1
        self.y1 = y1

        # Find or create MRML transform node
        transformNode = None
        try:
            transformNode = slicer.util.getNode('TipTransform')
        except slicer.util.MRMLNodeNotFoundException as exc:
            transformNode = slicer.mrmlScene.AddNewNodeByClass(
                'vtkMRMLLinearTransformNode')
            transformNode.SetName("TipTransform")

        transformNode.SetAndObserveMatrixTransformToParent(magn_matrix)

        # Fiducial Creation
        fidNode1 = None
        try:
            fidNode1 = slicer.util.getNode('needle_tip')
        except slicer.util.MRMLNodeNotFoundException as exc:
            fidNode1 = slicer.mrmlScene.AddNewNodeByClass(
                "vtkMRMLMarkupsFiducialNode", "needle_tip")

        fidNode1.RemoveAllMarkups()

        #  fidNode1.CreateDefaultDisplayNodes()
        #  fidNode1.SetMaximumNumberOfControlPoints(1)

        fidNode1.AddFiducialFromArray(coords)
        fidNode1.SetAndObserveTransformNodeID(transformNode.GetID())

        ###TODO: dont delete the volume after use. create a checkpoint to update on only one volume
        delete_wrapped = slicer.mrmlScene.GetFirstNodeByName('phase_cropped')
        slicer.mrmlScene.RemoveNode(delete_wrapped)
        delete_unwrapped = slicer.mrmlScene.GetFirstNodeByName(
            'unwrapped_phase')
        slicer.mrmlScene.RemoveNode(delete_unwrapped)

        #print ("Needle tip location",y1,x1)
        #self.counter = 0
        return True
Example #25
def vectorize_lines(im: np.ndarray, threshold: float = 0.15, min_length=5):
    """
    Vectorizes lines from a binarized array.

    Args:
        im (np.ndarray): Array of shape (3, H, W) with the first dimension
                         being probabilities for (start_separators,
                         end_separators, baseline).
        threshold (float): Threshold for baseline blob detection.
        min_length (int): Minimal length of output baselines.

    Returns:
        [[x0, y0, ... xn, yn], [xm, ym, ..., xk, yk], ... ]
        A list of lists containing the points of all baseline polylines.
    """
    # split into baseline and separator map
    st_map = im[0]
    end_map = im[1]
    sep_map = st_map + end_map
    bl_map = im[2]
    bl_map = filters.sato(bl_map, black_ridges=False, mode='constant')
    bin_bl_map = bl_map > threshold
    # skeletonize
    line_skel = skeletonize(bin_bl_map)
    # find end points
    kernel = np.array([[1, 1, 1], [1, 10, 1], [1, 1, 1]])
    line_extrema = np.transpose(
        np.where(
            (convolve2d(line_skel, kernel, mode='same') == 11) * line_skel))

    class LineMCP(MCP_Connect):
        def __init__(self, *args, **kwargs):
            super().__init__(*args, **kwargs)
            self.connections = dict()
            self.scores = defaultdict(lambda: np.inf)

        def create_connection(self, id1, id2, pos1, pos2, cost1, cost2):
            k = (min(id1, id2), max(id1, id2))
            s = cost1 + cost2
            if self.scores[k] > s:
                self.connections[k] = (pos1, pos2, s)
                self.scores[k] = s

        def get_connections(self):
            results = []
            for k, (pos1, pos2, s) in self.connections.items():
                results.append(
                    np.concatenate(
                        [self.traceback(pos1),
                         self.traceback(pos2)[::-1]]))
            return results

        def goal_reached(self, int_index, float_cumcost):
            return 2 if float_cumcost else 0

    mcp = LineMCP(~line_skel)
    try:
        mcp.find_costs(line_extrema)
    except ValueError as e:
        return []

    lines = [
        approximate_polygon(line, 3).tolist()
        for line in mcp.get_connections()
    ]
    # extend baselines to blob boundary
    lines = _extend_boundaries(lines, bin_bl_map)

    # orient lines
    f_st_map = maximum_filter(st_map, size=20)
    f_end_map = maximum_filter(end_map, size=20)

    oriented_lines = []
    for bl in lines:
        l_end = tuple(bl[0])
        r_end = tuple(bl[-1])
        if f_st_map[l_end] - f_end_map[l_end] > 0.2 and f_st_map[
                r_end] - f_end_map[r_end] < -0.2:
            pass
        elif f_st_map[l_end] - f_end_map[l_end] < -0.2 and f_st_map[
                r_end] - f_end_map[r_end] > 0.2:
            bl = bl[::-1]
        else:
            logger.debug(
                'Insufficient marker confidences in output. Defaulting to upright line.'
            )
        if bl[0][1] > bl[-1][1]:
            bl = bl[::-1]
        if geom.LineString(bl).length >= min_length:
            oriented_lines.append([x[::-1] for x in bl])
    return oriented_lines
Example #26
def tubeness(image, sigma_max=3):
    """Wrapper around the scikit-image sato tubeness filter"""

    tube = sato(image, sigmas=range(1, sigma_max + 1), black_ridges=False)

    return tube
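
A quick usage sketch for the wrapper above (the sample image is only an illustrative choice):

# Usage sketch for tubeness() above.
from skimage import data
from skimage.filters import sato

tube = tubeness(data.coins(), sigma_max=3)   # enhances bright, tube-like structures
print(tube.shape, tube.dtype)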
Example #27
    def buttonSave(arg):

        copy = arg[1]
        imageTemp = arg[2]
        filterTry = filterVar.get()
        if (filterTry == 1):
            copy = cv2.GaussianBlur(copy, (5, 5), 0)
        elif (filterTry == 2):
            copy = cv2.Canny(copy, 100, 150)
        elif (filterTry == 3):
            copy = filters.roberts(imageTemp)
        elif (filterTry == 4):
            copy = filters.sato(imageTemp)
        elif (filterTry == 5):
            copy = filters.scharr(imageTemp)
        elif (filterTry == 6):
            copy = filters.sobel(imageTemp)
        elif (filterTry == 7):
            copy = filters.unsharp_mask(copy, radius=30, amount=3)
        elif (filterTry == 8):
            #copy = filters.median(imageTemp, disk(5))
            b, g, r = cv2.split(copy)
            b = filters.median(b, disk(5))
            g = filters.median(g, disk(5))
            r = filters.median(r, disk(5))
            copy = cv2.merge((b, g, r))
        elif (filterTry == 9):
            copy = filters.prewitt(imageTemp)
        elif (filterTry == 10):
            copy = filters.rank.modal(imageTemp, disk(5))
        flag = 0
        if (np.ndim(copy) == 2):
            flag = 0
        else:
            flag = 1

        if (hEsitleme.get() or hGrafik.get()):
            if (flag):
                copy = cv2.cvtColor(copy, cv2.COLOR_BGR2GRAY)
            if (hGrafik.get()):
                plt.hist(copy.ravel(), 256, [0, 256])
                plt.show()
            if (hEsitleme.get()):
                copy = cv2.equalizeHist(copy)

        if (uzaysalVars[0].get()):
            reScaleRatio = float(uzaysalVarsInputs[0].get())
            if (np.ndim(copy) == 3):
                b, g, r = cv2.split(copy)
                b = transform.rescale(b, reScaleRatio)
                g = transform.rescale(g, reScaleRatio)
                r = transform.rescale(r, reScaleRatio)
                copy = cv2.merge((b, g, r))
            else:
                copy = transform.rescale(copy, reScaleRatio)

        if (uzaysalVars[1].get()):
            resizeY = float(uzaysalVarsInputs[1].get())
            resizeX = float(uzaysalVarsInputs[2].get())
            if (np.ndim(copy) == 3):
                b, g, r = cv2.split(copy)
                b = transform.resize(
                    b, (b.shape[0] // resizeX, b.shape[1] // resizeY),
                    anti_aliasing=True)
                g = transform.resize(
                    g, (g.shape[0] // resizeX, g.shape[1] // resizeY),
                    anti_aliasing=True)
                r = transform.resize(
                    r, (r.shape[0] // resizeX, r.shape[1] // resizeY),
                    anti_aliasing=True)
                copy = cv2.merge((b, g, r))
            else:
                copy = transform.resize(
                    copy, (copy.shape[0] // resizeX, copy.shape[1] // resizeY),
                    anti_aliasing=True)
        if (uzaysalVars[2].get()):
            copy = transform.swirl(copy, rotation=0, strength=10, radius=120)
        if (uzaysalVars[3].get()):
            copy = transform.rotate(copy,
                                    int(uzaysalVarsInputs[3].get()),
                                    resize=True)
        if (uzaysalVars[4].get()):
            copy = copy[:, ::-1]

        if (yogunlukVars[0].get() or yogunlukVars[1].get()):
            if (yogunlukVars[0].get()):
                startINX = int(yogunlukVars[2].get())
                finishINX = int(yogunlukVars[3].get())
                copy = exposure.rescale_intensity(copy,
                                                  in_range=(startINX,
                                                            finishINX))
            if (yogunlukVars[1].get()):
                startOUTX = int(yogunlukVars[4].get())
                finishOUTX = int(yogunlukVars[5].get())
                copy = exposure.rescale_intensity(copy,
                                                  out_range=(startOUTX,
                                                             finishOUTX))

        morfoTry = morfVar.get()
        morfoGirisN = 0
        if (np.ndim(copy) == 3):
            morfoGirisN = 1

        if (morfoTry == 1):
            if (morfoGirisN):
                b, g, r = cv2.split(copy)
                b = morphology.area_closing(b, 128, 9)
                g = morphology.area_closing(g, 128, 9)
                r = morphology.area_closing(r, 128, 9)
                copy = cv2.merge((b, g, r))
            else:
                copy = morphology.area_closing(copy)
        elif (morfoTry == 2):
            if (morfoGirisN):
                b, g, r = cv2.split(copy)
                b = morphology.area_opening(b, 128, 9)
                g = morphology.area_opening(g, 128, 9)
                r = morphology.area_opening(r, 128, 9)
                copy = cv2.merge((b, g, r))
            else:
                copy = morphology.area_opening(copy)
        elif (morfoTry == 3):
            if (morfoGirisN):
                b, g, r = cv2.split(copy)
                b = morphology.erosion(b, disk(6))
                g = morphology.erosion(g, disk(6))
                r = morphology.erosion(r, disk(6))
                copy = cv2.merge((b, g, r))
            else:
                copy = morphology.erosion(copy, disk(6))
        elif (morfoTry == 4):
            if (morfoGirisN):
                b, g, r = cv2.split(copy)
                b = morphology.dilation(b, disk(6))
                g = morphology.dilation(g, disk(6))
                r = morphology.dilation(r, disk(6))
                copy = cv2.merge((b, g, r))
            else:
                copy = morphology.dilation(copy, disk(6))
        elif (morfoTry == 5):
            if (morfoGirisN):
                b, g, r = cv2.split(copy)
                b = morphology.opening(b, disk(6))
                g = morphology.opening(g, disk(6))
                r = morphology.opening(r, disk(6))
                copy = cv2.merge((b, g, r))
            else:
                copy = morphology.opening(copy, disk(6))
        elif (morfoTry == 6):
            if (morfoGirisN):
                b, g, r = cv2.split(copy)
                b = morphology.closing(b, disk(6))
                g = morphology.closing(g, disk(6))
                r = morphology.closing(r, disk(6))
                copy = cv2.merge((b, g, r))
            else:
                copy = morphology.closing(copy, disk(6))
        elif (morfoTry == 7):
            if (morfoGirisN):
                b, g, r = cv2.split(copy)
                b = morphology.white_tophat(b, disk(6))
                g = morphology.white_tophat(g, disk(6))
                r = morphology.white_tophat(r, disk(6))
                copy = cv2.merge((b, g, r))
            else:
                copy = morphology.white_tophat(copy, disk(6))
        elif (morfoTry == 8):
            if (morfoGirisN):
                b, g, r = cv2.split(copy)
                b = morphology.black_tophat(b, disk(6))
                g = morphology.black_tophat(g, disk(6))
                r = morphology.black_tophat(r, disk(6))
                copy = cv2.merge((b, g, r))
            else:
                copy = morphology.black_tophat(copy, disk(6))
        elif (morfoTry == 10):
            if (morfoGirisN):
                copy = cv2.cvtColor(copy, cv2.COLOR_BGR2GRAY)

            copy = exposure.rescale_intensity(copy)
            local_maxima = extrema.local_maxima(copy)
            label_maxima = measure.label(local_maxima)
            copy = color.label2rgb(label_maxima,
                                   copy,
                                   alpha=0.7,
                                   bg_label=0,
                                   bg_color=None,
                                   colors=[(1, 0, 0)])
        elif (morfoTry == 9):
            if (morfoGirisN):
                copy = cv2.cvtColor(copy, cv2.COLOR_BGR2GRAY)
            copy = exposure.rescale_intensity(copy)
            h = 0.05
            h_maxima = extrema.h_maxima(copy, h)
            label_h_maxima = measure.label(h_maxima)
            copy = color.label2rgb(label_h_maxima,
                                   copy,
                                   alpha=0.7,
                                   bg_label=0,
                                   bg_color=None,
                                   colors=[(1, 0, 0)])
        arg[1] = copy
        arg[2] = imageTemp
        cv2.imshow("org", copy)
        cv2.waitKey(0)
        cv2.destroyAllWindows()
        """
Example #28
    def run(self, ips, imgs, para=None):
        imgs[:] = sato(imgs,
                       range(para['start'], para['end'], para['step']),
                       black_ridges=para['bridges'])
Example #29
def sato_filter(im, sigmas):
    '''Apply the Sato ridge filter (bright ridges, reflect boundary mode) with the given sigmas.'''
    im_sato = sato(im, sigmas=sigmas, mode='reflect', black_ridges=False)
    return im_sato
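
For example (a sketch; `data.camera()` is just an assumed test input):

# Usage sketch for sato_filter() above.
from skimage import data
from skimage.filters import sato

im_sato = sato_filter(data.camera(), sigmas=range(1, 5))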
Example #30
def bonus(use_naive=False):
    """
    Fair compare.
    https://github.com/orobix/retina-unet/blob/master/src/retinaNN_predict.py
    """
    from sklearn.metrics import confusion_matrix
    from sklearn.metrics import precision_recall_curve
    from sklearn.metrics import jaccard_score
    from sklearn.metrics import f1_score

    y_pred, y_true = [], []
    for filename in sorted(glob("idx*_s64_out1_p.png")):
        if not use_naive:
            y_pred.append(io.imread(filename, as_gray=True))
        else:
            from skimage.filters import sato

            kwargs = {"sigmas": [1], "black_ridges": 1}
            raw = io.imread(filename.replace("1_p", "3_x"), as_gray=True)
            img = sato(raw, **kwargs)
            # plt.figure()
            # plt.imshow(img)
            # plt.show()
            y_pred.append(img)

        y_true.append(io.imread(filename.replace("1_p", "2_y"), as_gray=True))
    y_pred, y_true = np.array(y_pred), np.array(y_true)

    if use_naive:
        y_pred = RetinalDataset.normalize(np.array(y_pred)) / 255

    print(y_true.shape, y_pred.shape)

    for i in range(len(y_pred)):
        print(i, "DICE", dice_loss_1(y_pred[i], y_true[i]))

    # Confusion matrix
    threshold_confusion = 0.8
    print(y_true.max(), y_pred.max())
    print(y_true.shape, y_pred.shape)

    y_pred = np.where(y_pred > threshold_confusion, 1, 0).astype(int)
    y_true = y_true.astype(int)

    # plt.figure()
    # plt.imshow(y_pred[0])
    # plt.show()

    def __specific(A, B, name="?"):
        A, B = A.flatten(), B.flatten()
        print(f"=== \033[90m(name={name})\033[0m ===")
        confusion = confusion_matrix(A, B)
        print(confusion)
        accuracy = 0
        if float(np.sum(confusion)) != 0:
            accuracy = float(confusion[0, 0] + confusion[1, 1]) / float(
                np.sum(confusion))
        print("Global Accuracy: " + str(accuracy))
        specificity = 0
        if float(confusion[0, 0] + confusion[0, 1]) != 0:
            specificity = float(
                confusion[0, 0]) / float(confusion[0, 0] + confusion[0, 1])
        print("Specificity: " + str(specificity))
        sensitivity = 0
        if float(confusion[1, 1] + confusion[1, 0]) != 0:
            sensitivity = float(
                confusion[1, 1]) / float(confusion[1, 1] + confusion[1, 0])
        print("Sensitivity: " + str(sensitivity))
        precision = 0
        if float(confusion[1, 1] + confusion[0, 1]) != 0:
            precision = float(
                confusion[1, 1]) / float(confusion[1, 1] + confusion[0, 1])
        print("Precision: " + str(precision))
        # F1 score
        F1_score = f1_score(A,
                            B,
                            labels=None,
                            average="binary",
                            sample_weight=None)
        print("F1 score (F-measure): " + str(F1_score))
        # Jaccard similarity index
        jaccard_index = jaccard_score(A, B, average="binary")
        print("Jaccard similarity score: " + str(jaccard_index))

    __specific(y_true, y_pred, name="all")

    for idx in range(y_pred.shape[0]):
        __specific(y_true[idx], y_pred[idx], name=idx)