Code Example #1
File: tools.py  Project: mjirik/lisa
def canny(data, sigma=1, sliceId=2):
    # Run 2D Canny edge detection slice by slice over a 3D volume.
    # np.bool was removed from NumPy; the builtin bool dtype is equivalent.
    edges = np.zeros(data.shape, dtype=bool)
    if sliceId == 2:
        for idx in range(data.shape[2]):
            edges[:, :, idx] = skifil.canny(data[:, :, idx], sigma=sigma)
    elif sliceId == 0:
        for idx in range(data.shape[0]):
            edges[idx, :, :] = skifil.canny(data[idx, :, :], sigma=sigma)
    return edges
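A minimal usage sketch for the slice-wise wrapper above, assuming the wrapper is in scope, numpy is imported as np, and skifil is an alias for the module that provides canny (in current scikit-image that is skimage.feature; older releases exposed it via skimage.filter / skimage.filters):

import numpy as np
from skimage import feature as skifil  # assumed alias; the excerpt does not show its imports

# Synthetic 3D volume: a bright cube on a dark background.
data = np.zeros((32, 32, 32), dtype=float)
data[8:24, 8:24, 8:24] = 1.0

edges = canny(data, sigma=1, sliceId=2)  # per-slice edges along the last axis
print(edges.shape, edges.dtype, edges.any())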
Code Example #2
File: PluginQCMRI.py  Project: wadqc/WAD_Python
def mid_phantom(image_ACR, imarray):
    # Detect edges in image
    edges = filters.canny(imarray,
                          sigma=3,
                          low_threshold=200,
                          high_threshold=1000)

    hough_radii = np.array([190 / 2 / image_ACR.PixelSpacing[1]])
    print(type(edges))
    print(type(hough_radii))
    hough_res = hough_circle(edges, hough_radii)

    # Detect contours and middle of phantom
    centers = []
    radii = []

    for radius, h in zip(hough_radii, hough_res):
        peaks = peak_local_max(h, num_peaks=1)
        centers.extend(peaks)
        radii.extend([radius, radius])

    center_x, center_y = centers[0]
    radius = radii[1]  # Not needed?
    radius = np.int32(radius)  # Not needed?
    cy, cx = circle_perimeter(center_y, center_x, radius)  # Not needed?
    return center_x, center_y, radii
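For reference, newer scikit-image releases also provide skimage.transform.hough_circle_peaks, which folds the manual peak and centre bookkeeping above into a single call. A minimal sketch, assuming the edges and hough_radii arrays computed in the example (cx/cy are column/row coordinates):

from skimage.transform import hough_circle, hough_circle_peaks

hough_res = hough_circle(edges, hough_radii)
accums, cx, cy, radii = hough_circle_peaks(hough_res, hough_radii,
                                           total_num_peaks=1)
center_col, center_row, radius = cx[0], cy[0], radii[0]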
Code Example #3
File: filter_spatial.py  Project: yunjunz/PySAR
def filter(data, filtType, par):

    if   filtType == "sobel":       filt_data = sobel(data)
    elif filtType == "roberts":     filt_data = roberts(data)
    elif filtType == "canny":       filt_data = canny(data)
    elif filtType == "lowpass_avg":
        from scipy import ndimage
        p = int(par)
        kernel = np.ones((p, p), np.float32) / (p * p)
        filt_data = ndimage.convolve(data, kernel)
    elif filtType == "highpass_avg":
        from scipy import ndimage
        p = int(par)
        kernel = np.ones((p, p), np.float32) / (p * p)
        lp_data = ndimage.convolve(data, kernel)
        filt_data = data - lp_data
    elif filtType == "lowpass_gaussian":
        filt_data = gaussian(data, sigma=float(par))
    elif filtType == "highpass_gaussian":
        lp_data   = gaussian(data, sigma=float(par))
        filt_data = data - lp_data
    else:
        # Fail loudly instead of hitting an UnboundLocalError at the return below.
        raise ValueError("Unsupported filtType: {0}".format(filtType))

    #elif filtType ==  "gradient":

    return filt_data
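A small usage sketch for the dispatcher above, assuming its module-level names come from NumPy and scikit-image (the excerpt does not show these imports, and canny has since moved to skimage.feature):

import numpy as np
from skimage.filters import sobel, roberts, gaussian
from skimage.feature import canny

data = np.random.rand(128, 128).astype(np.float32)
hp = filter(data, "highpass_gaussian", "3")  # par is converted with float() inside
lp = filter(data, "lowpass_avg", "5")        # par is converted with int() inside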
Code Example #4
def main():
    plt.figure(facecolor='black')
    plt.gray()

    # images: a list of (at least six) file paths, assumed to be defined elsewhere.
    for i in range(6):
        # as_grey was renamed to as_gray in newer scikit-image releases.
        img = io.imread(images[i], as_grey=True)
        img = filters.canny(img)
        img = morphology.erosion(img, morphology.square(1))

        # Subplot indices are 1-based, so offset the loop counter.
        plt.subplot(2, 3, i + 1)
        plt.axis('off')
        io.imshow(img)
    plt.show()
Code Example #5
def build_r_table(image, origin):
    '''
    Build the R-table from the given shape image and a reference point
    '''
    edges = canny(image,
                  low_threshold=MIN_CANNY_THRESHOLD,
                  high_threshold=MAX_CANNY_THRESHOLD)
    gradient = gradient_orientation(edges)

    r_table = defaultdict(list)
    for (i, j), value in np.ndenumerate(edges):
        if value:
            r_table[gradient[i, j]].append((origin[0] - i, origin[1] - j))

    return r_table
Code Example #6
def eoh(image):
    data = {0: 0, 1: 0, 2: 0, 3: 0, 4: 0, 5: 0, 6: 0, 7: 0, 8: 0}
    canny_image = canny(image, low_threshold=40)
    row, col = canny_image.shape

    sobel_v_image = sobel_v(image)
    sobel_h_image = sobel_h(image)

    # Python 3: xrange -> range, dict.has_key(k) -> k in dict.
    for r in range(row):
        for c in range(col):
            if not canny_image[r][c]:
                continue
            interval = which_interval(sobel_v_image[r][c], sobel_h_image[r][c])
            if interval in data:
                data[interval] += 1

    return data
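The helper which_interval is not included in the excerpt. A purely hypothetical sketch of such a binning function, mapping the Sobel gradient direction to the nine histogram bins used above (bin 0 reserved for negligible gradients, bins 1-8 for the eight 45-degree sectors):

import math

def which_interval(gv, gh, eps=1e-6):
    # Hypothetical implementation; the original project may bin differently.
    magnitude = math.hypot(gv, gh)
    if magnitude < eps:
        return 0
    angle = math.atan2(gv, gh) % (2 * math.pi)  # 0 .. 2*pi
    return int(angle // (math.pi / 4)) + 1      # 1 .. 8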
Code Example #7
def accumulate_gradients(r_table, grayImage):
    '''
    Perform a General Hough Transform with the given image and R-table
    '''
    edges = canny(grayImage,
                  low_threshold=MIN_CANNY_THRESHOLD,
                  high_threshold=MAX_CANNY_THRESHOLD)
    gradient = gradient_orientation(edges)

    accumulator = np.zeros(grayImage.shape)
    for (i, j), value in np.ndenumerate(edges):
        if value:
            for r in r_table[gradient[i, j]]:
                accum_i, accum_j = i + r[0], j + r[1]
                if accum_i < accumulator.shape[0] and accum_j < accumulator.shape[1]:
                    accumulator[accum_i, accum_j] += 1

    return accumulator
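Both Generalized Hough Transform examples (the R-table builder and the accumulator above) rely on a gradient_orientation helper that the excerpts do not define. A minimal sketch of a common implementation, computing the per-pixel edge direction from Sobel derivatives; this is an assumption about the original code, not its verbatim definition:

import numpy as np
from scipy.ndimage import sobel

def gradient_orientation(image):
    # Gradient direction (radians) at every pixel of the edge map.
    dx = sobel(image.astype(float), axis=0, mode='constant')
    dy = sobel(image.astype(float), axis=1, mode='constant')
    return np.arctan2(dy, dx)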
Code Example #8
File: img_tt.py  Project: CalCharles/object_finding
def run_process(img_name, cf):
    if type(img_name) is str:
        img = load_image(img_name)
    else:
        img = img_name
    # Use integer division so the results are valid sizes under Python 3.
    height = img.shape[0] // cf
    width = img.shape[1] // cf
    # NOTE: the target shape below is kept from the source as-is; it looks like a
    # typo for (img.shape[0] // cf, img.shape[1] // cf, 3).
    r_img = sk.transform.resize(img, (img.shape[0]/cf, img.shape[1]/3, cf))
    g_img = greyout(r_img)
    f_img = gaussian_filter(g_img, ST_DEV)
    t_img = thresholder(f_img)
    return height, width, t_img

    # Unreachable debug visualisation kept from the source (never runs after the return).
    edges = canny(t_img, 3)

    skio.imshow(r_img)
    skio.show()
    skio.imshow(f_img)
    skio.show()
    skio.imshow(t_img)
    skio.show()
    skio.imshow(edges)
    skio.show()
Code Example #9
File: filter_spatial.py  Project: nillei/PySAR
def filter(data, filtType, par):

    if filtType == "sobel": filt_data = sobel(data)
    elif filtType == "roberts": filt_data = roberts(data)
    elif filtType == "canny": filt_data = canny(data)
    elif filtType == "lowpass_avg":
        p = int(par)
        kernel = np.ones((p, p), np.float32) / (p * p)
        filt_data = ndimage.convolve(data, kernel)
    elif filtType == "highpass_avg":
        p = int(par)
        kernel = np.ones((p, p), np.float32) / (p * p)
        lp_data = ndimage.convolve(data, kernel)
        filt_data = data - lp_data
    elif filtType == "lowpass_gaussian":
        filt_data = gaussian(data, sigma=float(par))
    elif filtType == "highpass_gaussian":
        lp_data = gaussian(data, sigma=float(par))
        filt_data = data - lp_data
    else:
        # Fail loudly instead of hitting an UnboundLocalError at the return below.
        raise ValueError("Unsupported filtType: {0}".format(filtType))

    #elif filtType ==  "gradient":

    return filt_data
Code Example #10
def test_canny_import():
    data = moon()
    with expected_warnings(['skimage.feature.canny']):
        from skimage.filters import canny
        canny(data)
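The test above documents the relocation of the function: importing canny from skimage.filters was kept only as a deprecated alias that warned and forwarded to skimage.feature.canny. A minimal sketch of the current import and call:

from skimage.data import moon
from skimage.feature import canny  # current home of the Canny detector

edges = canny(moon(), sigma=2)
print(int(edges.sum()), "edge pixels")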
Code Example #11
File: grabseeds.py  Project: tanghaibao/jcvi
def seeds(args):
    """
    %prog seeds [pngfile|jpgfile]

    Extract seed metrics from [pngfile|jpgfile]. Use --rows and --cols to crop image.
    """
    p = OptionParser(seeds.__doc__)
    p.set_outfile()
    opts, args, iopts = add_seeds_options(p, args)

    if len(args) != 1:
        sys.exit(not p.print_help())

    pngfile, = args
    pf = opts.prefix or op.basename(pngfile).rsplit(".", 1)[0]
    sigma, kernel = opts.sigma, opts.kernel
    rows, cols = opts.rows, opts.cols
    labelrows, labelcols = opts.labelrows, opts.labelcols
    ff = opts.filter
    calib = opts.calibrate
    outdir = opts.outdir
    if outdir != '.':
        mkdir(outdir)
    if calib:
        calib = json.load(must_open(calib))
        pixel_cm_ratio, tr = calib["PixelCMratio"], calib["RGBtransform"]
        tr = np.array(tr)

    resizefile, mainfile, labelfile, exif = \
                      convert_image(pngfile, pf, outdir=outdir,
                                    rotate=opts.rotate,
                                    rows=rows, cols=cols,
                                    labelrows=labelrows, labelcols=labelcols)

    oimg = load_image(resizefile)
    img = load_image(mainfile)

    fig, (ax1, ax2, ax3, ax4) = plt.subplots(ncols=4, nrows=1,
                                             figsize=(iopts.w, iopts.h))

    # Edge detection
    img_gray = rgb2gray(img)
    logging.debug("Running {0} edge detection ...".format(ff))
    if ff == "canny":
        edges = canny(img_gray, sigma=opts.sigma)
    elif ff == "roberts":
        edges = roberts(img_gray)
    elif ff == "sobel":
        edges = sobel(img_gray)
    edges = clear_border(edges, buffer_size=opts.border)
    selem = disk(kernel)
    closed = closing(edges, selem) if kernel else edges
    filled = binary_fill_holes(closed)

    # Watershed algorithm
    if opts.watershed:
        distance = distance_transform_edt(filled)
        local_maxi = peak_local_max(distance, threshold_rel=.05, indices=False)
        coordinates = peak_local_max(distance, threshold_rel=.05)
        markers, nmarkers = label(local_maxi, return_num=True)
        logging.debug("Identified {0} watershed markers".format(nmarkers))
        labels = watershed(closed, markers, mask=filled)
    else:
        labels = label(filled)

    # Object size filtering
    w, h = img_gray.shape
    canvas_size = w * h
    min_size = int(round(canvas_size * opts.minsize / 100))
    max_size = int(round(canvas_size * opts.maxsize / 100))
    logging.debug("Find objects with pixels between {0} ({1}%) and {2} ({3}%)"\
                    .format(min_size, opts.minsize, max_size, opts.maxsize))

    # Plotting
    ax1.set_title('Original picture')
    ax1.imshow(oimg)

    params = "{0}, $\sigma$={1}, $k$={2}".format(ff, sigma, kernel)
    if opts.watershed:
        params += ", watershed"
    ax2.set_title('Edge detection\n({0})'.format(params))
    closed = gray2rgb(closed)
    ax2_img = labels
    if opts.edges:
        ax2_img = closed
    elif opts.watershed:
        ax2.plot(coordinates[:, 1], coordinates[:, 0], 'g.')
    ax2.imshow(ax2_img, cmap=iopts.cmap)

    ax3.set_title('Object detection')
    ax3.imshow(img)

    filename = op.basename(pngfile)
    if labelfile:
        accession = extract_label(labelfile)
    else:
        accession = pf

    # Calculate region properties
    rp = regionprops(labels)
    rp = [x for x in rp if min_size <= x.area <= max_size]
    nb_labels = len(rp)
    logging.debug("A total of {0} objects identified.".format(nb_labels))
    objects = []
    for i, props in enumerate(rp):
        i += 1
        if i > opts.count:
            break

        y0, x0 = props.centroid
        orientation = props.orientation
        major, minor = props.major_axis_length, props.minor_axis_length
        major_dx = cos(orientation) * major / 2
        major_dy = sin(orientation) * major / 2
        minor_dx = sin(orientation) * minor / 2
        minor_dy = cos(orientation) * minor / 2
        ax2.plot((x0 - major_dx, x0 + major_dx),
                 (y0 + major_dy, y0 - major_dy), 'r-')
        ax2.plot((x0 - minor_dx, x0 + minor_dx),
                 (y0 - minor_dy, y0 + minor_dy), 'r-')

        npixels = int(props.area)
        # Sample the center of the blob for color
        d = min(int(round(minor / 2 * .35)) + 1, 50)
        x0d, y0d = int(round(x0)), int(round(y0))
        square = img[(y0d - d):(y0d + d), (x0d - d):(x0d + d)]
        pixels = []
        for row in square:
            pixels.extend(row)
        logging.debug("Seed #{0}: {1} pixels ({2} sampled) - {3:.2f}%".\
                        format(i, npixels, len(pixels), 100. * npixels / canvas_size))

        rgb = pixel_stats(pixels)
        objects.append(Seed(filename, accession, i, rgb, props, exif))
        minr, minc, maxr, maxc = props.bbox
        rect = Rectangle((minc, minr), maxc - minc, maxr - minr,
                                  fill=False, ec='w', lw=1)
        ax3.add_patch(rect)
        mc, mr = (minc + maxc) / 2, (minr + maxr) / 2
        ax3.text(mc, mr, "{0}".format(i), color='w',
                    ha="center", va="center", size=6)

    for ax in (ax2, ax3):
        ax.set_xlim(0, h)
        ax.set_ylim(w, 0)

    # Output identified seed stats
    ax4.text(.1, .92, "File: {0}".format(latex(filename)), color='g')
    ax4.text(.1, .86, "Label: {0}".format(latex(accession)), color='m')
    yy = .8
    fw = must_open(opts.outfile, "w")
    if not opts.noheader:
        print(Seed.header(calibrate=calib), file=fw)
    for o in objects:
        if calib:
            o.calibrate(pixel_cm_ratio, tr)
        print(o, file=fw)
        i = o.seedno
        if i > 7:
            continue
        ax4.text(.01, yy, str(i), va="center", bbox=dict(fc='none', ec='k'))
        ax4.text(.1, yy, o.pixeltag, va="center")
        yy -= .04
        ax4.add_patch(Rectangle((.1, yy - .025), .12, .05, lw=0,
                      fc=rgb_to_hex(o.rgb)))
        ax4.text(.27, yy, o.hashtag, va="center")
        yy -= .06
    ax4.text(.1 , yy, "(A total of {0} objects displayed)".format(nb_labels),
             color="darkslategrey")
    normalize_axes(ax4)

    for ax in (ax1, ax2, ax3):
        xticklabels = [int(x) for x in ax.get_xticks()]
        yticklabels = [int(x) for x in ax.get_yticks()]
        ax.set_xticklabels(xticklabels, family='Helvetica', size=8)
        ax.set_yticklabels(yticklabels, family='Helvetica', size=8)

    image_name = op.join(outdir, pf + "." + iopts.format)
    savefig(image_name, dpi=iopts.dpi, iopts=iopts)
    return objects
Code Example #12
coordinates = peak_local_max(image, min_distance=20)

ax3.imshow(image, cmap=plt.cm.gray)
ax3.autoscale(False)
ax3.plot(coordinates[:, 1], coordinates[:, 0], 'r.')
ax3.set_title('Peak local maxima', fontsize=24)
ax3.axis('off')


# Detect edges.
from skimage import filters

edges = filters.canny(image, sigma=3,
                      low_threshold=10,
                      high_threshold=80)

ax4.imshow(edges, cmap=plt.cm.gray)
ax4.set_title('Edges', fontsize=24)
ax4.axis('off')


# Label image regions.
from skimage.measure import regionprops
import matplotlib.patches as mpatches
from skimage.morphology import label

label_image = label(edges)

ax5.imshow(image, cmap=plt.cm.gray)
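The fragment imports regionprops and matplotlib.patches but stops before using them; a possible continuation (not part of the original) that outlines each labeled region on the last axis:

# Hypothetical continuation: draw a bounding box around every labeled region.
for region in regionprops(label_image):
    minr, minc, maxr, maxc = region.bbox
    rect = mpatches.Rectangle((minc, minr), maxc - minc, maxr - minr,
                              fill=False, edgecolor='red', linewidth=1)
    ax5.add_patch(rect)
ax5.set_title('Labeled regions', fontsize=24)
ax5.axis('off')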
Code Example #13
def seeds(args):
    """
    %prog seeds [pngfile|jpgfile]

    Extract seed metrics from [pngfile|jpgfile]. Use --rows and --cols to crop image.
    """
    p = OptionParser(seeds.__doc__)
    p.set_outfile()
    opts, args, iopts = add_seeds_options(p, args)

    if len(args) != 1:
        sys.exit(not p.print_help())

    pngfile, = args
    pf = opts.prefix or op.basename(pngfile).rsplit(".", 1)[0]
    sigma, kernel = opts.sigma, opts.kernel
    rows, cols = opts.rows, opts.cols
    labelrows, labelcols = opts.labelrows, opts.labelcols
    ff = opts.filter
    calib = opts.calibrate
    outdir = opts.outdir
    if outdir != '.':
        mkdir(outdir)
    if calib:
        calib = json.load(must_open(calib))
        pixel_cm_ratio, tr = calib["PixelCMratio"], calib["RGBtransform"]
        tr = np.array(tr)

    resizefile, mainfile, labelfile, exif = \
                      convert_image(pngfile, pf, outdir=outdir,
                                    rotate=opts.rotate,
                                    rows=rows, cols=cols,
                                    labelrows=labelrows, labelcols=labelcols)

    oimg = load_image(resizefile)
    img = load_image(mainfile)

    fig, (ax1, ax2, ax3, ax4) = plt.subplots(ncols=4, nrows=1,
                                             figsize=(iopts.w, iopts.h))

    # Edge detection
    img_gray = rgb2gray(img)
    logging.debug("Running {0} edge detection ...".format(ff))
    if ff == "canny":
        edges = canny(img_gray, sigma=opts.sigma)
    elif ff == "roberts":
        edges = roberts(img_gray)
    elif ff == "sobel":
        edges = sobel(img_gray)
    edges = clear_border(edges, buffer_size=opts.border)
    selem = disk(kernel)
    closed = closing(edges, selem) if kernel else edges
    filled = binary_fill_holes(closed)

    # Watershed algorithm
    if opts.watershed:
        distance = distance_transform_edt(filled)
        local_maxi = peak_local_max(distance, threshold_rel=.05, indices=False)
        coordinates = peak_local_max(distance, threshold_rel=.05)
        markers, nmarkers = label(local_maxi, return_num=True)
        logging.debug("Identified {0} watershed markers".format(nmarkers))
        labels = watershed(closed, markers, mask=filled)
    else:
        labels = label(filled)

    # Object size filtering
    w, h = img_gray.shape
    canvas_size = w * h
    min_size = int(round(canvas_size * opts.minsize / 100))
    max_size = int(round(canvas_size * opts.maxsize / 100))
    logging.debug("Find objects with pixels between {0} ({1}%) and {2} ({3}%)"\
                    .format(min_size, opts.minsize, max_size, opts.maxsize))

    # Plotting
    ax1.set_title('Original picture')
    ax1.imshow(oimg)

    params = "{0}, $\sigma$={1}, $k$={2}".format(ff, sigma, kernel)
    if opts.watershed:
        params += ", watershed"
    ax2.set_title('Edge detection\n({0})'.format(params))
    closed = gray2rgb(closed)
    ax2_img = labels
    if opts.edges:
        ax2_img = closed
    elif opts.watershed:
        ax2.plot(coordinates[:, 1], coordinates[:, 0], 'g.')
    ax2.imshow(ax2_img, cmap=iopts.cmap)

    ax3.set_title('Object detection')
    ax3.imshow(img)

    filename = op.basename(pngfile)
    if labelfile:
        accession = extract_label(labelfile)
    else:
        accession = pf

    # Calculate region properties
    rp = regionprops(labels)
    rp = [x for x in rp if min_size <= x.area <= max_size]
    nb_labels = len(rp)
    logging.debug("A total of {0} objects identified.".format(nb_labels))
    objects = []
    for i, props in enumerate(rp):
        i += 1
        if i > opts.count:
            break

        y0, x0 = props.centroid
        orientation = props.orientation
        major, minor = props.major_axis_length, props.minor_axis_length
        major_dx = cos(orientation) * major / 2
        major_dy = sin(orientation) * major / 2
        minor_dx = sin(orientation) * minor / 2
        minor_dy = cos(orientation) * minor / 2
        ax2.plot((x0 - major_dx, x0 + major_dx),
                 (y0 + major_dy, y0 - major_dy), 'r-')
        ax2.plot((x0 - minor_dx, x0 + minor_dx),
                 (y0 - minor_dy, y0 + minor_dy), 'r-')

        npixels = int(props.area)
        # Sample the center of the blob for color
        d = min(int(round(minor / 2 * .35)) + 1, 50)
        x0d, y0d = int(round(x0)), int(round(y0))
        square = img[(y0d - d):(y0d + d), (x0d - d):(x0d + d)]
        pixels = []
        for row in square:
            pixels.extend(row)
        logging.debug("Seed #{0}: {1} pixels ({2} sampled) - {3:.2f}%".\
                        format(i, npixels, len(pixels), 100. * npixels / canvas_size))

        rgb = pixel_stats(pixels)
        objects.append(Seed(filename, accession, i, rgb, props, exif))
        minr, minc, maxr, maxc = props.bbox
        rect = Rectangle((minc, minr), maxc - minc, maxr - minr,
                                  fill=False, ec='w', lw=1)
        ax3.add_patch(rect)
        mc, mr = (minc + maxc) / 2, (minr + maxr) / 2
        ax3.text(mc, mr, "{0}".format(i), color='w',
                    ha="center", va="center", size=6)

    for ax in (ax2, ax3):
        ax.set_xlim(0, h)
        ax.set_ylim(w, 0)

    # Output identified seed stats
    ax4.text(.1, .92, "File: {0}".format(latex(filename)), color='g')
    ax4.text(.1, .86, "Label: {0}".format(latex(accession)), color='m')
    yy = .8
    fw = must_open(opts.outfile, "w")
    if not opts.noheader:
        print(Seed.header(calibrate=calib), file=fw)
    for o in objects:
        if calib:
            o.calibrate(pixel_cm_ratio, tr)
        print(o, file=fw)
        i = o.seedno
        if i > 7:
            continue
        ax4.text(.01, yy, str(i), va="center", bbox=dict(fc='none', ec='k'))
        ax4.text(.1, yy, o.pixeltag, va="center")
        yy -= .04
        ax4.add_patch(Rectangle((.1, yy - .025), .12, .05, lw=0,
                      fc=rgb_to_hex(o.rgb)))
        ax4.text(.27, yy, o.hashtag, va="center")
        yy -= .06
    ax4.text(.1 , yy, "(A total of {0} objects displayed)".format(nb_labels),
             color="darkslategrey")
    normalize_axes(ax4)

    for ax in (ax1, ax2, ax3):
        xticklabels = [int(x) for x in ax.get_xticks()]
        yticklabels = [int(x) for x in ax.get_yticks()]
        ax.set_xticklabels(xticklabels, family='Helvetica', size=8)
        ax.set_yticklabels(yticklabels, family='Helvetica', size=8)

    image_name = op.join(outdir, pf + "." + iopts.format)
    savefig(image_name, dpi=iopts.dpi, iopts=iopts)
    return objects
Code Example #14
from skimage import data, filters, io, color
# img = color.rgb2gray(data.coffee())
# To read your own image instead, use the code below; the path is the location of
# the image file (preferably a full path). Passing as_gray=True converts the image
# to grayscale on load, so a separate rgb2gray() call is not needed here.
img = io.imread(r".\img\yu1.png", as_gray=True)
edge_img = filters.canny(img)
io.imshow(edge_img)
io.show()
Code Example #15
grad_x = np.diff(coffee_gray, axis=1)
io.imshow(grad_x)

#working with filters

# Gaussian filter
filtered1 = filters.gaussian(coffee, sigma=3)
io.imshow(filtered1)
filtered2 = filters.gaussian(coffee, sigma=[1, 10])
io.imshow(filtered2)

#sobel filter for edge detection
text = data.text()
io.imshow(text)
edges = filters.sobel(text)
io.imshow(edges)

#canny edge detector
edges1 = filters.canny(coffee_gray)
io.imshow(edges1)
edges2 = filters.canny(coffee_gray, sigma=2)
io.imshow(edges2)
edges3 = filters.canny(coffee_gray,
                       sigma=2,
                       low_threshold=0.2,
                       high_threshold=0.5)
io.imshow(edges3)

#feature detection
from skimage.feature import corner_harris, peak_local_max, corner_peaks
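The excerpt ends right after the feature-detection imports; a minimal sketch of how they are typically combined, reusing the text image loaded above (the distance and threshold values are arbitrary):

corners = corner_peaks(corner_harris(text), min_distance=5, threshold_rel=0.02)
print(len(corners), "Harris corners found")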
Code Example #16
File: filter_canny.py  Project: jboy/ncss2016
import matplotlib.pyplot as plt
# NOTE: scipy.misc.imread was removed in SciPy 1.2; imageio.v2.imread is a drop-in
# replacement in newer environments.
from scipy.misc import imread
img = imread("Slides/Images/IMAG0537.jpg")

from skimage.color import rgb2grey  # newer scikit-image spells this rgb2gray
try:
    # http://scikit-image.org/docs/dev/api/skimage.feature.html#skimage.feature.canny
    from skimage.filters import canny
except ImportError:
    # Must be an older version of skimage...
    from skimage.filter import canny

img_g = rgb2grey(img)
# Canny only accepts greyscale images.
edges = canny(img_g, 2)

fig, (ax0, ax1) = plt.subplots(nrows=2)
ax0.imshow(img_g, cmap=plt.cm.Greys_r)
ax0.set_title("Original image")
ax0.axis("off")
ax1.imshow(edges, cmap=plt.cm.Greys_r)
ax1.set_title("Edges by Canny")
ax1.axis("off")
plt.show()
Code Example #17
File: cbar_detect.py  Project: stefanv/cbar_detector
from skimage import measure, io, color, img_as_float, filters
import matplotlib.pyplot as plt
import numpy as np


img = img_as_float(io.imread('test_figs/ligo.jpg'))
#mask = color.deltaE_ciede2000(img, (1, 1, 1)) > 0.2
mask = filters.canny(color.rgb2gray(img))
labels = measure.label(mask)
regions = measure.regionprops(labels)

def bbox_area(bbox):
    r0, c0, r1, c1 = bbox
    return abs((r0 - r1) * (c0 - c1))

regions = [r for r in regions if r.convex_area / bbox_area(r.bbox) > 0.9]
regions = [r for r in regions if r.area > 200]

out = np.zeros_like(img)
for r in regions:
    out[r.coords[:, 0], r.coords[:, 1]] = r.convex_area / bbox_area(r.bbox)

#plt.imshow((labels * 1123) % 87, cmap='spectral')
plt.imshow(out, cmap='gray')
plt.colorbar()
plt.show()