Example #1
def test_float_out_of_range():
    too_high = np.array([2], dtype=np.float32)
    with testing.raises(ValueError):
        img_as_int(too_high)
    too_low = np.array([-2], dtype=np.float32)
    with testing.raises(ValueError):
        img_as_int(too_low)
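For contrast, a minimal sketch (not part of the example above) of the accepted case: img_as_int only takes floating-point input in the range [-1, 1] and scales it onto the int16 range.

import numpy as np
from skimage import img_as_int

in_range = np.array([-1.0, 0.0, 0.5, 1.0], dtype=np.float32)
out = img_as_int(in_range)   # values scaled onto the int16 range
print(out.dtype)             # int16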
Example #2
def get_points(img):
    """
    Get the four corner points of the border edges, using the fact that
    the image is a closed convex set, so extrema occur at corner points.

    Parameters
    ----------
    img : ndarray
        Input image.

    Returns
    -------
    The coordinates of the 4 corner points.
    """
    
    
    im = img_as_int(img).astype('int16')
    actv_lst = get_ones_cy.get_ones_fast(im)
    
    p2 = min(actv_lst)
    p4 = max(actv_lst)
    
    
    img_to_flip  = Image.fromarray(im)
    transposed  = img_to_flip.transpose(Image.ROTATE_90)
    rotated = np.array(transposed)
    rotated = img_as_int(rotated)
    other_lst = get_ones_cy.get_ones_fast(rotated)
    
    p1 = max(other_lst)
    p1 = switch_pair(p1,im)
    
    p3 = min(other_lst)
    p3 = switch_pair(p3,im)

    return p1,p2,p3,p4
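The helpers get_ones_cy.get_ones_fast and switch_pair are project-specific and not shown here. A self-contained sketch of the same convexity idea, finding the extreme foreground coordinates of a binary mask with plain NumPy (names and data are illustrative):

import numpy as np

mask = np.zeros((5, 5), dtype=bool)
mask[1:4, 2] = True
coords = np.argwhere(mask)              # (row, col) pairs of foreground pixels
top = coords[coords[:, 0].argmin()]     # smallest row  -> topmost point
bottom = coords[coords[:, 0].argmax()]  # largest row   -> bottommost point
left = coords[coords[:, 1].argmin()]    # smallest col  -> leftmost point
right = coords[coords[:, 1].argmax()]   # largest col   -> rightmost point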
Example #4
 def as_type(self: _T, dtype: DType) -> _T:
     if dtype == DType.FLOAT:
         return self.from_self(data=img_as_float(self._data))
     elif dtype == DType.UNSIGNED_INT:
         return self.from_self(data=img_as_uint(self._data))
     elif dtype == DType.INT:
         return self.from_self(data=img_as_int(self._data))
Example #5
def color_check(plugin, fmt='png'):
    """Check roundtrip behavior for color images.

    All major input types should be handled as ubytes and read
    back correctly.
    """
    img = img_as_ubyte(data.chelsea())
    r1 = roundtrip(img, plugin, fmt)
    testing.assert_allclose(img, r1)

    img2 = img > 128
    r2 = roundtrip(img2, plugin, fmt)
    testing.assert_allclose(img2.astype(np.uint8), r2)

    img3 = img_as_float(img)
    r3 = roundtrip(img3, plugin, fmt)
    testing.assert_allclose(r3, img)

    img4 = img_as_int(img)
    if fmt.lower() in (('tif', 'tiff')):
        img4 -= 100
        r4 = roundtrip(img4, plugin, fmt)
        testing.assert_allclose(r4, img4)
    else:
        r4 = roundtrip(img4, plugin, fmt)
        testing.assert_allclose(r4, img_as_ubyte(img4))

    img5 = img_as_uint(img)
    r5 = roundtrip(img5, plugin, fmt)
    testing.assert_allclose(r5, img)
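The roundtrip helper used above is not shown in this excerpt. A minimal sketch of the idea, assuming it simply writes the image with the given plugin/format and reads it back (an assumption, not the library's exact implementation):

import os
import tempfile
from skimage import io

def roundtrip_sketch(img, plugin, fmt):
    # hypothetical stand-in for the roundtrip() helper used above
    fname = os.path.join(tempfile.mkdtemp(), 'roundtrip.' + fmt)
    io.imsave(fname, img, plugin=plugin)
    return io.imread(fname, plugin=plugin)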
Example #6
def mono_check(plugin, fmt='png'):
    """Check the roundtrip behavior for images that support most types.

    All major input types should be handled.
    """

    img = img_as_ubyte(data.moon())
    r1 = roundtrip(img, plugin, fmt)
    testing.assert_allclose(img, r1)

    img2 = img > 128
    r2 = roundtrip(img2, plugin, fmt)
    testing.assert_allclose(img2.astype(np.uint8), r2)

    img3 = img_as_float(img)
    r3 = roundtrip(img3, plugin, fmt)
    if r3.dtype.kind == 'f':
        testing.assert_allclose(img3, r3)
    else:
        testing.assert_allclose(r3, img_as_uint(img))

    img4 = img_as_int(img)
    if fmt.lower() in (('tif', 'tiff')):
        img4 -= 100
        r4 = roundtrip(img4, plugin, fmt)
        testing.assert_allclose(r4, img4)
    else:
        r4 = roundtrip(img4, plugin, fmt)
        testing.assert_allclose(r4, img_as_uint(img4))

    img5 = img_as_uint(img)
    r5 = roundtrip(img5, plugin, fmt)
    testing.assert_allclose(r5, img5)
Example #7
def get_blobs(binary_image):
    integered = skimage.img_as_int(binary_image)
    labeled = skimage.measure.label(integered)
    props = skimage.measure.regionprops(label_image=labeled,
                                        intensity_image=integered,
                                        cache=True)
    return props
Example #8
 def _encode_image(self, img_arr):
     """Save the image array as PNG and then encode with base64 for embedding"""
     img_arr = img_as_int(img_arr)
     sio = BytesIO()
     sp_imsave(sio, img_arr, 'png')
     encoded = b64encode(sio.getvalue()).decode()
     sio.close()
     return encoded
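An illustrative (assumed) use of the string returned above: a base64-encoded PNG can be dropped straight into an HTML data URI.

from base64 import b64encode

payload = b'\x89PNG\r\n...'   # stands in for real PNG bytes, illustration only
encoded = b64encode(payload).decode()
html = '<img src="data:image/png;base64,{}"/>'.format(encoded)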
Example #9
def normalize_img(img, as_int=False, max_val=255):
    min_b, max_b = min_max_img(img)
    range_b = max_b - min_b
    n_img = np.zeros_like(img, dtype=float)
    for i in range(img.shape[0]):
        for j in range(img.shape[1]):
            n_img[i, j] = (img[i, j] - min_b) / range_b
    if (as_int): n_img = sk.img_as_int(n_img * max_val)
    return n_img
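The per-pixel loop above performs a plain min-max normalization; a vectorized equivalent of the same arithmetic (min_max_img is not shown in the excerpt, so min()/max() are used here, and the optional as_int scaling is omitted):

import numpy as np

def normalize_img_vectorized(img):
    min_b, max_b = float(img.min()), float(img.max())
    return (img.astype(float) - min_b) / (max_b - min_b)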
Example #10
def prewitt_edge(image_name):
    img_file = cv2.imread(image_name, 0)
    img = img_as_int(img_file)

    prewitt_vertical = np.array([[-1, 0, 1], [-1, 0, 1], [-1, 0, 1]], dtype="float64")
    prewitt_vertical_out = img_as_ubyte(ndimage.convolve(img, prewitt_vertical, mode='constant', cval=0.0))
    
    plt.imshow(prewitt_vertical_out)
    plt.title("Prewitt Edge Detection")
    plt.show()
Example #11
def image_opener(path):
    directory = os.listdir(path)
    image_list = []
    path_list = []
    for file in directory:
        full_path = path + file
        path_list.append(full_path)
        image = skimage.data.load(full_path, as_gray=True)
        image = skimage.img_as_int(image)
        image = image.flatten()
        image_list.append(np.array(image))
    return image_list, path_list
Example #12
    def save_images(predictions, segmented_folder):
        if not os.path.exists(segmented_folder):
            os.mkdir(segmented_folder)

        for i, batch_preds in enumerate(predictions):
            batch_preds = (batch_preds >= 0.5).astype(np.uint8)
            n_images = batch_preds.shape[0]
            for j in range(n_images):
                img_arr = img_as_int(np.squeeze(batch_preds[j]))
                io.imsave(fname=segmented_folder +
                          "/img_{}_{}.png".format(i, j),
                          arr=img_arr)
Example #13
def obtainLabels(img):
    data_array = cv2.imread(img, 1)

    b, g, r = cv2.split(data_array)

    r = adjust_gamma(r, gamma=1.5)

    # r = img_as_float(r)
    # labels_walk = random_walker(r, markers, beta=10, mode='bf')
    labels_walk = chan_vese(r,
                            mu=0.25,
                            lambda1=1,
                            lambda2=2,
                            tol=1e-3,
                            max_iter=200,
                            dt=0.5,
                            init_level_set="checkerboard",
                            extended_output=True)[1]

    labels_walk[labels_walk >= 0.2] = 1
    labels_walk[labels_walk < 0.2] = 0
    labels_walk = img_as_int(labels_walk)

    ######################################################################
    # Small spurious objects are easily removed by setting a minimum size for valid objects.
    labels_walk = morphology.remove_small_objects(labels_walk,
                                                  100,
                                                  connectivity=4)

    # Watershed

    D = ndi.distance_transform_edt(labels_walk)
    localMax = peak_local_max(D,
                              indices=False,
                              min_distance=20,
                              labels=labels_walk)
    # perform a connected component analysis on the local peaks,
    # using 8-connectivity, then apply the Watershed algorithm
    markers = ndi.label(localMax, structure=np.ones((3, 3)))[0]
    labels = watershed(-D, markers, mask=labels_walk)
    print("[INFO] {} unique segments found".format(len(np.unique(labels)) - 1))

    # Uncomment to bypass watershed
    labels, num = ndi.label(labels_walk, structure=np.ones((3, 3)))

    # remove edge nuclei
    labels_image = clear_border(labels)
    # recount labels
    number_regions = np.delete(np.unique(labels_image), 0)

    return labels_image, number_regions, r, g
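A small illustration of the remove_small_objects step used above: in a labelled image, connected regions smaller than the minimum size are set to background (values here are chosen for illustration).

import numpy as np
from skimage import morphology

lbl = np.zeros((10, 10), dtype=int)
lbl[0:2, 0:2] = 1        # 4 pixels  -> removed with min_size=5
lbl[5:9, 5:9] = 2        # 16 pixels -> kept
cleaned = morphology.remove_small_objects(lbl, min_size=5)
print(np.unique(cleaned))   # [0 2]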
Example #14
def segment_conidia(input_file, output_file):
    img = io.imread(input_file)
    img = img_as_float(img)
    img = filters.gaussian(img, 1)

    thresholded = img_as_ubyte(img > filters.threshold_otsu(img))
    thresholded = mahotas.close_holes(thresholded)

    distance = ndi.distance_transform_edt(thresholded)
    local_maxi = distance == morph.dilation(distance, morph.square(5))
    local_maxi[thresholded == 0] = 0
    markers = ndi.label(local_maxi)[0]

    labels = watershed(-distance, markers, mask=thresholded)
    tifffile.imsave(output_file, img_as_int(labels), compress=5)
Example #15
File: image.py  Project: goulu/Goulib
 def getdata(self, dtype=np.uint8, copy=True):
     a = self.array
     if a.dtype == dtype:
         if copy:  # to be coherent
             a = np.copy(self.array)
     elif dtype == np.float:
         a = skimage.img_as_float(a, copy)
     elif dtype == np.int16:
         a = skimage.img_as_int(a, copy)
     elif dtype == np.uint16:
         a = skimage.img_as_uint(a, copy)
     elif dtype == np.uint8:
         a = skimage.img_as_ubyte(a, copy)
     else:
         pass  # keep the wrong type for now and see what happens
     return a
Example #16
def save_image():

    # creating a image object (main image)
    im1 = io.imread(window.filename)
    im1 = rgb2gray(im1)
    im1 = sm.img_as_int(im1)

    # save a image using extension
    io.imsave(new_path(), im1)

    global selected_image
    selected_image = ImageTk.PhotoImage(
        Image.open(new_path()).resize((640, 400), Image.ANTIALIAS))
    image = Label(window, image=selected_image).grid(row=7,
                                                     column=0,
                                                     columnspan=3)
Example #18
def findROI(img):
    img2 = ski.color.rgb2hsv(img)
    msk0v = ski.filters.threshold_otsu(img2[:, :, 0])
    msk1v = ski.filters.threshold_otsu(img2[:, :, 1])

    msk0 = img2[:, :, 0] > msk0v
    msk1 = img2[:, :, 1] > msk1v

    msk0 = morp.remove_small_objects(msk0, min_size=36)
    msk1 = morp.remove_small_objects(msk1, min_size=36)

    msk3 = np.logical_and(msk0, msk1)
    disk = morp.disk(20)
    msk3 = morp.binary_dilation(msk3, disk)

    msk = ski.img_as_int(msk3)
    return msk
Example #19
def image_segmentation(in_file_name, out_file_name, show_image, numseg):
    n1_img = nib.load(in_file_name)
    img_data = n1_img.get_data()
    print(img_data.shape)

    slice = np.zeros((img_data.shape[0],img_data.shape[1], img_data.shape[2]))
    segm= np.zeros((img_data.shape[0],img_data.shape[1], img_data.shape[2]), dtype = np.int16)
    for i in range(img_data.shape[2]):
        slice[:,:, i] = img_data[:, :, i, 0]
        if (slice[i].min() >= 0):
            labels1 = segmentation.slic(slice[:,:, i], compactness=30, n_segments=numseg, multichannel=False)
            segm[:, :, i] = skimage.img_as_int(labels1)


    if (show_image):
        show_slices([slice[100], slice[110], slice[120]])
        plt.suptitle("slices")

    for i in range(img_data.shape[2]):
        img_data[:, :, i, 0] = segm[:, :, i]

    if (show_image):
        # display results
        fig, (ax1, ax2, ax3) = plt.subplots(nrows=1, ncols=3, figsize=(8, 3), sharex=True, sharey=True)

        ax1.imshow(img_data[:,:, 100, 0])
        ax1.axis('off')
        ax1.set_title('image 100', fontsize=20)

        ax2.imshow(img_data[:,:,110,0])
        ax2.axis('off')
        ax2.set_title('image 110', fontsize=20)

        ax3.imshow(img_data[:,:,120,0])
        ax3.axis('off')
        ax3.set_title('image 120', fontsize=20)

        fig.subplots_adjust(wspace=0.02, hspace=0.02, top=0.9,
                            bottom=0.02, left=0.02, right=0.98)

        plt.show()

    save_img = nib.Nifti1Image(img_data, np.eye(4))
    nib.save(save_img, out_file_name)
    return 0
Example #20
def band_at_timepoint_to_zarr(
        timepoint_fn,
        timepoint_number,
        band,
        band_number,
        *,
        out_zarrs=None,
        min_level_shape=(1024, 1024),
        num_timepoints=None,
        num_bands=None,
):
    basepath = os.path.splitext(os.path.basename(timepoint_fn))[0]
    path = basepath + '/' + basepath + '_' + band + '.tif'
    image = ziptiff2array(timepoint_fn, path)
    shape = image.shape
    dtype = image.dtype
    max_layer = np.log2(
        np.max(np.array(shape) / np.array(min_level_shape))
    ).astype(int)
    pyramid = pyramid_gaussian(image, max_layer=max_layer, downscale=DOWNSCALE)
    im_pyramid = list(pyramid)
    if isinstance(out_zarrs, str):
        fout_zarr = out_zarrs
        out_zarrs = []
        compressor = Blosc(cname='zstd', clevel=9, shuffle=Blosc.SHUFFLE, blocksize=0)
        for i in range(len(im_pyramid)):
            r, c = im_pyramid[i].shape
            out_zarrs.append(zarr.open(
                    os.path.join(fout_zarr, str(i)), 
                    mode='a', 
                    shape=(num_timepoints, num_bands, 1, r, c), 
                    dtype=np.int16,
                    chunks=(1, 1, 1, *min_level_shape), 
                    compressor=compressor,
                )
            )

    # for each resolution:
    for pyramid_level, downscaled in enumerate(im_pyramid):
        # convert back to int16
        downscaled = skimage.img_as_int(downscaled)
        # store into appropriate zarr
        out_zarrs[pyramid_level][timepoint_number, band_number, 0, :, :] = downscaled
    
    return out_zarrs
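A worked example of the max_layer computation above: a 4096 x 2048 image with min_level_shape (1024, 1024) yields two pyramid levels beyond the full-resolution one (the shape here is an illustrative assumption).

import numpy as np

shape = (4096, 2048)
min_level_shape = (1024, 1024)
max_layer = np.log2(
    np.max(np.array(shape) / np.array(min_level_shape))
).astype(int)
print(max_layer)   # 2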
Example #21
    def createmultipleinputs(inputpath):
        # pad to square
        im = pngread(inputpath)
        if len(im.shape) == 3:
            print(
                'Images should be grayscale but had dimensions {} - automatically converted'
                .format(im.shape))
            im = np.sum(im, 2)
        im = np.uint16(
            img_as_int(exposure.rescale_intensity(im,
                                                  out_range=(0, 2**15 - 1))))
        imshape = im.shape
        edgediff = np.max(imshape) - np.min(imshape)
        orig = im
        if imshape[1] > imshape[0]:
            orig = cv2.copyMakeBorder(im,
                                      math.ceil(edgediff / 2),
                                      math.ceil(edgediff / 2),
                                      0,
                                      0,
                                      cv2.BORDER_CONSTANT,
                                      value=[0, 0, 0])
        if imshape[0] > imshape[1]:
            orig = cv2.copyMakeBorder(im,
                                      0,
                                      0,
                                      math.ceil(edgediff / 2),
                                      math.ceil(edgediff / 2),
                                      cv2.BORDER_CONSTANT,
                                      value=[0, 0, 0])

        # ==>resize to 1024
        im1024 = cv2.resize(orig, (1024, 1024), interpolation=cv2.INTER_AREA)
        # ==>resize to 720
        im720 = cv2.resize(orig, (720, 720), interpolation=cv2.INTER_AREA)
        # preprocess both
        im1024preproc = preproc(im1024)
        im720preproc = preproc(im720)
        return ([orig, im1024preproc, im720preproc, im1024, im720])
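A worked example of the pad-to-square arithmetic above: a 300 x 500 image (an illustrative shape) gets ceil(200 / 2) = 100 rows added to the top and bottom, producing a 500 x 500 result.

import math

imshape = (300, 500)
edgediff = max(imshape) - min(imshape)      # 200
pad = math.ceil(edgediff / 2)               # 100
print((imshape[0] + 2 * pad, imshape[1]))   # (500, 500)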
Example #22
def dynamic_masking(image):
    """ Dynamically masks out the objects in the PIV images
    
    Parameters
    ----------
    image : np.ndarray
        a two-dimensional array of uint16, uint8 or similar type
            
    Returns
    -------
    image : 2d np.ndarray of floats
        
    """
    image = exposure.rescale_intensity(image, in_range=(0, 1))
    blurback = gaussian_filter(median_filter(image,size=3),sigma=3)
    # create the boolean mask 
    bw = (blurback > .3).astype('bool')
    bw = binary_fill_holes(bw)
    image[bw] = 0.0    # mask out the white regions
    image -= blurback  # subtract the blurred background
    # subtraction produces negative values, so rescale back to the 0-1 interval
    image = img_as_int(exposure.rescale_intensity(image,in_range=(0,1)))
    
    return image
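A self-contained sketch of the same masking idea on synthetic data; the random image is an assumption, while the 0.3 threshold mirrors the code above.

import numpy as np
from scipy.ndimage import gaussian_filter, median_filter, binary_fill_holes
from skimage import exposure, img_as_int

rng = np.random.default_rng(0)
image = rng.random((64, 64)) * 0.2        # synthetic dim background
image[20:30, 20:30] = 1.0                 # a bright object to mask out

blurback = gaussian_filter(median_filter(image, size=3), sigma=3)
bw = binary_fill_holes(blurback > 0.3)    # boolean mask of the bright region
image[bw] = 0.0                           # mask out the bright region
image -= blurback                         # subtract the blurred background
masked = img_as_int(exposure.rescale_intensity(image, in_range=(0, 1)))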
Example #23
 def test_int(self):
     image = skimage.img_as_int(lena)
     self._test_image(image)
Example #24
def save_recon(data_type, save_dtype, npad, data, fname):
    '''
    Method for saving. Data are converted based on user-specified options,
    then exported as a tif stack or a netCDF3 .volume file. Format conversions
    are very slow: raw data usually saves quickly, but data that have been
    converted to float format save slowly.

    Parameters
    -------
    data_type : str
            String of current data format.
    save_dtype : str
            String of what the save format will be.
    npad : int, optional
            Amount of padding applied to the dataset.
    data : ndarray
            Array of data to be saved.
    fname : str
            Name of the dataset and of the saved file.

    Returns
    -------
    Nothing
    '''

    ## Setup copy of data to allow user to scale and save at different file
    ## types (e.g. 8 bit, 16 bit, etc.). Must check to see if data are padded.
    if npad == 0:
        save_data = data[:]
    ## Exporting data without padding.
    if npad != 0:  #was padded.
        if data.shape[1] == data.shape[2]:  #padded and reconstructed.
            save_data = data[:, npad:data.shape[1] - npad,
                             npad:data.shape[2] - npad]
        if data.shape[1] != data.shape[2]:  #padded and NOT reconstructed.
            save_data = data[:, :, npad:data.shape[2] - npad]
    ## Scales the data appropriately.
    ## This is extremely slow from float32 to other formats.
    a = float(save_data.min())
    b = float(save_data.max()) - a
    if save_dtype == 'u1':
        save_data = ((save_data - a) / b) * 255.
        save_data = save_data.astype(np.uint8)
    if save_dtype == 'u2':
        save_data = ((save_data - a) / b) * 65535.
        save_data = save_data.astype(np.uint16)
    ## This allows processed data (float32) to be saved as signed 16-bit integer, the same as the raw data.
    if save_dtype == 'u2' and data.dtype == 'float32':
        save_data = ((save_data - a) / b)
        for i in range(save_data.shape[0]):
            save_data[i, :, :] = skimage.img_as_int(save_data[i, :, :])
    '''
    Data exporting.
    '''
    ## Create tif stack within a temp folder in the current working directory.
    if data_type == '.tif':
        dx.write_tiff_stack(save_data,
                            fname=fname,
                            dtype=save_dtype,
                            overwrite=True)
    ## Create a .volume netCDF3 file.
    ## netndf3 does not support unsigned integers.
    if data_type == '.vol':
        ## Creates the empty file, and adds metadata.
        ncfile = Dataset(
            fname + '_tomopy_recon.volume',
            'w',
            format='NETCDF3_64BIT',
            clobber=True)  # Will overwrite if pre-existing file is found.
        ncfile.description = 'Tomography dataset'
        ncfile.source = 'APS GSECARS 13BM'
        ncfile.history = "Created " + time.ctime(time.time())
        ## Creates the correct dimensions for the file.
        NX = ncfile.createDimension('NX', save_data.shape[2])
        NY = ncfile.createDimension('NY', save_data.shape[1])
        NZ = ncfile.createDimension('NZ', save_data.shape[0])
        print('save_dtype is ', save_dtype)
        ## Creates variable for data based on previously constructed dimensions.
        volume = ncfile.createVariable('VOLUME', save_dtype, (
            'NZ',
            'NY',
            'NX',
        ))
        ## Copies data into empty file array.
        volume[:] = save_data
        print('volume ', volume.shape, type(volume), volume.dtype)
        ncfile.close()
    del save_data
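A worked example of the 8-bit rescaling branch above: data spanning [a, a + b] is mapped linearly onto [0, 255] before the uint8 cast (values chosen for illustration).

import numpy as np

save_data = np.array([-3.0, 0.0, 5.0], dtype=np.float32)
a = float(save_data.min())                 # -3.0
b = float(save_data.max()) - a             # 8.0
scaled = ((save_data - a) / b) * 255.
print(scaled.astype(np.uint8))             # [  0  95 255]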
Example #25
import cv2
from skimage import img_as_int, img_as_ubyte
from PIL import Image
import scipy.ndimage.filters as flt
import numpy as np

img_gray = img_as_int(cv2.imread('giraffe.jpg', 0))

sobel_vertical = np.array([[-1, 0, 1], [-2, 0, 2], [-1, 0, 1]])

Image.fromarray(flt.convolve(img_gray, sobel_vertical)).show()

sobel_horizontal = sobel_vertical.T
Image.fromarray(flt.convolve(img_gray, sobel_horizontal)).show()

sobel_x = cv2.Sobel(img_gray, -1, 1, 0, ksize=5)
sobel_y = cv2.Sobel(img_gray, -1, 0, 1, ksize=5)

Image.fromarray(sobel_x).show()
Image.fromarray(sobel_y).show()
Example #26
def main():
    parser = optparse.OptionParser()
    parser.add_option('-i', '--image', type=str, default='empty_dewar/puck4_five_missing.jpg', help='Specify the image')
    parser.add_option('-c', '--combine', type=int, default=1, help='Specify the number of images to combine')
    options, args = parser.parse_args()
    
    if options.combine == 1:
        original_image = img_as_float(imread(options.image))
    else:
        original_image = img_as_float(combine(options.image, length=options.combine))
    
    fig, axes = plt.subplots(3, 4)
    a = axes[0, 0]
    a.imshow(original_image)
    a.set_title('original image')
    selem = disk(30)
   
    gray_image = color.rgb2gray(original_image)
    b = axes[0, 1]
    b.imshow(gray_image, cmap='gray')
    b.set_title('gray image')

    img_rank = rank.equalize(gray_image, selem=selem)
    c = axes[0, 2]
    c.imshow(img_rank, cmap='gray')
    c.set_title('rank equalized image')

    edges = canny(img_rank, sigma=5)
    #img_med = median(gray_image, selem=selem)
    d = axes[0, 3]
    d.imshow(edges, cmap='gray')
    d.set_title('edges from rank equalized')
        
    e = axes[1, 0]
    img_sharp = unsharp(gray_image)
    img_sharp = img_sharp/float(img_sharp.max())
    e.imshow(img_sharp, cmap='gray')
    imsave('img_sharp.jpg', img_sharp)
    e.set_title('unsharp')
    
    f = axes[1, 1]
    edges = canny(img_sharp, sigma=7.0, low_threshold=0.04, high_threshold=0.05)
    f.imshow(gaussian(edges, 3), cmap='gray')
    f.set_title('edges from unsharp image sigma=7')
     
    g = axes[1, 2]
    edges = canny(img_sharp, sigma=2.0, low_threshold=0.04, high_threshold=0.05)
    g.imshow(gaussian(edges, 3), cmap='gray')
    g.set_title('edges from unsharp image sigma=2')
    
    h = axes[1, 3]
    edges = canny(img_sharp, sigma=3.0, low_threshold=0.04, high_threshold=0.05)
    h.imshow(gaussian(edges, 3), cmap='gray')
    h.set_title('edges from unsharp image sigma=3')
    
    i = axes[2, 0]
    edges = canny(img_sharp, sigma=4.0, low_threshold=0.04, high_threshold=0.05)
    i.imshow(gaussian(edges, 3), cmap='gray')
    i.set_title('edges from unsharp image sigma=4')
    #j = axes[2, 1]
    #j.imshow(gaussian(img_sharp, sigma=4), cmap='gray')
    #j.set_title('gaussian on unsharp sigma=4')
    
    imsave('edges.jpg', img_as_int(edges))
    print(edges)
    #j = axes[2, 1]
    #result = hough_ellipse(edges, min_size=20, max_size=100)
    #result.sort(order='accumulator', reverse=True)
    #print 'result', result
    #img_eli = img_gray.copy()
    #for best in result[:1]:
        #yc, xc, a, b = [int(round(x)) for x in best[1:5]]
        #orientation = best[5]
        ## Draw the ellipse on the original image
        #cy, cx = ellipse_perimeter(yc, xc, a, b, orientation)
        #img_eli[cy, cx] = 1.
    #g.imshow(img_eli)
    #g.set_title('detected ellipses')
    
    k = axes[2, 2]
    med_unsharp = median(img_sharp/img_sharp.max(), selem=disk(10))
    k.imshow(med_unsharp, cmap='gray')
    k.set_title('median on unsharp')
    
    sharp_med_unsharp = unsharp(med_unsharp)
    l = axes[2, 3]
    edges_med = canny(sharp_med_unsharp, sigma=7) #, high_threshold=0.2)
    #edges_med = gaussian(edges_med, 7)
    imsave('edges_med.jpg', img_as_int(edges_med))
    l.imshow(gaussian(edges_med, 3), cmap='gray')
    l.set_title('edges from med unsharp')
    

 
    #abcdefghijkl
    ##i = axes[2,0]
    ##i.imshow(original_image[:,:,0], cmap='gray')
    ##i.set_title('red channel')
    
    ##j = axes[2,1]
    ##j.imshow(original_image[:,:,1], cmap='gray')
    ##j.set_title('green channel')
    
    ##k = axes[2,2]
    ##k.imshow(original_image[:,:,2], cmap='gray')
    ##k.set_title('blue channel')
    
    plt.show()
Example #27
#img = cv2.imread('images/profile.jpg', 0)
img = cv2.imread('images/moon.jpg',0)

sobel_operator_v = np.array([
  [-1, 0, 1],
  [-2, 0 ,2],
  [-1, 0, 1]
])

sobelX = cv2.Sobel(img, -1, 1, 0, ksize=5)
sobelY = cv2.Sobel(img, -1, 0, 1, ksize=5)

subplot(2,2,1)
plt.imshow(sobelX, cmap='gray')
plt.title('(-1, 1, 0)')

subplot(2,2,2)
plt.imshow(sobelY, cmap='gray')
plt.title('(-1, 0, 1)')

subplot(2,2,3)
plt.imshow(filters.convolve(img_as_int(img), sobel_operator_v), cmap='gray')
plt.title('sobel vertical')

subplot(2,2,4)
plt.imshow(filters.convolve(img_as_int(img), sobel_operator_v.T), cmap='gray')
plt.title('sobel horizontal')

plt.show()
Example #28
from cv2 import cv2
from skimage import img_as_int,img_as_ubyte
from scipy import ndimage
import numpy as np
from matplotlib import pyplot as plt

witcher = cv2.imread('witcher1.jpg',0)
img = img_as_int(witcher)

prewit_vertical = np.array([[-1,0,1],[-1,0,1],[-1,0,1]],dtype='float64')

prewit_vertical_out = img_as_ubyte(ndimage.convolve(img,prewit_vertical,mode='constant',cval=0.0))
plt.imshow(prewit_vertical_out, cmap="gray")
plt.show()
Example #29
def test_downcast():
    x = np.arange(10).astype(np.uint64)
    with expected_warnings('Downcasting'):
        y = img_as_int(x)
    assert np.allclose(y, x.astype(np.int16))
    assert y.dtype == np.int16, y.dtype
Example #30
def _detect_edges_with_operator(image, operator):
    prewitt_vertical = numpy.array(operator, dtype='float64')
    return ndimage.convolve(img_as_int(image), prewitt_vertical)
Example #31
def test_downcast():
    x = np.arange(10).astype(np.uint64)
    with expected_warnings(['Downcasting']):
        y = img_as_int(x)
    assert np.allclose(y, x.astype(np.int16))
    assert y.dtype == np.int16, y.dtype
Example #32
# Convert to float: Important for subtraction later which won't work with uint8
image = greybn
image = gaussian_filter(image, 1)

seed = np.copy(image)
seed[1:-1, 1:-1] = image.min()
mask = image

dimensions = np.shape(image)
x = dimensions[0]
y = dimensions[1]

dilated = reconstruction(seed, mask, method='dilation')
# Subtracting the dilated image leaves an image with just the coins and a flat, black background, as shown below.

final = image - dilated
final = img_as_int(final)

cleanFinal = morphology.remove_small_objects(final, 21)

fig = plt.figure(figsize=(5, 5))

plt.imshow(cleanFinal)  # cmap='gray'
plt.colorbar()
plt.axis('off')

# plt.savefig("/Users/Spencer/PycharmProjects/PieceRecognizer/derived/dilated.png", dpi=600)

# plt.show()
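A small illustration of why the float conversion noted at the top of this example matters: uint8 subtraction wraps around instead of going negative.

import numpy as np

a = np.array([10], dtype=np.uint8)
b = np.array([20], dtype=np.uint8)
print(a - b)                    # [246] -- wrapped around
print(a.astype(float) - b)      # [-10.]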
Example #33
import cv2
from skimage import img_as_ubyte, img_as_int, filters
from scipy import ndimage
import numpy as np
from matplotlib import pyplot as plt
from PIL import Image

# Vertical Edges with Prewitt Mask
moon = cv2.imread(r'C:\Users\Patxi\Downloads\images\images\moon.jpg', 0)
prewitt_vertical = np.array([[-1, 0, 1], [-1, 0, 1], [-1, 0, 1]],
                            dtype='float64')  # Kernel
moon_int = img_as_int(moon)
prewitt_vertical_out = img_as_ubyte(
    ndimage.convolve(moon_int, prewitt_vertical))
plt.imshow(prewitt_vertical_out, cmap='gray')
plt.show()

# Horizontal Edges with Prewitt Mask
prewitt_horizontal = np.array([[-1, -1, -1], [0, 0, 0], [1, 1, 1]],
                              dtype='float64')  # Kernel
prewitt_horizontal_out = img_as_ubyte(
    ndimage.convolve(moon_int, prewitt_horizontal))
plt.imshow(prewitt_horizontal_out, cmap='gray')
plt.show()

# Edge detection with Sobel Operator
a = Image.open(r'C:\Users\Patxi\Downloads\images\images\moon.jpg')
b = filters.sobel(a)
plt.imshow(b, cmap='gray')
plt.show()
Example #34
masks_folder = "./data/2d_masks/"
tr_paths, v_paths = get_train_val_paths(image_folder, masks_folder)
val_gen = img_batch_generator(v_paths["val_imgs"],
                              v_paths["val_mask"],
                              batch_size=2)
# img, mask = next(val_gen)
# print(img[1].shape)
model = load_model("./models/unet_model.h5",
                   custom_objects={
                       "mean_iou": mean_iou,
                       "dice_loss": dice_loss
                   })
# preds_val = model.predict(img)
# preds_val = (preds_val >= 0.5).astype(np.uint32)
# imshow(np.squeeze(preds_val[1]))
# plt.show()
# imshow(np.squeeze(mask[1]))
# plt.show()

for i, (batch_imgs, batch_masks) in enumerate(val_gen):
    batch_preds = model.predict(batch_imgs)
    batch_preds = (batch_preds >= 0.5).astype(np.uint8)
    masks = (batch_masks >= 0.5).astype(np.uint8)
    for j in range(batch_imgs.shape[0]):
        pred_arr = img_as_int(np.squeeze(batch_preds[j]))
        mask_arr = img_as_int(np.squeeze(masks[j]))
        original_img_arr = np.squeeze(batch_imgs[j])
        imsave(fname="./segments/keras/{}_{}_seg.png", arr=pred_arr)
        imsave(fname="./segments/keras/{}_{}_mask.png", arr=mask_arr)
        imsave(fname="./segments/keras/{}_{}_img.png", arr=original_img_arr)
Example #35
 
filter_relativ_height_sidewalk[(isnan(filter_relativ_height_sidewalk)==False)]
imshow(filter_relativ_height_sidewalk, cmap=plt.cm.gray) ; plt.show();

Z_around_sidewalk = Z * filter_relativ_height_sidewalk ; 
imshow(Z_around_sidewalk, cmap=plt.cm.gray) ;
#Z_around_sidewalk[Z_around_sidewalk!=0]
#new_viewer = viewer.ImageViewer(Z_around_sidewalk) ; new_viewer.show() ; 
tmp_min = np.min(Z_around_sidewalk[isnan(Z_around_sidewalk)==False]) ;
tmp_max = np.max(Z_around_sidewalk[isnan(Z_around_sidewalk)==False]) ;
Z_around_sidewalk= (Z_around_sidewalk - (tmp_max+tmp_min)/2 ) / (tmp_max-tmp_min)  ;

convert_to_float = img_as_float(filter_relativ_height_sidewalk) 
convert_to_float[isnan(convert_to_float)==False]

convert_to_int = img_as_int(Z_around_sidewalk) ; 
viewer.ImageViewer(convert_to_int).show() ; 
grad =  gradient(convert_to_int,disk(1)) ;
sobel_result = sobel(convert_to_int,sidewalk_mask) 
viewer.ImageViewer(sobel_result).show()

new_viewer = viewer.ImageViewer(grad) 
new_viewer += lineprofile.LineProfile()
new_viewer.show()

imshow(grad, cmap=plt.cm.gray); plt.show();


 
from skimage.morphology import disk
from skimage.filter.rank import gradient
Example #36
 def setUp(self):
     df = read_file('images/lung.dcm')
     self.test_image = img_as_int(df.pixel_array)