Example #1
def closing(image):
    """Grayscale morphological closing of the image.

    Parameters
    ----------
    image : string
        Path to the input image.

    Returns
    -------
    filename : string
        Path of the closed image.
    """
    import os
    import numpy as np
    import nibabel as nib
    from scipy.ndimage.morphology import grey_closing
    # from nilearn.plotting import plot_anat
    data = nib.load(image).get_data()
    data_ = grey_closing(data, size=(3, 3, 3))
    img = nib.Nifti1Image(data_, nib.load(image).affine)
    print(np.sum((data - data_) ** 2))
    # cut_coords = [-45, 22, 22]
    # plot_anat(image, cut_coords=cut_coords, dim=-1)
    # plot_anat(img, cut_coords=cut_coords, dim=-1)
    filename = os.path.join(os.path.dirname(image),
                            os.path.basename(image)[:-4] + '_closed.nii.gz')
    img.to_filename(filename)
    return filename
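A minimal call sketch for the helper above; the path below is a placeholder, not a bundled file, and the closed volume is written next to the input.

# Hypothetical usage of closing(); '/data/sub-01_T1.nii' is a placeholder path.
closed_path = closing('/data/sub-01_T1.nii')
print(closed_path)  # expected: /data/sub-01_T1_closed.nii.gz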
Example #2
def mkoutersurf(image, radius, outfile):
    #radius information is currently ignored
    #it is a little tougher to deal with the morphology in python

    fill = nib.load( image )
    filld = fill.get_data()
    filld[filld==1] = 255

    gaussian = np.ones((2,2))*.25

    image_f = np.zeros((256,256,256))

    for slice in range(256):
        temp = filld[:,:,slice]
        image_f[:,:,slice] = convolve(temp, gaussian, 'same')

    image2 = np.zeros((256,256,256))
    image2[np.where(image_f <= 25)] = 0
    image2[np.where(image_f > 25)] = 255

    strel15 = generate_binary_structure(3, 1)

    BW2 = grey_closing(image2, structure=strel15)
    thresh = np.max(BW2)/2
    BW2[np.where(BW2 <= thresh)] = 0
    BW2[np.where(BW2 > thresh)] = 255

    v, f = marching_cubes(BW2, 100)

    v2 = np.transpose(
             np.vstack( ( 128 - v[:,0],
                          v[:,2] - 128,
                          128 - v[:,1], )))
    
    write_surface(outfile, v2, f)
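The grey-closing and re-thresholding step above can be reproduced in isolation with plain scipy; the volume below is synthetic and only stands in for the filled pial image.

import numpy as np
from scipy.ndimage import grey_closing, generate_binary_structure

vol = (np.random.rand(64, 64, 64) > 0.5).astype(float) * 255  # stand-in for the filled volume
strel = generate_binary_structure(3, 1)        # 6-connected structuring element, as above
closed = grey_closing(vol, structure=strel)    # grayscale closing
thresh = closed.max() / 2
closed[closed <= thresh] = 0                   # re-binarise around half the maximum
closed[closed > thresh] = 255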
Example #3
def closing(parameters):
    """Calculates morphological closing of a greyscale image.

    This is equivalent to performing a dilation followed by an erosion.

    It wraps `scipy.ndimage.morphology.grey_closing`. The `footprint`,
    `structure`, `output`, `mode`, `cval` and `origin` options are not
    supported.

    Keep in mind that `mode` and `cval` influence the results. In this case
    the default mode is used, `reflect`.

    :param parameters['data'][0]: input array
    :type parameters['data'][0]: numpy.array
    :param parameters['size']: which neighbours to take into account, defaults
                               to (3, 3) a.k.a. numpy.ones((3, 3))
    :type parameters['size']: list

    :return: numpy.array

    """
    data = parameters['data'][0]
    size = tuple(parameters['size'])

    return morphology.grey_closing(data, size=size)
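A short, hypothetical call sketch for the wrapper above, assuming `morphology` in the snippet refers to `scipy.ndimage.morphology`; the input array is synthetic.

import numpy as np

# 'parameters' follows the interface documented in the docstring above.
parameters = {'data': [np.random.rand(64, 64)], 'size': [3, 3]}
closed = closing(parameters)  # returns an ndarray with the same shape as the input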
Example #4
 def skeletonize(self, image):
     image = grey_closing(image, footprint=circle(8), mode='constant', cval=0.0)
     image = add_zero_mat(image)
     prev_binary_image = np.zeros_like(image)
     
     image_bit_depth = (image.dtype.itemsize * 8) // 2
     print("image_bit_depth: " + str(image_bit_depth))

     #image_thresholds = range(2**image_bit_depth,-1,-16)
     image_thresholds = [2**x for x in range(image_bit_depth, 3, -1)] + list(range(15, 0, -1))
     print("image_thresholds: " + str(image_thresholds))

     for curr_threshold in image_thresholds:
         print("curr_threshold: " + str(curr_threshold))

         curr_thresh_image = threshold(image, curr_threshold)

         curr_binary_image = curr_thresh_image.astype(bool).astype(int)
         imsave(skeleton_images_path + "binary_" + str(curr_threshold) + ".png", curr_binary_image)

         curr_sum_image = (prev_binary_image + curr_binary_image)
         curr_skeleton_image = self.thin_pixels(curr_sum_image)
         imsave(skeleton_images_path + "skeleton_" + str(curr_threshold) + ".png", curr_skeleton_image)
         print("curr_skeleton max: " + str(curr_skeleton_image.max()))
         
         prev_binary_image = curr_skeleton_image
     
     return remove_zero_mat(prev_binary_image)
Example #5
    def processAlgorithm(self, parameters, context, feedback):
        """Here is where the processing itself takes place."""

        INPUT_RASTER = self.parameterAsRasterLayer(parameters,
                                                   self.INPUT_RASTER, context)
        #INPUT_RASTER = self.getParameterValue(self.INPUT_RASTER)
        OUTPUT_RASTER = self.parameterAsOutputLayer(parameters,
                                                    self.OUTPUT_RASTER,
                                                    context)
        CLOSING_SIZE = self.parameterAsInt(parameters, self.CLOSING_SIZE,
                                           context)
        """
        MEDIAN_ITER = self.parameterAsInt(parameters, self.MEDIAN_ITER, context)
        MEDIAN_SIZE = self.parameterAsInt(parameters, self.MEDIAN_SIZE, context)
        # First we create the output layer. The output value entered by
        # the user is a string containing a filename, so we can use it
        # directly

        #from scipy import ndimage
        #import gdal
        """
        INPUT_RASTER_src = INPUT_RASTER.source()

        # feedback.pushInfo(str(OUTPUT_RASTER))
        #QgsMessageLog.logMessage('output is: '+str(OUTPUT_RASTER))

        from scipy.ndimage.morphology import grey_closing

        data, im = dataraster.open_data_band(INPUT_RASTER_src)

        proj = data.GetProjection()
        geo = data.GetGeoTransform()
        d = data.RasterCount

        total = 100 / (d * 1)

        outFile = dataraster.create_empty_tiff(OUTPUT_RASTER, im, d, geo, proj)

        iterPos = 0

        for i in range(d):
            # Read data from the right band
            # pbNow+=1
            # pb.setValue(pbNow)

            tempBand = data.GetRasterBand(i + 1).ReadAsArray()

            tempBand = grey_closing(tempBand,
                                    size=(CLOSING_SIZE, CLOSING_SIZE))
            #tempBand = tempBand
            feedback.setProgress(int(i * total))

            # Save band to outFile
            out = outFile.GetRasterBand(i + 1)
            out.WriteArray(tempBand)
            out.FlushCache()
            tempBand = None

        return {self.OUTPUT_RASTER: OUTPUT_RASTER}
Example #6
def Filtro_opening(matrix_imagem):
    # Gaussian gradient magnitude

    imagens_filtrada1 = grey_opening(matrix_imagem, size=5)
    #imagens_filtrada2=grey_opening(imagens_filtrada1,size=5)
    imagens_filtrada = grey_closing(imagens_filtrada1, size=5)

    return imagens_filtrada
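A usage sketch with the scipy names the snippet assumes to be in scope:

import numpy as np
from scipy.ndimage import grey_opening, grey_closing

img = np.random.randint(0, 256, size=(100, 100)).astype('uint8')
filtered = Filtro_opening(img)  # opening followed by closing, both with size=5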
Example #7
def get_segmentation_mask(I, mask_color=(1., 1., 1.)):
    channel_masks = I.copy()
    for c in range(3):
        channel_masks[:, :, c] = (I[:, :, c] == mask_color[c]).astype(int)
    mask = np.prod(channel_masks, axis=-1)
    k = np.ones((3, 3), dtype=np.float32)
    mask = spndm.grey_closing(mask, footprint=k)
    mask = spndm.grey_opening(mask, footprint=k)
    mask = np.clip(mask, 0., 1.)
    return mask
Example #8
def get_segmentation_mask(I, mask_color=(1., 1., 1.)):
    from scipy.ndimage import morphology as spndm
    channel_masks = I.copy()
    for c in range(3):
        channel_masks[:, :, c] = (I[:, :, c] == mask_color[c]).astype(int)
    mask = np.prod(channel_masks, axis=-1)
    k = np.ones((3, 3), dtype=np.float32)
    mask = spndm.grey_closing(mask, footprint=k)
    mask = spndm.grey_opening(mask, footprint=k)
    mask = np.clip(mask, 0., 1.)
    return mask
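Either variant above can be exercised on a synthetic RGB array; the sketch assumes `spndm` is `scipy.ndimage.morphology`, as in the second variant.

import numpy as np

I = np.zeros((32, 32, 3), dtype=np.float32)
I[8:24, 8:24, :] = 1.0           # a square painted in the mask colour (1., 1., 1.)
mask = get_segmentation_mask(I)  # ~1.0 inside the square, 0.0 elsewhere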
Example #9
def make_outer_surf(orig_pial, image, radius, outfile):
    '''
    Make outer surface based on a pial volume and radius,
    write to surface in outfile.

    Args:
        orig_pial: pial surface (e.g. lh.pial)
        image: filled lh or rh pial image (e.g. lh.pial.filled.mgz)
        radius: radius for smoothing (currently ignored)
        outfile: surface file to write data to

    Original code from ielu (https://github.com/aestrivex/ielu)
    '''

    #radius information is currently ignored
    #it is a little tougher to deal with the morphology in python

    pial_surf = nib.freesurfer.read_geometry(orig_pial, read_metadata=True)
    volume_info = pial_surf[2]

    fill = nib.load(image)
    filld = fill.get_data()
    filld[filld == 1] = 255

    gaussian = np.ones((2, 2)) * .25

    image_f = np.zeros((256, 256, 256))

    for slice in range(256):
        temp = filld[:, :, slice]
        image_f[:, :, slice] = convolve(temp, gaussian, 'same')

    image2 = np.zeros((256, 256, 256))
    image2[np.where(image_f <= 25)] = 0
    image2[np.where(image_f > 25)] = 255

    strel15 = generate_binary_structure(3, 1)

    BW2 = grey_closing(image2, structure=strel15)
    thresh = np.max(BW2) / 2
    BW2[np.where(BW2 <= thresh)] = 0
    BW2[np.where(BW2 > thresh)] = 255

    # v, f = marching_cubes(BW2, 100)
    v, f = measure.marching_cubes(BW2, 100)

    v2 = np.transpose(
        np.vstack((
            128 - v[:, 0],
            v[:, 2] - 128,
            128 - v[:, 1],
        )))

    write_surface(outfile, v2, f, volume_info=volume_info)
Example #10
def estimate_xheight(line, scale=1.0, debug=0):
    """Estimates the xheight of a line based on image processing and
    filtering."""
    vgrad = morphology.grey_closing(line, (1, int(scale * 40)))
    vgrad = filters.gaussian_filter(vgrad, (2, int(scale * 60)), (1, 0))
    if amin(vgrad) > 0 or amax(vgrad) < 0: raise Exception("bad line")
    if debug: imshow(vgrad)
    proj = sum(vgrad, 1)
    proj = filters.gaussian_filter(proj, 0.5)
    top = argmax(proj)
    bottom = argmin(proj)
    return bottom - top, bottom
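The line-estimation helpers here and in the next few examples come from the same OCR codebase and rely on names imported at module level; a plausible import block, stated as an assumption rather than the original one, would be:

from numpy import amin, amax, argmax, argmin, arange, mean, maximum, polyfit, zeros
from scipy.ndimage import filters, morphology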
Example #11
def estimate_xheight(line,scale=1.0,debug=0):
    """Estimates the xheight of a line based on image processing and
    filtering."""
    vgrad = morphology.grey_closing(line,(1,int(scale*40)))
    vgrad = filters.gaussian_filter(vgrad,(2,int(scale*60)),(1,0))
    if amin(vgrad)>0 or amax(vgrad)<0: raise BadImage("bad line")
    if debug: imshow(vgrad)
    proj = sum(vgrad,1)
    proj = filters.gaussian_filter(proj,0.5)
    top = argmax(proj)
    bottom = argmin(proj)
    return bottom-top,bottom
Example #12
def estimate_baseline(line, order=3):
    """Compute the baseline by fitting a polynomial to the gradient.
    TODO: use robust fitting, special case very short line, limit parameter ranges"""
    line = line * 1.0 / amax(line)
    vgrad = morphology.grey_closing(line, (1, 40))
    vgrad = filters.gaussian_filter(vgrad, (2, 60), (1, 0))
    if amin(vgrad) > 0 or amax(vgrad) < 0: raise BadLine()
    h, w = vgrad.shape
    ys = argmin(vgrad, axis=0)
    xs = arange(w)
    baseline = polyfit(xs, ys, order)
    print(baseline)
    return baseline
Example #13
def estimate_baseline(line,order=3):
    """Compute the baseline by fitting a polynomial to the gradient.
    TODO: use robust fitting, special case very short line, limit parameter ranges"""
    line = line*1.0/amax(line)
    vgrad = morphology.grey_closing(line,(1,40))
    vgrad = filters.gaussian_filter(vgrad,(2,60),(1,0))
    if amin(vgrad)>0 or amax(vgrad)<0: raise BadImage()
    h,w = vgrad.shape
    ys = argmin(vgrad,axis=0)
    xs = arange(w)
    baseline = polyfit(xs,ys,order)
    print(baseline)
    return baseline
Example #14
def latin_mask(line, scale=1.0, r=1.2, debug=0):
    """Estimate a mask that covers letters and diacritics of a text
    line for Latin alphabets."""
    vgrad = morphology.grey_closing(1.0 * line, (1, int(scale * 40)))
    vgrad = filters.gaussian_filter(vgrad, (2, int(scale * 60)), (1, 0))
    tops = argmax(vgrad, 0)
    bottoms = argmin(vgrad, 0)
    mask = zeros(line.shape)
    xheight = mean(bottoms - tops)
    for i in range(len(bottoms)):
        d = bottoms[i] - tops[i]
        y0 = int(maximum(0, bottoms[i] - r * d))
        mask[y0:bottoms[i], i] = 1
    return mask
Example #15
def latin_mask(line,scale=1.0,r=1.2,debug=0):
    """Estimate a mask that covers letters and diacritics of a text
    line for Latin alphabets."""
    vgrad = morphology.grey_closing(1.0*line,(1,int(scale*40)))
    vgrad = filters.gaussian_filter(vgrad,(2,int(scale*60)),(1,0))
    tops = argmax(vgrad,0)
    bottoms = argmin(vgrad,0)
    mask = zeros(line.shape)
    xheight = mean(bottoms-tops)
    for i in range(len(bottoms)):
        d = bottoms[i]-tops[i]
        y0 = int(maximum(0,bottoms[i]-r*d))
        mask[y0:bottoms[i],i] = 1
    return mask
Example #16
def extract_watermark(audio_file, interactive=False):
    """
	Extracts the watermark from the spectrogram of the given audio file
	:param audio_file: path to wav file
	:param interactive: activates plotting 
	:return: watermark as text, or None if the watermark could not be extracted
	"""

    # Convert audio file to wav if necessary
    wavFile = convert_to_wav(audio_file)

    fs, data = wavfile.read(wavFile)
    data = data.astype(float) / np.max(np.abs(data))
    window_length = 1024
    nfft = window_length
    h = window_length // 4
    spectrogram, f, t = stft(data, window_length, h, nfft, fs)
    if interactive:
        plot_spectrogram(spectrogram)

    # Convert to PIL image in order to use optical character recognition
    # Flip upside down due to the usual way in which we view a spectrogram
    ocr_image = np.flipud(np.abs(spectrogram))

    # Do some image enhancement
    ocr_image[ocr_image < 0.2] = 0
    ocr_image = grey_closing(ocr_image, (5, 2))
    ocr_image = grey_erosion(ocr_image, (3, 5))

    # Convert to 8 bit image
    ocr_image = np.uint8(ocr_image / np.max(ocr_image) * 255 * 10)[20:120, :]
    ocr_image[ocr_image > 5] = 255

    # Enlarge image by interpolation
    # ocr_image = imresize(ocr_image, (ocr_image.shape[0] * 8, ocr_image.shape[1] * 8), interp="bilinear")

    if interactive:
        # Show for debugging purposes
        plt.imshow(ocr_image)
        plt.show()

    ocr_image = Image.fromarray(ocr_image)
    ocr_image_filename = "test.png"
    ocr_image.save(ocr_image_filename, format="png")

    # watermark = ocr.tesseract(ocr_image)
    watermark = ocr_space(ocr_image_filename)
    # ocr_image.save("test.png", format="png")
    return watermark
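The image-enhancement step in isolation, applied to a synthetic magnitude array (a sketch only, not the full watermark pipeline):

import numpy as np
from scipy.ndimage import grey_closing, grey_erosion

spec = np.abs(np.random.randn(256, 512))  # stand-in for a spectrogram magnitude
spec[spec < 0.2] = 0                      # suppress background noise
spec = grey_closing(spec, (5, 2))         # bridge small gaps in glyph strokes
spec = grey_erosion(spec, (3, 5))         # thin the strokes back down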
Example #17
def refined_seeding(a, maximum_height=0, grey_close_radius=1, 
    binary_open_radius=1, binary_close_radius=1, minimum_size=0):
    """Perform morphological operations to get good segmentation seeds."""
    if grey_close_radius > 0:
        strel = diamond_se(grey_close_radius, a.ndim)
        a = grey_closing(a, footprint=strel)
    s = (a <= maximum_height)
    if binary_open_radius > 0:
        strel = diamond_se(binary_open_radius, s.ndim)
        s = binary_opening(s, structure=strel)
    if binary_close_radius > 0:
        strel = diamond_se(binary_close_radius, s.ndim)
        s = binary_closing(s, structure=strel)
    s = remove_small_connected_components(s, minimum_size)
    return label(s)[0]
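For reference, the same seed-refinement pipeline can be sketched with plain scipy calls; building the diamond structuring element with iterate_structure is an assumption about what diamond_se does, and the small-component removal step is omitted.

import numpy as np
from scipy.ndimage import (grey_closing, binary_opening, binary_closing,
                           generate_binary_structure, iterate_structure, label)

a = np.random.rand(64, 64)                   # synthetic height map
strel = iterate_structure(generate_binary_structure(a.ndim, 1), 1)
a = grey_closing(a, footprint=strel)         # grey closing with a radius-1 diamond
s = a <= 0.2                                 # low regions become seed candidates
s = binary_opening(s, structure=strel)
s = binary_closing(s, structure=strel)
seeds, n_seeds = label(s)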
Example #18
def morph_filter(raw_image, closing_radius, opening_radius):

    # Create circular masks
    close_footprint, open_footprint = [
        [[0 for ii in range(r * 2 + 1)] for i in range(r * 2 + 1)]
        for r in [closing_radius, opening_radius]
    ]
    for fp in [close_footprint, open_footprint]:
        r = (len(fp) - 1) / 2
        for i in range(len(fp)):
            for ii in range(len(fp)):
                if (i - r) ** 2 + (ii - r) ** 2 <= r ** 2:
                    fp[i][ii] = 1

    # Perform filtering
    filtered_image = raw_image
    filtered_image = grey_closing(filtered_image, footprint=close_footprint)
    filtered_image = grey_opening(filtered_image, footprint=open_footprint)
    return filtered_image
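A small usage sketch on a synthetic 2-D array, assuming grey_closing and grey_opening are imported from scipy.ndimage as in the snippet:

import numpy as np
from scipy.ndimage import grey_closing, grey_opening

noisy = np.random.rand(128, 128)
smoothed = morph_filter(noisy, closing_radius=3, opening_radius=2)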
Example #19
def depthmap_flow_nav(d_im):
    # calculates a crude depth flow field and identifies a likely clear path 
    # by template matching to a gaussian distance function
    global cX, cY
    global yaw_error
    global est_dist

    # create nxn zeros and appropriate kernel
    kernlen = 321
    dirac_im = np.zeros((kernlen, kernlen))
    # set element at the middle to one, a dirac delta
    dirac_im[kernlen//2, kernlen//2] = 1
    
    # gaussian-smooth the dirac, resulting in a gaussian filter:
    gauss_template = cv2.GaussianBlur(dirac_im, (kernlen, kernlen), 0)
    # normalise
    max_g = max(gauss_template.max(axis=1))
    gauss_display = np.array(255*gauss_template/max_g, dtype=np.uint8)

    # filter the distance output to remove discontinuities and approximate a flow field
    d_im_filt = scmorph.grey_closing(d_im, size=(7, 7))
    blur = cv2.GaussianBlur(d_im_filt, (71, 71), 0)

    # we may want to restrict our analysis to a central 'band' of the image
    # can use a mask in the template match for this
    blur = np.array(blur, dtype=np.uint8)

    # Cross correlate a gaussian peaked function of size < image:
    template_match = cv2.matchTemplate(blur, gauss_display, cv2.TM_CCORR_NORMED)

    template_match = cv2.normalize(template_match, 0, 1, cv2.NORM_MINMAX)

    min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(template_match)
    # max_loc gives top left of template match
    cX = max_loc[0] + kernlen // 2
    cY = max_loc[1] + kernlen // 2
    
    # distance: examine distance at max_loc to estimate whether we have hit a wall? This doesn't work very well!
    vis_cent = blur[(cX-8):(cX+8), (cY-8):(cY+8)]
    vis_cent = vis_cent.astype('float64')
    vis_cent[vis_cent < 5 ] = np.nan
    est_dist = np.nanmean(vis_cent) 

    yaw_error = cX - 640/2 # change this for different depth image size
Example #20
def refined_seeding(a,
                    maximum_height=0,
                    grey_close_radius=1,
                    binary_open_radius=1,
                    binary_close_radius=1,
                    minimum_size=0):
    """Perform morphological operations to get good segmentation seeds."""
    if grey_close_radius > 0:
        strel = diamond_se(grey_close_radius, a.ndim)
        a = grey_closing(a, footprint=strel)
    s = (a <= maximum_height)
    if binary_open_radius > 0:
        strel = diamond_se(binary_open_radius, s.ndim)
        s = binary_opening(s, structure=strel)
    if binary_close_radius > 0:
        strel = diamond_se(binary_close_radius, s.ndim)
        s = binary_closing(s, structure=strel)
    s = remove_small_connected_components(s, minimum_size)
    return label(s)[0]
Example #21
def gen(py_dev):
    running = True
    frameint = 5
    framecount = 0
    global yaw_error

    while running:
        framecount += 1

        py_dev.wait_for_frame()
        c_im = py_dev.colour
        rgb_im = c_im[..., ::-1]

        # try to scale with minimal wrap over effective range - fairly heuristic
        # Note that most colormaps give darker values to closer items
        # we may want to invert this, intuitively
        d_im = py_dev.depth * 0.05

        # close holes without removing segmentation by doing it before converting to image
        d_im_filt = scmorph.grey_closing(d_im, size=(7, 7))

        d_im_col = cv2.applyColorMap(d_im.astype(np.uint8), cv2.COLORMAP_HOT)

        # every nth frame, update direction by analysing depth map
        # two options: segmentation or gradient. Segmentation is a problem with very noisy images
        if framecount > frameint:
            #thread.start_new_thread(depthmap_seg_nav, (d_im_col, ))
            thread.start_new_thread(depthmap_flow_nav, (d_im_filt, ))
            framecount = 1

        # eventually replace this with an arrow indicating desired direction
        cv2.circle(d_im_col, (cX, cY), 7, (255, 255, 255), -1)
        cv2.putText(d_im_col, str(yaw_error), (cX - 20, cY - 20),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 2)

        #cd = np.concatenate((blur, gauss_template), axis=1)
        cd = np.concatenate((rgb_im, d_im_col), axis=1)
        ret, frame = cv2.imencode('.jpg', cd)
        # this is pretty slow over wifi
        jpeg_encode = frame.tobytes()
        yield (b'--frame\r\n'
               b'Content-Type: image/jpeg\r\n\r\n' + jpeg_encode + b'\r\n\r\n')
Example #22
def mkoutersurf(image, radius, outfile):
    #radius information is currently ignored
    #it is a little tougher to deal with the morphology in python

    fill = nib.load(image)
    filld = fill.get_data()
    filld[filld == 1] = 255

    gaussian = np.ones((2, 2)) * .25

    image_f = np.zeros((256, 256, 256))

    for slice in range(256):
        temp = filld[:, :, slice]
        image_f[:, :, slice] = convolve(temp, gaussian, 'same')

    image2 = np.zeros((256, 256, 256))
    image2[np.where(image_f <= 25)] = 0
    image2[np.where(image_f > 25)] = 255

    strel15 = generate_binary_structure(3, 1)

    BW2 = grey_closing(image2, structure=strel15)
    thresh = np.max(BW2) / 2
    BW2[np.where(BW2 <= thresh)] = 0
    BW2[np.where(BW2 > thresh)] = 255

    v, f, _, _ = measure.marching_cubes_lewiner(BW2, 100)

    v2 = np.transpose(
        np.vstack((
            128 - v[:, 0],
            v[:, 2] - 128,
            128 - v[:, 1],
        )))

    write_surface(outfile, v2, f)
Example #23
def make_outer_surf(
    orig_pial: Union[str, Path],
    image: Union[str, Path],
    output_fpath: Union[str, Path],
    outer_surface_sphere: float = 15,
):
    """Create outer surface of a pial volume.

    Make outer surface based on a pial volume and radius,
    write to surface in outfile.

    Parameters
    ----------
    orig_pial : str | pathlib.Path
        Pial surface (e.g. lh.pial)
    image : str | pathlib.Path
        Filled lh or rh pial image (e.g. lh.pial.filled.mgz)
    output_fpath : str | pathlib.Path
        surface file to write data to
    outer_surface_sphere : float | None
        radius for smoothing in mm (default=15). diameter of the sphere used
        by make_outer_surface to close the sulci using morphological operations.
        Ignored currently. Corresponds to ``se=strel('sphere',se_diameter);``
        in Matlab. See [1].

    References
    ----------
    .. [1] See FieldTrip Toolbox ``make_outer_surface`` function inside
    ``prepare_mesh_cortexhull.m`` file.

    .. [2] https://github.com/aestrivex/ielu
    """
    from scipy.signal import convolve
    from scipy.ndimage.morphology import grey_closing, generate_binary_structure
    from mne import write_surface
    from mcubes import marching_cubes

    # radius information is currently ignored
    # it is a little tougher to deal with the morphology in python

    # load original pial surface to get the volume information
    pial_surf = nb.freesurfer.read_geometry(orig_pial, read_metadata=True)
    volume_info = pial_surf[2]

    # load filled pial image
    fill = nb.load(image)
    filld = fill.get_data()
    filld[filld == 1] = 255

    # apply a very soft Gaussian filter with sigma = 1mm to
    # facilitate the closing
    gaussian = np.ones((2, 2)) * 0.25

    # initialize image cube array
    image_f = np.zeros((256, 256, 256))

    # initialize a thresholded image
    image2 = np.zeros((256, 256, 256))

    # for each slice, convolve the Gaussian filter on the
    # filled image
    for slice in range(256):
        temp = filld[:, :, slice]
        image_f[:, :, slice] = convolve(temp, gaussian, "same")

    # thresholded image based on value of 25
    image2[np.where(image_f <= 25)] = 0
    image2[np.where(image_f > 25)] = 255

    strel15 = generate_binary_structure(3, 1)

    # run multi-dimensional grayscale closing of the image
    BW2 = grey_closing(image2, structure=strel15)
    thresh = np.max(BW2) / 2
    BW2[np.where(BW2 <= thresh)] = 0
    BW2[np.where(BW2 > thresh)] = 255

    # apply marching cubes algorithm to get
    # vertices and faces
    v, f = marching_cubes(BW2, 100)

    # in order to cope with the different orientation
    v2 = np.transpose(
        np.vstack((
            128 - v[:, 0],
            v[:, 2] - 128,
            128 - v[:, 1],
        )))

    write_surface(output_fpath, v2, f, volume_info=volume_info)
Example #24
def build_iscat_training(bf_filepaths, iscat_filepaths, sampling=4):
    """Creates iscat training data and target in data/iscat_seg/[REF_FRAMES / MASKS] for the iSCAT cell segmentation task
    
        ARGS:
            bf_filepaths (list(str)): filepaths of all the bright field images to input as returned by utilities.load_data_paths()
            iscat_filepaths (list(str)): filepaths of all the iscat images to input as returned by utilities.load_data_paths()
            sampling (int): sampling interval of the saved images (lower storage footprint)
    """

    OUT_PATH = DATA_PATH + 'iscat_seg/'
    os.makedirs(os.path.join(OUT_PATH, 'REF_FRAMES/'), exist_ok=True)
    os.makedirs(os.path.join(OUT_PATH, 'MASKS/'), exist_ok=True)

    # Range of non filtered elements [px]
    min_size, max_size = 1, 13

    iscat_stacks = (utilities.load_imgs(path) for path in iscat_filepaths)
    bf_stacks = (utilities.load_imgs(path) for path in bf_filepaths)

    # Returns the metadata of the experiments, such as frame rate
    metadatas = get_experiments_metadata(iscat_filepaths)

    if torch.cuda.is_available():
        device = torch.cuda.current_device()
        torch.cuda.set_device(device)
        print("Running on: {:s}".format(torch.cuda.get_device_name(device)))
        cuda = torch.device('cuda')
    else:
        # Doesn't run on CPU-only machines; comment this out if no GPU is available
        print("No CUDA device found")
        sys.exit(1)

    unet = UNetCell(1, 1, device=cuda, bilinear_upsampling=False)
    unet.load_state_dict(torch.load('outputs/saved_models/bf_unet.pth'))

    for i, (bf_stack, iscat_stack,
            metadata) in enumerate(zip(bf_stacks, iscat_stacks, metadatas)):
        if i < 45: continue

        bf_stack = bf_stack.astype('float32')
        print(bf_stack.shape)
        if bf_stack.shape[1:] != iscat_stack.shape[1:]:
            bf_stack = processing.coregister(bf_stack, 1.38)
            print(bf_stack.shape)

        normalize(bf_stack)

        # Subsample the iscat stack to correct for the difference in frame rate
        iscat_stack = iscat_stack[::sampling * int(metadata['iscat_fps'] /
                                                   metadata['tirf_fps'])]

        torch_stack = torch.from_numpy(bf_stack).unsqueeze(1).cuda()
        mask = unet.predict_stack(
            torch_stack).detach().squeeze().cpu().numpy() > 0.05
        mask = morphology.grey_erosion(mask * 255,
                                       structure=processing.structural_element(
                                           'circle', (3, 5, 5)))
        mask = morphology.grey_closing(mask,
                                       structure=processing.structural_element(
                                           'circle', (3, 7, 7)))
        mask = (mask > 50).astype('uint8')

        # Median filtering and normalization
        iscat_stack = processing.image_correction(iscat_stack)

        # Contrast enhancement
        iscat_stack = processing.enhance_contrast(iscat_stack,
                                                  'stretching',
                                                  percentile=(1, 99))

        # Fourier filtering of image
        iscat_stack = processing.fft_filtering(iscat_stack, min_size, max_size,
                                               True)
        iscat_stack = processing.enhance_contrast(iscat_stack,
                                                  'stretching',
                                                  percentile=(3, 97))

        for j in range(0, min(iscat_stack.shape[0], mask.shape[0]), sampling):
            if iscat_stack[j].shape == mask[j].shape:
                # Doesn't save images without detected cells
                if mask[j].max() == 0: continue

                print("\rSaving to stack_{}_{}.png".format(i + 1, j + 1),
                      end=' ' * 5)
                tifffile.imsave(
                    os.path.join(OUT_PATH, 'REF_FRAMES/',
                                 "stack_{}_{}.png".format(i + 1, j + 1)),
                    rescale(iscat_stack[j]))
                tifffile.imsave(
                    os.path.join(OUT_PATH, 'MASKS/',
                                 "mask_{}_{}.png".format(i + 1, j + 1)),
                    mask[j] * 255)
            else:
                print("Error, shape: {}, {}".format(iscat_stack[j].shape,
                                                    mask[j].shape))
                break

        print('')
Example #25
 def gray_closing(self, *args, **kw):
     '''see scipy.ndimage.morphology.grey_closing'''
     return Image(_morphology.grey_closing(self, *args, **kw)).convert_type(self.dtype)
Example #26
def mark_orders(
    im,
    min_cluster=None,
    min_width=None,
    filter_size=None,
    noise=None,
    opower=4,
    border_width=None,
    degree_before_merge=2,
    regularization=0,
    closing_shape=(5, 5),
    opening_shape=(2, 2),
    plot=False,
    plot_title=None,
    manual=True,
    auto_merge_threshold=0.9,
    merge_min_threshold=0.1,
    sigma=0,
):
    """Identify and trace orders

    Parameters
    ----------
    im : array[nrow, ncol]
        order definition image
    min_cluster : int, optional
        minimum cluster size in pixels (default: 500)
    filter_size : int, optional
        size of the running filter (default: 120)
    noise : float, optional
        noise to filter out (default: 8)
    opower : int, optional
        polynomial degree of the order fit (default: 4)
    border_width : int, optional
        number of pixels at the bottom and top borders of the image to ignore for order tracing (default: 5)
    plot : bool, optional
        whether to plot the final order fits (default: False)
    manual : bool, optional
        whether to manually select clusters to merge (strongly recommended) (default: True)

    Returns
    -------
    orders : array[nord, opower+1]
        order tracing coefficients (in numpy order, i.e. largest exponent first)
    """

    # Convert to signed integer, to avoid underflow problems
    im = np.asanyarray(im)
    im = im.astype(int)

    if filter_size is None:
        col = im[:, im.shape[0] // 2]
        col = median_filter(col, 5)
        threshold = np.percentile(col, 90)
        npeaks = find_peaks(col, height=threshold)[0].size
        filter_size = im.shape[0] // (npeaks * 2)
        logger.info("Median filter size, estimated: %i", filter_size)
    elif filter_size <= 0:
        raise ValueError(f"Expected filter size > 0, but got {filter_size}")

    if border_width is None:
        # find width of orders, based on central column
        col = im[:, im.shape[0] // 2]
        col = median_filter(col, 5)
        idx = np.argmax(col)
        width = peak_widths(col, [idx])[0][0]
        border_width = int(np.ceil(width))
        logger.info("Image border width, estimated: %i", border_width)
    elif border_width < 0:
        raise ValueError(f"Expected border width > 0, but got {border_width}")

    if min_cluster is None:
        min_cluster = im.shape[1] // 4
        logger.info("Minimum cluster size, estimated: %i", min_cluster)
    elif not np.isscalar(min_cluster):
        raise TypeError(
            f"Expected scalar minimum cluster size, but got {min_cluster}")

    if min_width is None:
        min_width = 0.25
    if min_width == 0:
        pass
    elif isinstance(min_width, (float, np.floating)):
        min_width = int(min_width * im.shape[0])
        logger.info("Minimum order width, estimated: %i", min_width)

    # im[im < 0] = np.ma.masked
    blurred = np.ma.filled(im, fill_value=0)
    blurred = grey_closing(blurred, 5)
    # blur image along columns, and use the median + blurred + noise as threshold
    blurred = gaussian_filter1d(blurred, filter_size, axis=0)

    if noise is None:
        tmp = np.abs(blurred.flatten())
        noise = np.percentile(tmp, 5)
        logger.info("Background noise, estimated: %f", noise)
    elif not np.isscalar(noise):
        raise TypeError(f"Expected scalar noise level, but got {noise}")

    mask = im > blurred + noise
    # remove borders
    if border_width != 0:
        mask[:border_width, :] = mask[-border_width:, :] = False
        mask[:, :border_width] = mask[:, -border_width:] = False
    # remove masked areas with no clusters
    mask = np.ma.filled(mask, fill_value=False)
    # close gaps in between clusters
    struct = np.full(closing_shape, 1)
    mask = morphology.binary_closing(mask, struct, border_value=1)
    # remove small lonely clusters
    struct = np.full(opening_shape, 1)
    # struct = morphology.generate_binary_structure(2, 1)
    mask = morphology.binary_opening(mask, struct)

    # label clusters
    clusters, _ = label(mask)

    # remove small clusters
    sizes = np.bincount(clusters.ravel())
    mask_sizes = sizes > min_cluster
    mask_sizes[0] = True  # This is the background, which we don't need to remove
    for i in np.arange(len(sizes))[~mask_sizes]:
        clusters[clusters == i] = 0

    # # Reorganize x, y, clusters into a more convenient "pythonic" format
    # # x, y become dictionaries, with an entry for each order
    # # n is just a list of all orders (ignore cluster == 0)
    n = np.unique(clusters)
    n = n[n != 0]
    x = {i: np.where(clusters == c)[0] for i, c in enumerate(n)}
    y = {i: np.where(clusters == c)[1] for i, c in enumerate(n)}

    def best_fit_degree(x, y):
        L1 = np.sum((np.polyval(np.polyfit(y, x, 1), y) - x)**2)
        L2 = np.sum((np.polyval(np.polyfit(y, x, 2), y) - x)**2)

        # aic1 = 2 + 2 * np.log(L1) + 4 / (x.size - 2)
        # aic2 = 4 + 2 * np.log(L2) + 12 / (x.size - 3)

        if L1 < L2:
            return 1
        else:
            return 2

    if sigma > 0:
        degree = {i: best_fit_degree(x[i], y[i]) for i in x.keys()}
        bias = {i: np.polyfit(y[i], x[i], deg=degree[i])[-1] for i in x.keys()}
        n = list(x.keys())
        yt = np.concatenate([y[i] for i in n])
        xt = np.concatenate([x[i] - bias[i] for i in n])
        coef = np.polyfit(yt, xt, deg=degree_before_merge)

        res = np.polyval(coef, yt)
        cutoff = sigma * (res - xt).std()

        # DEBUG plot
        # uy = np.unique(yt)
        # mask = np.abs(res - xt) > cutoff
        # plt.plot(yt, xt, ".")
        # plt.plot(yt[mask], xt[mask], "r.")
        # plt.plot(uy, np.polyval(coef, uy))
        # plt.show()
        #

        m = {
            i: np.abs(np.polyval(coef, y[i]) - (x[i] - bias[i])) < cutoff
            for i in x.keys()
        }

        k = max(x.keys()) + 1
        for i in range(1, k):
            new_img = np.zeros(im.shape, dtype=int)
            new_img[x[i][~m[i]], y[i][~m[i]]] = 1
            clusters, _ = label(new_img)

            x[i] = x[i][m[i]]
            y[i] = y[i][m[i]]
            if len(x[i]) == 0:
                del x[i], y[i]

            nnew = np.max(clusters)
            if nnew != 0:
                xidx, yidx = np.indices(im.shape)
                for j in range(1, nnew + 1):
                    xn = xidx[clusters == j]
                    yn = yidx[clusters == j]
                    if xn.size >= min_cluster:
                        x[k] = xn
                        y[k] = yn
                        k += 1
                # plt.imshow(clusters, origin="lower")
                # plt.show()

    if plot:  # pragma: no cover
        title = "Identified clusters"
        if plot_title is not None:
            title = f"{plot_title}\n{title}"
        plt.title(title)
        plt.xlabel("x [pixel]")
        plt.ylabel("y [pixel]")
        clusters = np.ma.zeros(im.shape, dtype=int)
        for i in x.keys():
            clusters[x[i], y[i]] = i + 1
        clusters[clusters == 0] = np.ma.masked

        plt.imshow(clusters, origin="lower", cmap="prism")
        plt.show()

    # Merge clusters, if there are even any possible mergers left
    x, y, n = merge_clusters(
        im,
        x,
        y,
        n,
        manual=manual,
        deg=degree_before_merge,
        auto_merge_threshold=auto_merge_threshold,
        merge_min_threshold=merge_min_threshold,
        plot_title=plot_title,
    )

    if min_width > 0:
        sizes = {k: v.max() - v.min() for k, v in y.items()}
        mask_sizes = {k: v > min_width for k, v in sizes.items()}
        for k, v in mask_sizes.items():
            if not v:
                del x[k]
                del y[k]
        n = x.keys()

    orders = fit_polynomials_to_clusters(x, y, n, opower)

    # sort orders from bottom to top, using relative position

    def compare(i, j):
        _, xi, i_left, i_right = i
        _, xj, j_left, j_right = j

        if i_right < j_left or j_right < i_left:
            return xi.mean() - xj.mean()

        left = max(i_left, j_left)
        right = min(i_right, j_right)

        return xi[left:right].mean() - xj[left:right].mean()

    xp = np.arange(im.shape[1])
    keys = [(c, np.polyval(orders[c], xp), y[c].min(), y[c].max())
            for c in x.keys()]
    keys = sorted(keys, key=cmp_to_key(compare))
    key = [k[0] for k in keys]

    n = np.arange(len(n), dtype=int)
    x = {c: x[key[c]] for c in n}
    y = {c: y[key[c]] for c in n}
    orders = np.array([orders[key[c]] for c in n])

    column_range = np.array([[np.min(y[i]), np.max(y[i]) + 1] for i in n])

    if plot:  # pragma: no cover
        plot_orders(im, x, y, n, orders, column_range, title=plot_title)

    return orders, column_range
Example #27
def main():
    # parse cmd arguments
    parser = getParser()
    parser.parse_args()
    args = getArguments(parser)
    
    # prepare logger
    logger = Logger.getInstance()
    if args.debug: logger.setLevel(logging.DEBUG)
    elif args.verbose: logger.setLevel(logging.INFO)
    
    logger.info('Executing weighted viscous morphology with {} ({} bins).'.format(','.join(map(str, args.func)), len(args.func)))
        
    # iterate over input images
    for image in args.images:
        
        # build output file name
        image_viscous_name = args.folder + '/' + image.split('/')[-1][:-4] + '_wviscous_' + '_'.join(map(str, args.func))
        image_viscous_name += image.split('/')[-1][-4:]
        
        # check if output file exists
        if not args.force:
            if os.path.exists(image_viscous_name):
                logger.warning('The output file {} already exists. Skipping this image.'.format(image_viscous_name))
                continue
        
        # get and prepare image data
        logger.info('Loading image {} using NiBabel...'.format(image))
        image_gradient = load(image)
        
        # get and prepare image data
        image_gradient_data = scipy.squeeze(image_gradient.get_data())
        
        # prepare result image and extract required attributes of input image
        if args.debug:
            logger.debug('Intensity range of gradient image is ({}, {})'.format(image_gradient_data.min(), image_gradient_data.max()))
        
        # create gradient images flattened histogram
        bins = hist_flatened(image_gradient_data, len(args.func))
        logger.debug('{} bins created'.format(len(bins) -1))
        
        # check if the number of bins is consistent
        if len(args.func) != len(bins) - 1:
            raise Exception('Inconsistency between the number of requested and created bins ({} to {})'.format(args.sections, len(bins) - 1))
        
        # prepare result file
        image_viscous_data = image_gradient_data
        
        # transform the gradient images topography
        logger.info('Applying the viscous morphological operations on {} sections...'.format(len(args.func)))
        for sl in range(1, len(args.func) + 1):
            
            # create sphere to use in this step
            if 0 >= args.func[sl - 1]: continue # spheres of size 0 or below lead to no changes and are not executed
            sphere = iterate_structure(generate_binary_structure(3, 1), args.func[sl - 1]).astype(scipy.int_)
            
            # create masks to extract the affected voxels (i.e. the current slice of the topographic image representation)
            mask_greater = (image_gradient_data >= bins[sl]) # all voxels which are over the current slice
            mask_lower = (image_gradient_data < bins[sl - 1]) # all voxels which are under the current slice
            mask_equal = scipy.invert(mask_greater | mask_lower) # all voxels in the current slice
            
            # extract slice
            image_threshold_data = image_gradient_data.copy()
            image_threshold_data[mask_lower] = 0 # set all voxels under the current slice to zero
            image_threshold_data[mask_greater] = image_threshold_data[mask_equal].max() # set all voxels over the current slice to the max of all voxels in the current slice
            
            logger.debug('{} of {} voxels belong to this level.'.format(len(mask_equal.nonzero()[0]), scipy.prod(image_threshold_data.shape)))            
            
            # apply the closing with the appropriate sphere
            logger.debug('Applying a disk of {} to all values >= {} and < {} (sec {})...'.format(args.func[sl - 1], bins[sl - 1],  bins[sl], sl))
            image_closed_data = grey_closing(image_threshold_data, footprint=sphere)
            
            # add result of this slice to the general results
            image_viscous_data = scipy.maximum(image_viscous_data, image_closed_data)
                    
        # save resulting gradient image
        logger.info('Saving resulting gradient image as {}...'.format(image_viscous_name))
        image_viscous = image_like(image_viscous_data, image_gradient)
        save(image_viscous, image_viscous_name)
            
    logger.info('Successfully terminated.')
Example #28
def main():
    # parse cmd arguments
    parser = getParser()
    parser.parse_args()
    args = getArguments(parser)

    # prepare logger
    logger = Logger.getInstance()
    if args.debug: logger.setLevel(logging.DEBUG)
    elif args.verbose: logger.setLevel(logging.INFO)

    logger.info('Selected viscous type is {}'.format(args.type))

    # iterate over input images
    for image in args.images:

        # get and prepare image data
        logger.info('Loading image {} using NiBabel...'.format(image))
        image_gradient = load(image)

        # get and prepare image data
        image_gradient_data = scipy.squeeze(image_gradient.get_data())

        logger.debug('Intensity range of gradient image is ({}, {})'.format(
            image_gradient_data.min(), image_gradient_data.max()))

        # build output file name and check for its existence, if not in sections mode
        if 'sections' != args.type:
            # build output file name
            image_viscous_name = args.folder + '/' + image.split(
                '/')[-1][:-4] + '_viscous_{}_sec_{}_ds_{}'.format(
                    args.type, args.sections, args.dsize)
            image_viscous_name += image.split('/')[-1][-4:]

            # check if output file exists
            if not args.force:
                if os.path.exists(image_viscous_name):
                    logger.warning(
                        'The output file {} already exists. Skipping this image.'
                        .format(image_viscous_name))
                    continue

        # execute plain closing i.e. a closing operation over the whole image, if in plain mode
        if 'plain' == args.type:
            # prepare the disc structure (a ball with a diameter of (args.dsize * 2 + 1))
            disc = iterate_structure(generate_binary_structure(3, 1),
                                     args.dsize).astype(scipy.int_)

            # apply closing
            logger.info('Applying the morphology over whole image at once...')
            image_viscous_data = grey_closing(image_gradient_data,
                                              footprint=disc)

            # save resulting gradient image
            logger.info('Saving resulting gradient image as {}...'.format(
                image_viscous_name))
            image_viscous = image_like(image_viscous_data, image_gradient)
            save(image_viscous, image_viscous_name)

            # skip other morphologies
            continue

        # create gradient images flattened histogram
        bins = hist_flatened(image_gradient_data, args.sections)
        logger.debug('{} bins created'.format(len(bins) - 1))

        # check if the number of bins is consistent
        if args.sections != len(bins) - 1:
            raise Exception(
                'Inconsistency between the number of requested and created bins ({} to {})'
                .format(args.sections,
                        len(bins) - 1))

        # prepare result file
        image_viscous_data = image_gradient_data

        # transform the gradient images topography (Note: the content of one bin is: bins[slice - 1] <= content < bins[slice])
        logger.info(
            'Applying the viscous morphological operations {} times...'.format(
                args.sections))
        for slice in range(1, args.sections + 1):

            # build output file name and check for its existence, if in sections mode
            if 'sections' == args.type:
                # build output file name
                image_viscous_name = args.folder + '/' + image.split(
                    '/')[-1][:-4] + '_viscous_{}_sec_{}_ds_{}_sl_{}'.format(
                        args.type, args.sections, args.dsize, slice)
                image_viscous_name += image.split('/')[-1][-4:]

                # check if output file exists
                if not args.force:
                    if os.path.exists(image_viscous_name):
                        logger.warning(
                            'The output file {} already exists. Skipping this slice.'
                            .format(image_viscous_name))
                        continue

                # prepare result file
                image_viscous_data = image_gradient_data

            # create masks to extract the affected voxels (i.e. the current slice of the topographic image representation)
            mask_greater = (image_gradient_data >= bins[slice])  # all voxels which are over the current slice
            mask_lower = (image_gradient_data < bins[slice - 1])  # all voxels which are under the current slice
            mask_equal = scipy.invert(mask_greater | mask_lower)  # all voxels in the current slice
            if 'mercury' == args.type:
                dsize = int((args.dsize / float(args.sections)) * (slice))
                disc = iterate_structure(generate_binary_structure(3, 1),
                                         dsize).astype(scipy.int_)
                mask_equal_or_greater = mask_equal | mask_greater
                image_threshold_data = image_gradient_data * mask_equal_or_greater
            elif 'oil' == args.type:
                dsize = int((args.dsize / float(args.sections)) *
                            (args.sections - slice + 1))
                disc = iterate_structure(generate_binary_structure(3, 1),
                                         dsize).astype(scipy.int_)
                image_threshold_data = image_gradient_data.copy()
                mask_equal_or_lower = mask_equal | mask_lower
                # set all voxels over the current slice to the max of all voxels in the current slice
                image_threshold_data[mask_greater] = image_threshold_data[
                    mask_equal_or_lower].max()
            elif 'sections' == args.type:
                dsize = args.dsize
                disc = iterate_structure(generate_binary_structure(3, 1),
                                         args.dsize).astype(scipy.int_)
                image_threshold_data = image_gradient_data.copy()
                # set all voxels under the current slice to zero
                image_threshold_data[mask_lower] = 0
                # set all voxels over the current slice to the max of all voxels in the current slice
                image_threshold_data[mask_greater] = image_threshold_data[
                    mask_equal].max()

            logger.debug('{} of {} voxels belong to this level.'.format(
                len(mask_equal.nonzero()[0]),
                scipy.prod(image_threshold_data.shape)))

            # apply the closing with the appropriate disc size
            logger.debug(
                'Applying a disk of {} to all values >= {} and < {}...'.format(
                    dsize, bins[slice - 1], bins[slice]))
            image_closed_data = grey_closing(image_threshold_data,
                                             footprint=disc)

            # add result of this slice to the general results
            image_viscous_data = scipy.maximum(image_viscous_data,
                                               image_closed_data)

            # save created output file, if in sections mode
            if 'sections' == args.type:
                # save resulting gradient image
                logger.info('Saving resulting gradient image as {}...'.format(
                    image_viscous_name))
                image_viscous = image_like(image_viscous_data, image_gradient)
                save(image_viscous, image_viscous_name)

        # save created output file, if not in sections mode
        if 'sections' != args.type:
            # save resulting gradient image
            logger.info('Saving resulting gradient image as {}...'.format(
                image_viscous_name))
            image_viscous = image_like(image_viscous_data, image_gradient)
            save(image_viscous, image_viscous_name)

    logger.info('Successfully terminated.')
Example #29
def main(*args):
    if(len(args) < 3):
        return 1

    git_hash = check_output(['git', 'rev-parse', 'HEAD']).strip().decode()

    for i in range(2,len(args),2):

        nlevels = 256

        seed = random.randint(0, sys.maxsize)
        random.seed(seed)
        np.random.seed(seed)

        r = np.array(range(0,nlevels))
        source = pickle.load(open(args[1],'rb'))

        gt_path = args[i]
        out_path = args[i+1]
        seed_path = os.path.splitext(out_path)[0]+'.seed'
        ground = np.genfromtxt(gt_path,dtype='int16')
        ground_edges = edges(ground)

        with open(seed_path, 'w') as f:
            f.write(str(seed) + ' ' + git_hash)

        ncircles = random.randint(0,5)
        print('Number of circles: ' + str(ncircles))
        nlines = random.randint(0,2)
        print('Number of lines: ' + str(nlines))
        nscratches = random.randint(ground.min(),ground.max())
        print('Number of scratches: ' + str(nscratches))

        out = np.zeros(ground.shape,dtype='int16')

        dt = distance_transform_edt(np.logical_not(ground_edges))

        # supress edges
        for p,e in edge_list(ground):
            # lower threshold on edge "length"
            if e.sum() < 50:
                continue
            s = binary_dilation(e,iterations=3)
            dt[s] += random.choice([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.25, 0.5, 1])

#        for c in [ (random.randint(0,ground.shape[0]), 
#                    random.randint(0,ground.shape[1]), 
#                    random.randint(25,75)) 
#                   for k in range(0,ncircles) ]:
#            dt[matsci.adj.circle(c,out.shape)] += 1

        dt = dt.astype('int16')

        out[dt==0] = normsamp(source['dist_raw'][0], len(out[dt==0]), 255,128)

        out = grey_closing(out, size=(3,3))

        for d in source['dist']:
            if not d==0:
                out[dt==d] = histsamp(source['dist'][d], len(out[dt==d]))

        # fill in the rest of the pixels with value from last sampling
        m = max(source['dist'].keys())
        out[dt>=m-1] = histsamp(source['dist'][m], len(out[dt>=m-1]))

        # create varying intensities within grains
        for k in range(0,ground.max()):
            out[ground==k] += histsamp(source['grain_all'],1)[0]

        for l in [ binary_dilation(
                line( (random.randint(0,ground.shape[0]), 
                       random.randint(0,ground.shape[1]))
                      , random.random() * math.pi * 2 # angle
                      , int(math.sqrt(math.pow(ground.shape[0],2)+
                                      math.pow(ground.shape[1],2))) + 1 # length
                      , ground.shape))
                   for k in range(0,nlines) ]:
            out[l] += np.clip(list(map(int, map(round,
                                                norm.rvs(loc=64, scale=32,
                                                         size=len(out[l]))))),
                              0, 255).astype('int16')

        # inter-grain scratches
        for k in random.sample(range(0,ground.max()), nscratches):
            reg = ground==k
            if reg.sum() < 500:
                continue
            c = scipy.ndimage.measurements.center_of_mass(reg)
            p = matsci.label.fit_region_z(reg)
            w = abs(p[0]-p[2])
            h = abs(p[1]-p[3])
            largest = int(round(min(w,h)/2))
            angle = random.random() * math.pi * 2
            val = random.randint(2,48)
            for j in range(1,random.randint(1,25)):
                l = np.logical_and(
                    line(
                        (random.randint(int(c[1]-h/2),
                                        int(c[1]+h/2)),
                         random.randint(int(c[0]-w/2),
                                        int(c[0]+w/2)))
                        , angle
                        , random.randint(2,max(3,largest))
                        , ground.shape)
                    , reg)
                out[l] += np.clip(
                    list(map(int,
                             map(round,
                                 norm.rvs(
                                     loc=val,
                                     scale=32,
                                     size=len(out[l]))))),
                    0,
                    255
                    ).astype('int16')
                
        out = np.clip(out,0,255).astype('uint8')

    # out = gaussian_filter(out, sigma=1)
    # out = median_filter(out, 3)

        cv2.imwrite(out_path,out)

    return 0
Example #30
    def distort(imgae, config):
        """ 向图像中添加噪声
        这个函数修改自gqcnn的源程序中,具体原理参考论文
        """
        imgae_ = imgae.copy()
        # config = self._config
        im_height = imgae_.shape[0]
        im_width = imgae_.shape[1]
        im_center = np.array([float(im_height-1)/2, float(im_width-1)/2])
        # denoising and synthetic data generation
        if config['multiplicative_denoising']:
            gamma_shape = config['gamma_shape']
            gamma_scale = 1.0 / gamma_shape
            mult_samples = ss.gamma.rvs(gamma_shape, scale=gamma_scale)
            imgae_ = imgae_ * mult_samples

        # randomly dropout regions of the image for robustness
        if config['image_dropout']:
            if np.random.rand() < config['image_dropout_rate']:
                nonzero_px = np.where(imgae_ > 0)
                nonzero_px = np.c_[nonzero_px[0], nonzero_px[1]]
                num_nonzero = nonzero_px.shape[0]
                num_dropout_regions = ss.poisson.rvs(
                    config['dropout_poisson_mean'])

                # sample ellipses
                dropout_centers = np.random.choice(
                    num_nonzero, size=num_dropout_regions)
                x_radii = ss.gamma.rvs(
                    config['dropout_radius_shape'], scale=config['dropout_radius_scale'], size=num_dropout_regions)
                y_radii = ss.gamma.rvs(
                    config['dropout_radius_shape'], scale=config['dropout_radius_scale'], size=num_dropout_regions)

                # set interior pixels to zero
                for j in range(num_dropout_regions):
                    ind = dropout_centers[j]
                    dropout_center = nonzero_px[ind, :]
                    x_radius = x_radii[j]
                    y_radius = y_radii[j]
                    dropout_px_y, dropout_px_x = sd.ellipse(
                        dropout_center[0], dropout_center[1], y_radius, x_radius, shape=imgae_.shape)
                    imgae_[dropout_px_y, dropout_px_x] = 0.0

        # dropout a region around the areas of the image with high gradient
        if config['gradient_dropout']:
            if np.random.rand() < config['gradient_dropout_rate']:
                grad_mag = sf.gaussian_gradient_magnitude(
                    imgae_, sigma=config['gradient_dropout_sigma'])
                thresh = ss.gamma.rvs(
                    config['gradient_dropout_shape'], config['gradient_dropout_scale'], size=1)
                high_gradient_px = np.where(grad_mag > thresh)
                imgae_[high_gradient_px[0], high_gradient_px[1]] = 0.0

        # add correlated Gaussian noise
        if config['gaussian_process_denoising']:
            gp_rescale_factor = config['gaussian_process_scaling_factor']
            gp_sample_height = int(im_height / gp_rescale_factor)
            gp_sample_width = int(im_width / gp_rescale_factor)
            gp_num_pix = gp_sample_height * gp_sample_width
            if np.random.rand() < config['gaussian_process_rate']:
                gp_noise = ss.norm.rvs(scale=config['gaussian_process_sigma'], size=gp_num_pix).reshape(
                    gp_sample_height, gp_sample_width)
                # scipy.misc's sm.imresize is deprecated and warns on use
                # gp_noise = sm.imresize(
                #     gp_noise, gp_rescale_factor, interp='bicubic', mode='F')
                # skimage.transform's st.resize was a drop-in replacement for it
                # gp_noise = st.resize(gp_noise, (im_height, im_width))
                # note: cv2.resize expects dsize in (width, height) order
                gp_noise = cv2.resize(
                    gp_noise, (im_width, im_height), interpolation=cv2.INTER_CUBIC)
                imgae_[imgae_ > 0] += gp_noise[imgae_ > 0]

        # randomly apply either an opening or a closing filter to the image
        if config['morphological']:
            sample = np.random.rand()
            morph_filter_dim = ss.poisson.rvs(
                config['morph_poisson_mean'])
            if sample < config['morph_open_rate']:
                imgae_ = snm.grey_opening(
                    imgae_, size=morph_filter_dim)
            else:
                closed_imgae_ = snm.grey_closing(
                    imgae_, size=morph_filter_dim)

                # set new closed pixels to the minimum depth, mimicking the table
                new_nonzero_px = np.where(
                    (imgae_ == 0) & (closed_imgae_ > 0))
                closed_imgae_[new_nonzero_px[0], new_nonzero_px[1]] = np.min(
                    imgae_[imgae_ > 0])
                imgae_ = closed_imgae_.copy()

        # randomly dropout borders of the image for robustness
        if config['border_distortion']:
            grad_mag = sf.gaussian_gradient_magnitude(
                imgae_, sigma=config['border_grad_sigma'])
            high_gradient_px = np.where(
                grad_mag > config['border_grad_thresh'])
            high_gradient_px = np.c_[
                high_gradient_px[0], high_gradient_px[1]]
            num_nonzero = high_gradient_px.shape[0]
            num_dropout_regions = ss.poisson.rvs(
                config['border_poisson_mean'])

            # sample ellipses
            dropout_centers = np.random.choice(
                num_nonzero, size=num_dropout_regions)
            x_radii = ss.gamma.rvs(
                config['border_radius_shape'], scale=config['border_radius_scale'], size=num_dropout_regions)
            y_radii = ss.gamma.rvs(
                config['border_radius_shape'], scale=config['border_radius_scale'], size=num_dropout_regions)

            # set interior pixels to zero or one
            for j in range(num_dropout_regions):
                ind = dropout_centers[j]
                dropout_center = high_gradient_px[ind, :]
                x_radius = x_radii[j]
                y_radius = y_radii[j]
                dropout_px_y, dropout_px_x = sd.ellipse(
                    dropout_center[0], dropout_center[1], y_radius, x_radius, shape=imgae_.shape)
                if np.random.rand() < 0.5:
                    imgae_[dropout_px_y, dropout_px_x] = 0.0
                else:
                    imgae_[dropout_px_y, dropout_px_x] = imgae_[
                        dropout_center[0], dropout_center[1]]

        # randomly replace background pixels with constant depth
        if config['background_denoising']:
            if np.random.rand() < config['background_rate']:
                imgae_[imgae_ > 0] = config['background_min_depth'] + (
                    config['background_max_depth'] - config['background_min_depth']) * np.random.rand()

        # symmetrize images
        if config['symmetrize']:
            # rotate with 50% probability
            if np.random.rand() < 0.5:
                theta = 180.0
                # cv2 expects the rotation centre as (x, y) and dsize as (width, height)
                rot_map = cv2.getRotationMatrix2D(
                    (im_center[1], im_center[0]), theta, 1)
                imgae_ = cv2.warpAffine(
                    imgae_, rot_map, (im_width, im_height), flags=cv2.INTER_NEAREST)
            # reflect left right with 50% probability
            if np.random.rand() < 0.5:
                imgae_ = np.fliplr(imgae_)
            # reflect up down with 50% probability
            if np.random.rand() < 0.5:
                imgae_ = np.flipud(imgae_)
        return imgae_
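
# For orientation, a hypothetical config dict covering every key that distort()
# reads above; the numeric values are illustrative assumptions, not the gqcnn
# defaults, and `depth_image` is a placeholder name.
example_config = {
    'multiplicative_denoising': True, 'gamma_shape': 1000.0,
    'image_dropout': False, 'image_dropout_rate': 0.5,
    'dropout_poisson_mean': 1.0, 'dropout_radius_shape': 2.0, 'dropout_radius_scale': 1.0,
    'gradient_dropout': False, 'gradient_dropout_rate': 0.1,
    'gradient_dropout_sigma': 0.5, 'gradient_dropout_shape': 0.01, 'gradient_dropout_scale': 1.0,
    'gaussian_process_denoising': True, 'gaussian_process_rate': 0.5,
    'gaussian_process_scaling_factor': 4.0, 'gaussian_process_sigma': 0.005,
    'morphological': False, 'morph_open_rate': 0.5, 'morph_poisson_mean': 1.5,
    'border_distortion': False, 'border_grad_sigma': 1.0, 'border_grad_thresh': 0.075,
    'border_poisson_mean': 5.0, 'border_radius_shape': 5, 'border_radius_scale': 0.4,
    'background_denoising': False, 'background_rate': 0.25,
    'background_min_depth': 0.0, 'background_max_depth': 0.65,
    'symmetrize': True,
}
# noisy = distort(depth_image, example_config)   # depth_image: 2-D depth array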
Ejemplo n.º 31
0
def close_segmentation(segmentation, size, **kwargs):
    '''Close holes in segmentation maps for training.'''
    return grey_closing(segmentation, size=size, **kwargs)
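
# Toy usage sketch for close_segmentation (synthetic data, not from the project);
# it assumes grey_closing is imported as in the snippet above.
import numpy as np

seg = np.ones((5, 5), dtype=np.uint8)
seg[2, 2] = 0                        # a one-pixel hole in the segmentation map
closed = close_segmentation(seg, size=(3, 3))
assert closed[2, 2] == 1             # the closing fills the hole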
Ejemplo n.º 32
0
def main():
    # parse cmd arguments
    parser = getParser()
    parser.parse_args()
    args = getArguments(parser)

    # prepare logger
    logger = Logger.getInstance()
    if args.debug: logger.setLevel(logging.DEBUG)
    elif args.verbose: logger.setLevel(logging.INFO)

    logger.info(
        'Executing weighted viscous morphology with {} ({} bins).'.format(
            ','.join(map(str, args.func)), len(args.func)))

    # iterate over input images
    for image in args.images:

        # build output file name
        image_viscous_name = args.folder + '/' + image.split(
            '/')[-1][:-4] + '_wviscous_' + '_'.join(map(str, args.func))
        image_viscous_name += image.split('/')[-1][-4:]

        # check if output file exists
        if not args.force:
            if os.path.exists(image_viscous_name):
                logger.warning(
                    'The output file {} already exists. Skipping this image.'.
                    format(image_viscous_name))
                continue

        # get and prepare image data
        logger.info('Loading image {} using NiBabel...'.format(image))
        image_gradient = load(image)

        # get and prepare image data
        image_gradient_data = scipy.squeeze(image_gradient.get_data())

        # prepare result image and extract required attributes of input image
        if args.debug:
            logger.debug(
                'Intensity range of gradient image is ({}, {})'.format(
                    image_gradient_data.min(), image_gradient_data.max()))

        # create gradient images flattened histogram
        bins = hist_flatened(image_gradient_data, len(args.func))
        logger.debug('{} bins created'.format(len(bins) - 1))

        # check if the number of bins is consistent
        if len(args.func) != len(bins) - 1:
            raise Exception(
                'Inconsistency between the number of requested and created bins ({} to {})'
                .format(len(args.func),
                        len(bins) - 1))

        # prepare result file
        image_viscous_data = image_gradient_data

        # transform the gradient images topography
        logger.info(
            'Applying the viscous morphological operations on {} sections...'.
            format(len(args.func)))
        for sl in range(1, len(args.func) + 1):

            # create sphere to use in this step
            if 0 >= args.func[sl - 1]:
                continue  # sphere of sizes 0 or below lead to no changes and are not executed
            sphere = iterate_structure(generate_binary_structure(3, 1),
                                       args.func[sl - 1]).astype(scipy.int_)

            # create masks to extract the affected voxels (i.e. the current slice of the topographic image representation)
            mask_greater = (image_gradient_data >= bins[sl])  # all voxels which are over the current slice
            mask_lower = (image_gradient_data < bins[sl - 1])  # all voxels which are under the current slice
            mask_equal = scipy.invert(mask_greater | mask_lower)  # all voxels in the current slice

            # extract slice
            image_threshold_data = image_gradient_data.copy()
            # set all voxels under the current slice to zero
            image_threshold_data[mask_lower] = 0
            # set all voxels over the current slice to the max of all voxels in the current slice
            image_threshold_data[mask_greater] = image_threshold_data[mask_equal].max()

            logger.debug('{} of {} voxels belong to this level.'.format(
                len(mask_equal.nonzero()[0]),
                scipy.prod(image_threshold_data.shape)))

            # apply the closing with the appropriate sphere
            logger.debug(
                'Applying a disk of {} to all values >= {} and < {} (sec {})...'
                .format(args.func[sl - 1], bins[sl - 1], bins[sl], sl))
            image_closed_data = grey_closing(image_threshold_data,
                                             footprint=sphere)

            # add result of this slice to the general results
            image_viscous_data = scipy.maximum(image_viscous_data,
                                               image_closed_data)

        # save resulting gradient image
        logger.info('Saving resulting gradient image as {}...'.format(
            image_viscous_name))
        image_viscous = image_like(image_viscous_data, image_gradient)
        save(image_viscous, image_viscous_name)

    logger.info('Successfully terminated.')
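
# A standalone sketch of one topographic "slice" of the viscous closing above,
# using a synthetic volume and made-up bin edges; only scipy.ndimage is assumed.
import numpy as np
from scipy.ndimage import grey_closing, generate_binary_structure, iterate_structure

volume = np.random.rand(32, 32, 32) * 100           # stand-in gradient image
lower, upper = 30.0, 60.0                            # one pair of histogram bin edges
sphere = iterate_structure(generate_binary_structure(3, 1), 2).astype(int)

mask_greater = volume >= upper
mask_lower = volume < lower
mask_equal = ~(mask_greater | mask_lower)

level = volume.copy()
level[mask_lower] = 0                                # drop everything below this level
level[mask_greater] = level[mask_equal].max()        # flatten everything above it
closed = grey_closing(level, footprint=sphere)
result = np.maximum(volume, closed)                  # merge back into the running result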
Ejemplo n.º 33
0
        m1.saveImage(outputFolder+ timeString() + m1.name + ".png")
        x = m.getKmeans(k =k, threshold = threshold)
        x['pattern'].saveImage(outputFolder+ getTimeString() + m.name + "threshold%d_clusters%d.png" % (threshold,k))
        res[(threshold, k)] = x['pattern']

"""
#########################################

threshold = 40
k =  10

m.load()
m1=m.threshold(0)
m1.show()
m1.matrix = mor.grey_opening(m1.matrix, 5) ####
m1.matrix = mor.grey_closing(m1.matrix, 5) ####

m1.show()
m1.backupMatrix(0)
m1.showWithCoast()
m1.saveImage(outputFolder+m.name+'grey_opening_closing5.png')
m1.restoreMatrix()

threshold=30
x = m1.getKmeans(k =k, threshold = threshold)
x['pattern'].saveImage(outputFolder+ getTimeString() + m.name + "threshold%d_clusters%d.png" % (threshold,k))
threshold=40

m.load()
m.show()
x = m.getKmeans(k =k, threshold = threshold)
Ejemplo n.º 34
0
def grey_close(im, strel=strel()):
    return morphology.grey_closing(im, structure=strel)
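
# Note: the default argument strel() above is evaluated once, when the function is
# defined. For reference, a flat 3x3 structuring element can be handed to scipy's
# grey_closing in three equivalent ways (sketch, independent of what strel() returns):
import numpy as np
from scipy.ndimage import grey_closing

im = np.random.randint(0, 255, size=(64, 64)).astype(np.uint8)
a = grey_closing(im, size=(3, 3))                       # flat 3x3 window
b = grey_closing(im, footprint=np.ones((3, 3), bool))   # same window, explicit footprint
c = grey_closing(im, structure=np.zeros((3, 3)))        # non-flat SE with zero heights == flat
assert (a == b).all() and (b == c).all()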
Ejemplo n.º 35
0
        frame_disp_count = 0

        while motorrunning:
            framecount += 1
            frame_disp_count += 1

            ## IMAGE PROCESSING
            py_dev.wait_for_frame()
            c_im = py_dev.colour
            rgb_im = c_im[..., ::-1]

            # scaling to map better to color/grayscale
            d_im = py_dev.depth * 0.05

            # close depth holes on the raw depth map, before the 8-bit conversion, so the later segmentation is not degraded
            d_im_filt = scmorph.grey_closing(d_im, size=(7, 7))

            d_im_col = cv2.applyColorMap(d_im.astype(np.uint8),
                                         cv2.COLORMAP_HOT)

            # every nth frame, update direction by analysing depth map
            # two options: segmentation or gradient. Segmentation is a problem with very noisy images
            if framecount > frameint:
                yaw_e_prev = yaw_error
                # thread.start_new_thread(depthmap_seg_nav, (d_im_col, ))
                thread.start_new_thread(depthmap_flow_nav, (d_im_filt, ))
                framecount = 1

            #if frame_disp_count > frame_disp_int:
            #    # still super laggy over wifi
            #    cv2.circle(d_im_col, (cX, cY), 7, (255, 255, 255), -1)
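
# Toy illustration of the hole-filling grey_closing used on the depth frames above:
# a zero-depth "hole" smaller than the 7x7 window is raised to the surrounding depth.
import numpy as np
from scipy.ndimage import grey_closing

d = np.full((20, 20), 100.0)
d[10, 10] = 0.0                        # a missing-depth pixel
filled = grey_closing(d, size=(7, 7))
assert filled[10, 10] == 100.0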
Ejemplo n.º 36
0
def pre_process_image(img, path=None):
    if path: file_name, file_ext = os.path.splitext(os.path.basename(path))

    # MORPHOLOGY CLOSING
    # http://docs.scipy.org/doc/scipy/reference/generated/scipy.ndimage.morphology.grey_closing.html#scipy.ndimage.morphology.grey_closing
    # http://en.wikipedia.org/wiki/Mathematical_morphology
    #
    # Remove every letter and imperfection
    # Parameters: the method is used in its basic form, where the second parameter is a rectangle (height, width)
    # In theory the rectangle should match the average size of a word (a letter is too small, a line is too big)
    #
    #orig#im = morphology.grey_closing(img, (1, 101))
    im = morphology.grey_closing(img, (15, 105)) #odd numbers are better
    if path and SAVE_INTERMEDIATE_STEPS: imsave(os.path.join(END_FOLDER, '%s_step1%s' % (file_name, file_ext)), im)

    # OTSU THRESHOLDING (statistically optimal)
    # http://docs.opencv.org/modules/imgproc/doc/miscellaneous_transformations.html#threshold
    # http://en.wikipedia.org/wiki/Otsu%27s_Method
    #
    # Turn the image into two colours: black for the background, white for the foreground
    # Parameters: 0, 1 (background and foreground values) in production
    # 0, 255 when the black-and-white result needs to be inspected for debugging
    #
    #orig#t, im = cv.threshold(im, 0, 1, cv.THRESH_OTSU)
    t, im = cv.threshold(im, 0, 255, cv.THRESH_OTSU)
    if path and SAVE_INTERMEDIATE_STEPS: imsave(os.path.join(END_FOLDER, '%s_step2%s' % (file_name, file_ext)), im)
    
    # MORPHOLOGY OPENING
    # http://docs.scipy.org/doc/scipy/reference/generated/scipy.ndimage.morphology.grey_opening.html#scipy.ndimage.morphology.grey_opening
    # http://en.wikipedia.org/wiki/Mathematical_morphology
    #
    # Remove the thin white borders
    # Parameters: basic form again, where the second parameter is a rectangle (height, width)
    # The rectangle is a square whose side equals the minimum size of the largest extra white border
    #
    #origl# im = morphology.grey_opening(im, (51, 51))
    im = morphology.grey_opening(im, (51, 51)) #odd numbers are better
    if path and SAVE_INTERMEDIATE_STEPS: imsave(os.path.join(END_FOLDER, '%s_step3%s' % (file_name, file_ext)), im)
    
    # CONNECTED-COMPONENT LABELING
    # http://docs.scipy.org/doc/scipy/reference/generated/scipy.ndimage.measurements.label.html#scipy.ndimage.measurements.label
    # http://en.wikipedia.org/wiki/Connected-component_labeling
    #
    # Split the image (now theoretically clean of all text) into sub-images
    # Keep only the largest sub-image, since it should be the page (everything else is painted black)
    #
    # The result can be:
    #  1 sub-image: perfect case, the sub-image is the sheet (the rest is the irrelevant background, i.e. the black border)
    #  2, 3, 4, 5 sub-images: there is probably a spurious extra white border on 1, 2, 3 or 4 sides
    #  6+ sub-images: the page contains large pictures that were split up
    #
    # label() splits the image into sub-images and assigns a number to every pixel (0 is the background)
    # All pixels sharing the same number belong to the same sub-image
    #
    # It returns:
    #  a matrix with the same shape as the source image where every pixel carries a label
    #  the number of sub-images identified
    #
    lbl, ncc = label(im)
    # Identify the largest sub-image
    largest = 0, 0
    for i in range(1, ncc + 1):
        size = len(numpy.where(lbl == i)[0]) #counts how many times the value i is present in the lbl array
        if size > largest[1]:
            largest = i, size
    # Set colour 0 on every sub-image except the largest one
    for i in range(1, ncc + 1):
        if i == largest[0]:
            continue
        im[lbl == i] = 0
        # To colour the sub-images in grey scale instead:
        # import math
        # im[lbl == i] = math.floor(255 / ncc - 1) * ncc
    if path and SAVE_INTERMEDIATE_STEPS: imsave(os.path.join(END_FOLDER, '%s_step4%s' % (file_name, file_ext)), im)
    return im
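
# Standalone sketch of the "keep only the largest connected component" step above,
# on a toy binary image; np.bincount replaces the per-label np.where loop.
import numpy as np
from scipy.ndimage import label

im = np.zeros((10, 10), dtype=np.uint8)
im[1:8, 1:8] = 255                     # the page
im[0, 9] = 255                         # a small bright speck
lbl, ncc = label(im)
sizes = np.bincount(lbl.ravel())
sizes[0] = 0                           # ignore the background label
im[lbl != sizes.argmax()] = 0          # everything but the largest component goes black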
Ejemplo n.º 37
0
def main():
    # parse cmd arguments
    parser = getParser()
    parser.parse_args()
    args = getArguments(parser)

    # prepare logger
    logger = Logger.getInstance()
    if args.debug:
        logger.setLevel(logging.DEBUG)
    elif args.verbose:
        logger.setLevel(logging.INFO)

    logger.info("Selected viscous type is {}".format(args.type))

    # iterate over input images
    for image in args.images:

        # get and prepare image data
        logger.info("Loading image {} using NiBabel...".format(image))
        image_gradient = load(image)

        # get and prepare image data
        image_gradient_data = scipy.squeeze(image_gradient.get_data())

        logger.debug(
            "Intensity range of gradient image is ({}, {})".format(image_gradient_data.min(), image_gradient_data.max())
        )

        # build output file name and check for its existence, if not in sections mode
        if "sections" != args.type:
            # build output file name
            image_viscous_name = (
                args.folder
                + "/"
                + image.split("/")[-1][:-4]
                + "_viscous_{}_sec_{}_ds_{}".format(args.type, args.sections, args.dsize)
            )
            image_viscous_name += image.split("/")[-1][-4:]

            # check if output file exists
            if not args.force:
                if os.path.exists(image_viscous_name):
                    logger.warning("The output file {} already exists. Skipping this image.".format(image_viscous_name))
                    continue

        # execute plain closing i.e. a closing operation over the whole image, if in plain mode
        if "plain" == args.type:
            # prepare the disc structure (a ball with a diameter of (args.dsize * 2 + 1))
            disc = iterate_structure(generate_binary_structure(3, 1), args.dsize).astype(scipy.int_)

            # apply closing
            logger.info("Applying the morphology over whole image at once...")
            image_viscous_data = grey_closing(image_gradient_data, footprint=disc)

            # save resulting gradient image
            logger.info("Saving resulting gradient image as {}...".format(image_viscous_name))
            image_viscous = image_like(image_viscous_data, image_gradient)
            save(image_viscous, image_viscous_name)

            # skip other morphologies
            continue

        # create gradient images flattened histogram
        bins = hist_flatened(image_gradient_data, args.sections)
        logger.debug("{} bins created".format(len(bins) - 1))

        # check if the number of bins is consistent
        if args.sections != len(bins) - 1:
            raise Exception(
                "Inconsistency between the number of requested and created bins ({} to {})".format(
                    args.sections, len(bins) - 1
                )
            )

        # prepare result file
        image_viscous_data = image_gradient_data

        # transform the gradient image's topography (Note: the content of one bin is: bins[slice - 1] <= content < bins[slice])
        logger.info("Applying the viscous morphological operations {} times...".format(args.sections))
        for slice in range(1, args.sections + 1):

            # build output file name and check for its existence, if in sections mode
            if "sections" == args.type:
                # build output file name
                image_viscous_name = (
                    args.folder
                    + "/"
                    + image.split("/")[-1][:-4]
                    + "_viscous_{}_sec_{}_ds_{}_sl_{}".format(args.type, args.sections, args.dsize, slice)
                )
                image_viscous_name += image.split("/")[-1][-4:]

                # check if output file exists
                if not args.force:
                    if os.path.exists(image_viscous_name):
                        logger.warning(
                            "The output file {} already exists. Skipping this slice.".format(image_viscous_name)
                        )
                        continue

                # prepare result file
                image_viscous_data = image_gradient_data

            # create masks to extract the affected voxels (i.e. the current slice of the topographic image representation)
            mask_greater = image_gradient_data >= bins[slice]  # all voxels which are over the current slice
            mask_lower = image_gradient_data < bins[slice - 1]  # all voxels which are under the current slice
            mask_equal = scipy.invert(mask_greater | mask_lower)  # all voxels in the current slice
            if "mercury" == args.type:
                dsize = int((args.dsize / float(args.sections)) * (slice))
                disc = iterate_structure(generate_binary_structure(3, 1), dsize).astype(scipy.int_)
                mask_equal_or_greater = mask_equal | mask_greater
                image_threshold_data = image_gradient_data * mask_equal_or_greater
            elif "oil" == args.type:
                dsize = int((args.dsize / float(args.sections)) * (args.sections - slice + 1))
                disc = iterate_structure(generate_binary_structure(3, 1), dsize).astype(scipy.int_)
                image_threshold_data = image_gradient_data.copy()
                mask_equal_or_lower = mask_equal | mask_lower
                # set all voxels over the current slice to the max of all voxels in the current slice
                image_threshold_data[mask_greater] = image_threshold_data[mask_equal_or_lower].max()
            elif "sections" == args.type:
                dsize = args.dsize
                disc = iterate_structure(generate_binary_structure(3, 1), args.dsize).astype(scipy.int_)
                image_threshold_data = image_gradient_data.copy()
                # set all voxels under the current slice to zero
                image_threshold_data[mask_lower] = 0
                # set all voxels over the current slice to the max of all voxels in the current slice
                image_threshold_data[mask_greater] = image_threshold_data[mask_equal].max()

            logger.debug(
                "{} of {} voxels belong to this level.".format(
                    len(mask_equal.nonzero()[0]), scipy.prod(image_threshold_data.shape)
                )
            )

            # apply the closing with the appropriate disc size
            logger.debug(
                "Applying a disk of {} to all values >= {} and < {}...".format(dsize, bins[slice - 1], bins[slice])
            )
            image_closed_data = grey_closing(image_threshold_data, footprint=disc)

            # add result of this slice to the general results
            image_viscous_data = scipy.maximum(image_viscous_data, image_closed_data)

            # save created output file, if in sections mode
            if "sections" == args.type:
                # save resulting gradient image
                logger.info("Saving resulting gradient image as {}...".format(image_viscous_name))
                image_viscous = image_like(image_viscous_data, image_gradient)
                save(image_viscous, image_viscous_name)

        # save created output file, if not in sections mode
        if "sections" != args.type:
            # save resulting gradient image
            logger.info("Saving resulting gradient image as {}...".format(image_viscous_name))
            image_viscous = image_like(image_viscous_data, image_gradient)
            save(image_viscous, image_viscous_name)

    logger.info("Successfully terminated.")
Ejemplo n.º 38
0
        pl.save("Stretching", Data)

if(opening):
    print "- Morphological Opening..."
    Data=grey_opening(Data, structure=Cross)
    if(save):
        pl.save("Opening", Data)

if(erosion):
    print "- Morphological Erosion..."
    Data=grey_erosion(Data, structure=Cross)
    if(save):
        pl.save("Erosion", Data)
if(closing):
    print "- Morphological Closing..."
    Data=grey_closing(Data, structure=Cross)
    if(save):
        pl.save("Closing", Data)

# Remark: one could keep going with other transformations, other kernels and so on
# To do so, I would recommend using IPython and, if needed, loading the partial results

FinalStep=Data

if(view):
    view_slice(FinalStep,SizeX/2)
    pl.show()
print "- Finding maxima..."
local_max=maximum_filter(FinalStep, size=(cutoff/2, cutoff/2, cutoff/2))==FinalStep
Labelled, Num=label(local_max)
print "\n====> Found"+F.RED, Num,F.RESET+"maxima\n"
Ejemplo n.º 39
0
def orthorectify(args_source_image, args_dsm, args_destination_image,
                 args_occlusion_thresh=1.0, args_denoise_radius=2,
                 args_raytheon_rpc=None, args_dtm=None):
    """
    Orthorectify an image given the DSM

    Args:
        source_image: Source image file name
        dsm: Digital surface model (DSM) image file name
        destination_image: Orthorectified image file name
        occlusion-thresh: Threshold on height difference for detecting
                          and masking occluded regions (in meters)
        denoise-radius: Apply morphological operations with this radius
                        to the DSM to reduce speckled noise
        raytheon-rpc: Raytheon RPC file name. If not provided
                      the RPC is read from the source_image

    Returns:
        COMPLETE_DSM_INTERSECTION = 0
        PARTIAL_DSM_INTERSECTION = 1
        EMPTY_DSM_INTERSECTION = 2
        ERROR = 10
    """
    returnValue = COMPLETE_DSM_INTERSECTION
    # open the source image
    sourceImage = gdal.Open(args_source_image, gdal.GA_ReadOnly)
    if not sourceImage:
        return ERROR
    sourceBand = sourceImage.GetRasterBand(1)

    if (args_raytheon_rpc):
        # read the RPC from raytheon file
        print("Reading RPC from Raytheon file: {}".format(args_raytheon_rpc))
        model = raytheon_rpc.read_raytheon_rpc_file(args_raytheon_rpc)
    else:
        # read the RPC from RPC Metadata in the image file
        print("Reading RPC Metadata from {}".format(args_source_image))
        rpcMetaData = sourceImage.GetMetadata('RPC')
        model = rpc.rpc_from_gdal_dict(rpcMetaData)
    if model is None:
        print("Error reading the RPC")
        return ERROR

    # open the DSM
    dsm = gdal.Open(args_dsm, gdal.GA_ReadOnly)
    if not dsm:
        return ERROR
    band = dsm.GetRasterBand(1)
    dsmRaster = band.ReadAsArray(
        xoff=0, yoff=0,
        win_xsize=dsm.RasterXSize, win_ysize=dsm.RasterYSize)
    dsm_nodata_value = band.GetNoDataValue()
    print("DSM raster shape {}".format(dsmRaster.shape))

    if args_dtm:
        dtm = gdal.Open(args_dtm, gdal.GA_ReadOnly)
        if not dtm:
            return ERROR
        band = dtm.GetRasterBand(1)
        dtmRaster = band.ReadAsArray(
            xoff=0, yoff=0,
            win_xsize=dtm.RasterXSize, win_ysize=dtm.RasterYSize)
        newRaster = numpy.where(dsmRaster != dsm_nodata_value, dsmRaster, dtmRaster)
        dsmRaster = newRaster

    # apply morphology to denoise the DSM
    if (args_denoise_radius > 0):
        morph_struct = circ_structure(args_denoise_radius)
        dsmRaster = morphology.grey_opening(dsmRaster, structure=morph_struct)
        dsmRaster = morphology.grey_closing(dsmRaster, structure=morph_struct)

    # create the rectified image
    driver = dsm.GetDriver()
    driverMetadata = driver.GetMetadata()
    destImage = None
    arrayX = None
    arrayY = None
    arrayZ = None
    if driverMetadata.get(gdal.DCAP_CREATE) == "YES":
        print("Create destination image of "
              "size:({}, {}) ...".format(dsm.RasterXSize, dsm.RasterYSize))
        # georeference information
        projection = dsm.GetProjection()
        transform = dsm.GetGeoTransform()
        gcpProjection = dsm.GetGCPProjection()
        gcps = dsm.GetGCPs()
        options = ["COMPRESS=DEFLATE"]
        # ensure that space will be reserved for geographic corner coordinates
        # (in DMS) to be set later
        if (driver.ShortName == "NITF" and not projection):
            options.append("ICORDS=G")
        # If I try to use AddBand with GTiff I get:
        # Dataset does not support the AddBand() method.
        # So I create all bands using the same type at the beginning
        destImage = driver.Create(
            args_destination_image, xsize=dsm.RasterXSize,
            ysize=dsm.RasterYSize,
            bands=sourceImage.RasterCount, eType=sourceBand.DataType,
            options=options)

        if (projection):
            # georeference through affine geotransform
            destImage.SetProjection(projection)
            destImage.SetGeoTransform(transform)
            pixels = numpy.arange(0, dsm.RasterXSize)
            pixels = numpy.tile(pixels, dsm.RasterYSize)
            lines = numpy.arange(0, dsm.RasterYSize)
            lines = numpy.repeat(lines, dsm.RasterXSize)
            arrayX = transform[0] + pixels * transform[1] + lines * transform[2]
            arrayY = transform[3] + pixels * transform[4] + lines * transform[5]
            arrayZ = dsmRaster[lines, pixels]
            validIdx = arrayZ != dsm_nodata_value
            pixels = pixels[validIdx]
            lines = lines[validIdx]
            arrayX = arrayX[validIdx]
            arrayY = arrayY[validIdx]
            arrayZ = arrayZ[validIdx]

        else:
            # georeference through GCPs
            destImage.SetGCPs(gcps, gcpProjection)
            # not implemented: compute arrayX, arrayY, arrayZ
            print("Not implemented yet")
            return ERROR
    else:
        print("Driver {} does not supports Create().".format(driver))
        return ERROR

    # convert coordinates to Long/Lat
    srs = osr.SpatialReference(wkt=projection)
    proj_srs = srs.ExportToProj4()
    inProj = pyproj.Proj(proj_srs)
    outProj = pyproj.Proj('+proj=longlat +datum=WGS84')
    arrayX, arrayY = pyproj.transform(inProj, outProj, arrayX, arrayY)

    # Sort the points by height so that higher points project last
    if (args_occlusion_thresh > 0):
        print("Sorting by Height")
        heightIdx = numpy.argsort(arrayZ)
        arrayX = arrayX[heightIdx]
        arrayY = arrayY[heightIdx]
        arrayZ = arrayZ[heightIdx]
        lines = lines[heightIdx]
        pixels = pixels[heightIdx]

    # project the points
    minZ = numpy.amin(arrayZ)
    maxZ = numpy.amax(arrayZ)
    # project points to get image indexes and save their height into the image
    print("Project {} points to destination image ...".format(len(arrayX)))
    print("Points min/max Z: {}/{}  ...".format(minZ, maxZ))

    print("Projecting Points")
    imgPoints = model.project(numpy.array([arrayX, arrayY, arrayZ]).transpose())
    intImgPoints = imgPoints.astype(numpy.int).transpose()

    # compute the bounds of the relevant AOI in the source image
    print("Source Image size: ", [sourceImage.RasterXSize, sourceImage.RasterYSize])
    minPoint = numpy.maximum([0, 0], numpy.min(intImgPoints, 1))
    print("AOI min: ", minPoint)
    maxPoint = numpy.minimum(numpy.max(intImgPoints, 1),
                             [sourceImage.RasterXSize,
                              sourceImage.RasterYSize])
    print("AOI max: ", maxPoint)
    cropSize = maxPoint - minPoint
    if numpy.any(cropSize < 1):
        print("DSM does not intersect source image")
        returnValue = EMPTY_DSM_INTERSECTION

    # shift the projected image point to the cropped AOI space
    intImgPoints[0] -= minPoint[0]
    intImgPoints[1] -= minPoint[1]

    # find indices of points that fall inside the image bounds
    print("Source raster shape {}".format(cropSize))
    validIdx = numpy.logical_and.reduce((intImgPoints[1] < cropSize[1],
                                         intImgPoints[1] >= 0,
                                         intImgPoints[0] < cropSize[0],
                                         intImgPoints[0] >= 0))
    intImgPoints = intImgPoints[:, validIdx]

    # keep only the points that are in the image
    numOut = numpy.size(validIdx) - numpy.count_nonzero(validIdx)
    if (numOut > 0 and not returnValue == EMPTY_DSM_INTERSECTION):
        print("Skipped {} points outside of image".format(numOut))
        returnValue = PARTIAL_DSM_INTERSECTION

    # use a height map to test for occlusion
    if (args_occlusion_thresh > 0):
        print("Mapping occluded points")
        valid_arrayZ = arrayZ[validIdx]
        # render a height map in the source image space
        height_map = numpy.full(cropSize[::-1], -numpy.inf, dtype=numpy.float32)
        height_map[intImgPoints[1], intImgPoints[0]] = valid_arrayZ

        # get a mask of points that locally are (approximately)
        # the highest point in the map
        is_max_height = height_map[intImgPoints[1], intImgPoints[0]] \
            <= valid_arrayZ + args_occlusion_thresh
        num_occluded = numpy.size(is_max_height) - numpy.count_nonzero(is_max_height)
        print("Skipped {} occluded points".format(num_occluded))

        # keep only non-occluded image points
        intImgPoints = intImgPoints[:, is_max_height]
        # disable occluded points in the valid pixel mask
        validIdx[numpy.nonzero(validIdx)[0][numpy.logical_not(is_max_height)]] = False

    for bandIndex in range(1, sourceImage.RasterCount + 1):
        print("Processing band {} ...".format(bandIndex))
        sourceBand = sourceImage.GetRasterBand(bandIndex)
        nodata_value = sourceBand.GetNoDataValue()
        # for now use zero as a no-data value if one is not specified
        # it would probably be better to add a mask (alpha) band instead
        if nodata_value is None:
            nodata_value = 0
        if numpy.any(cropSize < 1):
            # read one value for data type
            sourceRaster = sourceBand.ReadAsArray(
                xoff=0, yoff=0, win_xsize=1, win_ysize=1)
            destRaster = numpy.full(
                (dsm.RasterYSize, dsm.RasterXSize), nodata_value,
                dtype=sourceRaster.dtype)
        else:
            sourceRaster = sourceBand.ReadAsArray(
                xoff=int(minPoint[0]), yoff=int(minPoint[1]),
                win_xsize=int(cropSize[0]), win_ysize=int(cropSize[1]))

            print("Copying colors ...")
            destRaster = numpy.full(
                (dsm.RasterYSize, dsm.RasterXSize), nodata_value,
                dtype=sourceRaster.dtype)
            destRaster[lines[validIdx], pixels[validIdx]] = sourceRaster[
                intImgPoints[1], intImgPoints[0]]

        print("Write band ...")
        destBand = destImage.GetRasterBand(bandIndex)
        destBand.SetNoDataValue(nodata_value)
        destBand.WriteArray(destRaster)
    return returnValue
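
# Sketch of the DSM denoising step above with a hypothetical circ_structure helper
# (the original project defines its own; here it is a disk-shaped boolean footprint).
import numpy as np
from scipy.ndimage import grey_opening, grey_closing

def circ_structure(radius):
    y, x = np.ogrid[-radius:radius + 1, -radius:radius + 1]
    return x * x + y * y <= radius * radius

dsm = np.random.rand(100, 100).astype(np.float32)
struct = circ_structure(2)
denoised = grey_opening(dsm, structure=struct)        # suppress bright speckles
denoised = grey_closing(denoised, structure=struct)   # then fill dark pits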