Example No. 1
def background_subtract(im,strel=strel()):
    if len(im.shape)==2:
        bg = morphology.grey_opening(im,structure=strel)
    elif len(im.shape)==3:
        bg = np.zeros(im.shape)
        for k in range(3):
            bg[:,:,k] = morphology.grey_opening(im[:,:,k],structure=strel)
    return im-bg
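A minimal usage sketch (not part of the original snippet): it assumes the imports the function relies on (numpy as np and scipy.ndimage's morphology) and uses a flat 11x11 structuring element as an illustrative stand-in for the module's own strel() default.

import numpy as np
from scipy.ndimage import morphology

rng = np.random.default_rng(0)
image = rng.random((128, 128)) + np.linspace(0, 1, 128)      # texture plus a smooth background ramp
flat = background_subtract(image, strel=np.zeros((11, 11)))  # an all-zero "structure" behaves like a flat 11x11 opening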
Example No. 2
def opening(parameters):
    """Calculates morphological opening of a greyscale image.

    This is equal to performing an erosion followed by a dilation.

    It wraps `scipy.ndimage.morphology.grey_opening`. The `footprint`,
    `structure`, `output`, `mode`, `cval` and `origin` options are not
    supported.

    Keep in mind that `mode` and `cval` influence the results. In this case
    the default mode is used, `reflect`.

    :param parameters['data'][0]: input array
    :type parameters['data'][0]: numpy.array
    :param parameters['size']: which neighbours to take into account, defaults
                               to (3, 3) a.k.a. numpy.ones((3, 3))
    :type parameters['size']: list

    :return: numpy.array

    """
    data = parameters['data'][0]
    size = tuple(parameters['size'])

    return morphology.grey_opening(data, size=size)
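A hypothetical call of the wrapper above, assuming scipy.ndimage's morphology is imported in the module and following the snippet's parameters-dict convention (input arrays under 'data', window size under 'size'):

import numpy as np

noisy = np.random.rand(64, 64)
smoothed = opening({'data': [noisy], 'size': [5, 5]})   # 5x5 grey opening of the first input array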
Example No. 3
def handle_scene(scene_dir, models_dir, obj_group_size):
    rgb_dir = scene_dir / 'rgb'
    assert rgb_dir.is_dir()
    print('Loading scene file list from directory {}...'.format(rgb_dir))
    rgb_list = sorted(list(rgb_dir.glob('*.png')))
    print('\tGot {} files from {} to {}'.format(len(rgb_list), rgb_list[0],
                                                rgb_list[-1]))

    print('Loading GT poses from {}...'.format(scene_dir / 'gt.yml'))
    with (scene_dir / 'gt.yml').open('r') as f:
        scenes_objs_gt = [objs_gt for _, objs_gt in yaml.safe_load(f).items()]

    infofile = scene_dir / 'info.yml'
    print('Loading camera info from {}...'.format(infofile))
    with infofile.open('r') as f:
        cam_Ks = [
            np.array(cam_info['cam_K']).reshape(3, 3)
            for _, cam_info in yaml.safe_load(f).items()
        ]

    assert len(scenes_objs_gt) == len(cam_Ks)

    outdir = scene_dir / 'mask'
    if not outdir.is_dir():
        outdir.mkdir()

    seqid_string = scene_dir.name
    print('Traversing scene in sequence {}...'.format(seqid_string))

    for rgb_file, cam_K, objs_gt in zip(rgb_list, cam_Ks, scenes_objs_gt):
        with Image.open(rgb_file) as img:
            img_shape = img.size[1], img.size[0]

        img_z = np.empty(img_shape, dtype=np.float64)
        img_z.fill(10**6)
        img_mask = np.zeros(img_shape, dtype=np.uint8)
        run_inst_ids = {}
        for obj_gt in objs_gt:
            obj_id = obj_gt['obj_id']
            model = load_model(obj_id, models_dir)

            inst_z = generate_model_depth_image(model, obj_gt, cam_K,
                                                img_shape)
            # remove salt noise from non-dense model / view ratio
            inst_z = morphology.grey_opening(inst_z, size=(3, 3))

            inst_mask = inst_z < img_z
            img_z[inst_mask] = inst_z[inst_mask]
            local_inst_id = run_inst_ids.get(obj_id, 0)
            assert local_inst_id < obj_group_size
            global_inst_id = obj_id * obj_group_size + local_inst_id
            assert global_inst_id < 256
            img_mask[inst_mask] = global_inst_id
            run_inst_ids[obj_id] = local_inst_id + 1

        # Save result
        outfile = outdir / rgb_file.name
        print('\tSaving output file {} with {} annotated instances...'.format(
            outfile, len(objs_gt)))
        misc.imsave(str(outfile), img_mask)
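A hypothetical invocation, assuming a SIXD/BOP-style scene directory containing rgb/, gt.yml and info.yml and that the module's other helpers (load_model, generate_model_depth_image) and imports are available; the paths and the obj_group_size value below are placeholders:

from pathlib import Path

scene_dir = Path('data/test/01')     # placeholder scene directory
models_dir = Path('data/models')     # placeholder models directory
handle_scene(scene_dir, models_dir, obj_group_size=8)   # writes instance masks to scene_dir / 'mask'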
Example No. 4
def Filtro_opening(matrix_imagem):
    # gaussian gradient magnitude

    imagens_filtrada1 = grey_opening(matrix_imagem, size=5)
    #imagens_filtrada2=grey_opening(imagens_filtrada1,size=5)
    imagens_filtrada = grey_closing(imagens_filtrada1, size=5)

    return imagens_filtrada
Example No. 5
    def open(self,window_size=(2,2)):
        """
            Perform the opening of this image.

            http://docs.scipy.org/doc/scipy/reference/generated/scipy.ndimage.morphology.grey_opening.html
        """
        self.data = np.max(self.data) - self.data
        self.data -= grey_opening(self.data,size=window_size)
        self.data = np.max(self.data) - self.data
Example No. 6
def get_segmentation_mask(I, mask_color=(1., 1., 1.)):
    channel_masks = I.copy()
    for c in range(3):
        channel_masks[:, :, c] = (I[:, :, c] == mask_color[c]).astype(int)
    mask = np.prod(channel_masks, axis=-1)
    k = np.ones((3, 3), dtype=np.float32)
    mask = spndm.grey_closing(mask, footprint=k)
    mask = spndm.grey_opening(mask, footprint=k)
    mask = np.clip(mask, 0., 1.)
    return mask
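A minimal usage sketch, assuming numpy is imported as np and scipy.ndimage.morphology as spndm (the aliases the snippet expects); the white default mask_color marks the region to extract:

import numpy as np
from scipy.ndimage import morphology as spndm

I = np.random.rand(100, 100, 3).astype(np.float32)
I[40:60, 40:60, :] = 1.0                  # a pure-white patch to be detected
mask = get_segmentation_mask(I)           # 1.0 where all three channels equal mask_color, cleaned by closing/opening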
Example No. 7
 def do_binary_opening(self, area=numpy.ones((3, 3)), iteration=1):
     if self._is_binary:
         self._array_image = self._array_image / 255
         self._array_image = morphology.binary_opening(self._array_image,
                                                       area,
                                                       iterations=iteration)
         self._array_image = 255 * self._array_image
     else:
         self._array_image = morphology.grey_opening(
             self._array_image, area.shape)
     self._convert_image()
Example No. 8
def get_segmentation_mask(I, mask_color=(1., 1., 1.)):
    from scipy.ndimage import morphology as spndm
    channel_masks = I.copy()
    for c in range(3):
        channel_masks[:, :, c] = (I[:, :, c] == mask_color[c]).astype(int)
    mask = np.prod(channel_masks, axis=-1)
    k = np.ones((3, 3), dtype=np.float32)
    mask = spndm.grey_closing(mask, footprint=k)
    mask = spndm.grey_opening(mask, footprint=k)
    mask = np.clip(mask, 0., 1.)
    return mask
Example No. 9
def morph_filter(raw_image, closing_radius, opening_radius):

    # Create circular masks
    close_footprint, open_footprint = [[[0 for ii in range(r * 2 + 1)] for i in range(r * 2 + 1)] for r in [closing_radius, opening_radius]]
    for fp in [close_footprint, open_footprint]:
        r = (len(fp) - 1) // 2
        for i in range(len(fp)):
            for ii in range(len(fp)):
                if (i - r) ** 2 + (ii - r) ** 2 <= r ** 2:
                    fp[i][ii] = 1

    # Perform filtering
    filtered_image = raw_image
    filtered_image = grey_closing(filtered_image, footprint=close_footprint)
    filtered_image = grey_opening(filtered_image, footprint=open_footprint)
    return filtered_image
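The nested loops above build flat circular footprints pixel by pixel; an equivalent vectorised NumPy helper (a sketch, not part of the original code) could look like this:

import numpy as np

def circular_footprint(radius):
    # boolean disk of side 2*radius + 1, matching the loop-built masks above
    y, x = np.ogrid[-radius:radius + 1, -radius:radius + 1]
    return (x ** 2 + y ** 2) <= radius ** 2

# morph_filter could then be written as:
# filtered = grey_closing(raw_image, footprint=circular_footprint(closing_radius))
# filtered = grey_opening(filtered, footprint=circular_footprint(opening_radius))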
Example No. 10
    def write_volume_labels(self,z_range=2,opening_strel=(1,15),goodness_threshold=0.25,use_fit=True):
        nvol,nslow,ndepth,nfast = self.h5.get(self.data_block).shape
        if use_fit:
            self.logger.info('write_volume_labels: using fit')
            soffset_matrix = np.round(self.h5.get('model/z_offset_fit')[:]).astype(np.float64)
        else:
            offset_matrix = self.h5.get('model/z_offsets')[:].astype(np.float64)
            self.logger.info('write_volume_labels: using offsets (no fit)')
            soffset_matrix = np.zeros_like(offset_matrix)
            for k in range(offset_matrix.shape[0]):
                soffset_matrix[k,:,:] = grey_opening(offset_matrix[k,:,:],opening_strel)
            

        #goodness_matrix = self.h5.get('model/z_offset_goodness')[:]

        label_keys = self.h5.get('model/labels').keys()
        labels = {}
        volume_labels = {}
        projections = {}
        
        for key in label_keys:
            labels[key] = self.h5.get('model/labels')[key].value
            volume_labels[key] = np.zeros((nvol,nslow,nfast))
            projections[key] = np.zeros((nvol,nslow,nfast))
            
        for ivol in range(nvol):
            avol = np.abs(self.h5.get(self.data_block)[ivol,:,:,:])
            self.logger.info('write_volume_labels: Labeling volume %d of %d.'%(ivol+1,nvol))
            self.logger.info('write_volume_labels: Labels: %s.'%','.join(label_keys))

            for islow in range(nslow):
                if (islow+1)%20==0:
                    self.logger.info('write_volume_labels: %d percent done.'%(float(islow+1)/float(nslow)*100))
                for ifast in range(nfast):
                    test = avol[islow,:,ifast]
                    offset = soffset_matrix[ivol,islow,ifast]
                    for key in label_keys:
                        model_z_index = labels[key]
                        volume_labels[key][ivol,islow,ifast] = model_z_index-offset
                        projections[key][ivol,islow,ifast] = np.mean(test[model_z_index-offset-z_range:model_z_index-offset+z_range+1])

        for key in label_keys:
            location = 'model/volume_labels/%s'%key
            plocation = 'projections/%s'%key
            self.h5.put(location,volume_labels[key])
            self.h5.put(plocation,projections[key])
Example No. 11
    def _filter(self, grid):
        from scipy.ndimage.morphology import grey_opening

        array = grid.interpolate(np.min, "z").array

        w_k_list = [
            self._window_size(i, self.b) for i in range(self.n_windows)
        ]
        w_k_min = w_k_list[0]
        A = array
        m = A.shape[0]
        n = A.shape[1]
        flag = np.zeros((m, n))
        dh_t = self.dh_0
        for k, w_k in enumerate(w_k_list):
            opened = grey_opening(array, (w_k, w_k))
            if w_k == w_k_min:
                w_k_1 = 0
            else:
                w_k_1 = w_k_list[k - 1]
            for i in range(0, m):
                P_i = A[i, :]
                Z = P_i
                Z_f = opened[i, :]
                for j in range(0, n):
                    if Z[j] - Z_f[j] > dh_t:
                        flag[i, j] = w_k
                P_i = Z_f
                A[i, :] = P_i

            dh_t = self._dht(w_k, w_k_1, self.dh_0, self.dh_max,
                             self.cell_size)

        if np.sum(flag) == 0:
            raise ValueError(
                "No pixels were determined to be ground, please adjust the filter parameters."
            )

        # Remove interpolated cells
        empty = grid.empty_cells
        empty_y, empty_x = empty[:, 0], empty[:, 1]
        A[empty_y, empty_x] = np.nan

        B = np.where(flag == 0, A, np.nan)
        return B
Example No. 12
    def _filter(self):
        from scipy.ndimage.morphology import grey_opening

        w_k_list = [
            self._window_size(i, self.b) for i in range(self.n_windows)
        ]
        w_k_min = w_k_list[0]
        A = self.array
        m = A.shape[0]
        n = A.shape[1]
        flag = np.zeros((m, n))
        for k, w_k in enumerate(w_k_list):
            opened = grey_opening(self.array, (w_k, w_k))
            if w_k == w_k_min:
                w_k_1 = 0
            else:
                w_k_1 = w_k_list[k - 1]
            for i in range(0, m):
                P_i = A[i, :]
                Z = P_i
                Z_f = opened[i, :]
                dh_t = self._dht(Z, w_k, w_k_1, self.dh_0, self.dh_max,
                                 self.cell_size)
                for j in range(0, n):
                    if Z[j] - Z_f[j] > dh_t:
                        flag[i, j] = w_k
                P_i = Z_f
                A[i, :] = P_i

        if np.sum(flag) == 0:
            return (None)

        # Remove interpolated cells
        empty = self.grid.empty_cells
        empty_y, empty_x = empty[:, 0], empty[:, 1]
        A[empty_y, empty_x] = np.nan
        B = np.where(flag != 0, A, np.nan)
        return B
Example No. 13
def grey_open(im,strel=strel()):
    return morphology.grey_opening(im,structure=strel)
Example No. 14
    def distort(imgae, config):
        """ 向图像中添加噪声
        这个函数修改自gqcnn的源程序中,具体原理参考论文
        """
        imgae_ = imgae.copy()
        # config = self._config
        im_height = imgae_.shape[0]
        im_width = imgae_.shape[1]
        im_center = np.array([float(im_height-1)/2, float(im_width-1)/2])
        # denoising and synthetic data generation
        if config['multiplicative_denoising']:
            gamma_shape = config['gamma_shape']
            gamma_scale = 1.0 / gamma_shape
            mult_samples = ss.gamma.rvs(gamma_shape, scale=gamma_scale)
            imgae_ = imgae_ * mult_samples

        # randomly dropout regions of the image for robustness
        if config['image_dropout']:
            if np.random.rand() < config['image_dropout_rate']:
                nonzero_px = np.where(imgae_ > 0)
                nonzero_px = np.c_[nonzero_px[0], nonzero_px[1]]
                num_nonzero = nonzero_px.shape[0]
                num_dropout_regions = ss.poisson.rvs(
                    config['dropout_poisson_mean'])

                # sample ellipses
                dropout_centers = np.random.choice(
                    num_nonzero, size=num_dropout_regions)
                x_radii = ss.gamma.rvs(
                    config['dropout_radius_shape'], scale=config['dropout_radius_scale'], size=num_dropout_regions)
                y_radii = ss.gamma.rvs(
                    config['dropout_radius_shape'], scale=config['dropout_radius_scale'], size=num_dropout_regions)

                # set interior pixels to zero
                for j in range(num_dropout_regions):
                    ind = dropout_centers[j]
                    dropout_center = nonzero_px[ind, :]
                    x_radius = x_radii[j]
                    y_radius = y_radii[j]
                    dropout_px_y, dropout_px_x = sd.ellipse(
                        dropout_center[0], dropout_center[1], y_radius, x_radius, shape=imgae_.shape)
                    imgae_[dropout_px_y, dropout_px_x] = 0.0

        # dropout a region around the areas of the image with high gradient
        if config['gradient_dropout']:
            if np.random.rand() < config['gradient_dropout_rate']:
                grad_mag = sf.gaussian_gradient_magnitude(
                    imgae_, sigma=config['gradient_dropout_sigma'])
                thresh = ss.gamma.rvs(
                    config['gradient_dropout_shape'], config['gradient_dropout_scale'], size=1)
                high_gradient_px = np.where(grad_mag > thresh)
                imgae_[high_gradient_px[0], high_gradient_px[1]] = 0.0

        # add correlated Gaussian noise
        if config['gaussian_process_denoising']:
            gp_rescale_factor = config['gaussian_process_scaling_factor']
            gp_sample_height = int(im_height / gp_rescale_factor)
            gp_sample_width = int(im_width / gp_rescale_factor)
            gp_num_pix = gp_sample_height * gp_sample_width
            if np.random.rand() < config['gaussian_process_rate']:
                gp_noise = ss.norm.rvs(scale=config['gaussian_process_sigma'], size=gp_num_pix).reshape(
                    gp_sample_height, gp_sample_width)
                # sm.imresize emits a deprecation warning (it will be removed)
                # gp_noise = sm.imresize(
                #     gp_noise, gp_rescale_factor, interp='bicubic', mode='F')
                # st.resize can be used to replace the deprecated sm.imresize
                # gp_noise = st.resize(gp_noise, (im_height, im_width))
                gp_noise = cv2.resize(
                    gp_noise, (im_height, im_width), interpolation=cv2.INTER_CUBIC)
                imgae_[imgae_ > 0] += gp_noise[imgae_ > 0]

        # run morphological open and close filters
        if config['morphological']:
            sample = np.random.rand()
            morph_filter_dim = ss.poisson.rvs(
                config['morph_poisson_mean'])
            if sample < config['morph_open_rate']:
                imgae_ = snm.grey_opening(
                    imgae_, size=morph_filter_dim)
            else:
                closed_imgae_ = snm.grey_closing(
                    imgae_, size=morph_filter_dim)

                # set new closed pixels to the minimum depth, mimicking the table
                new_nonzero_px = np.where(
                    (imgae_ == 0) & (closed_imgae_ > 0))
                closed_imgae_[new_nonzero_px[0], new_nonzero_px[1]] = np.min(
                    imgae_[imgae_ > 0])
                imgae_ = closed_imgae_.copy()

        # randomly dropout borders of the image for robustness
        if config['border_distortion']:
            grad_mag = sf.gaussian_gradient_magnitude(
                imgae_, sigma=config['border_grad_sigma'])
            high_gradient_px = np.where(
                grad_mag > config['border_grad_thresh'])
            high_gradient_px = np.c_[
                high_gradient_px[0], high_gradient_px[1]]
            num_nonzero = high_gradient_px.shape[0]
            num_dropout_regions = ss.poisson.rvs(
                config['border_poisson_mean'])

            # sample ellipses
            dropout_centers = np.random.choice(
                num_nonzero, size=num_dropout_regions)
            x_radii = ss.gamma.rvs(
                config['border_radius_shape'], scale=config['border_radius_scale'], size=num_dropout_regions)
            y_radii = ss.gamma.rvs(
                config['border_radius_shape'], scale=config['border_radius_scale'], size=num_dropout_regions)

            # set interior pixels to zero or one
            for j in range(num_dropout_regions):
                ind = dropout_centers[j]
                dropout_center = high_gradient_px[ind, :]
                x_radius = x_radii[j]
                y_radius = y_radii[j]
                dropout_px_y, dropout_px_x = sd.ellipse(
                    dropout_center[0], dropout_center[1], y_radius, x_radius, shape=imgae_.shape)
                if np.random.rand() < 0.5:
                    imgae_[dropout_px_y, dropout_px_x] = 0.0
                else:
                    imgae_[dropout_px_y, dropout_px_x] = imgae_[
                        dropout_center[0], dropout_center[1]]

        # randomly replace background pixels with constant depth
        if config['background_denoising']:
            if np.random.rand() < config['background_rate']:
                imgae_[imgae_ > 0] = config['background_min_depth'] + (
                    config['background_max_depth'] - config['background_min_depth']) * np.random.rand()

        # symmetrize images
        if config['symmetrize']:
            # rotate with 50% probability
            if np.random.rand() < 0.5:
                theta = 180.0
                rot_map = cv2.getRotationMatrix2D(
                    tuple(im_center), theta, 1)
                imgae_ = cv2.warpAffine(
                    imgae_, rot_map, (im_height, im_width), flags=cv2.INTER_NEAREST)
            # reflect left right with 50% probability
            if np.random.rand() < 0.5:
                imgae_ = np.fliplr(imgae_)
            # reflect up down with 50% probability
            if np.random.rand() < 0.5:
                imgae_ = np.flipud(imgae_)
        return imgae_
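A hypothetical driver that exercises only the morphological branch of distort; every other switch is turned off. It assumes distort is callable as a plain function (the indentation suggests it lives inside a class) and that the module aliases the snippet uses are in scope (np = numpy, ss = scipy.stats, snm = scipy.ndimage.morphology); the config keys and values below are illustrative, not the gqcnn defaults:

import numpy as np

depth = np.random.rand(96, 96).astype(np.float32) + 0.5   # fake depth image, strictly positive

config = {
    'multiplicative_denoising': False,
    'image_dropout': False,
    'gradient_dropout': False,
    'gaussian_process_denoising': False,
    'morphological': True,
    'morph_poisson_mean': 5.0,   # mean size of the open/close filter window
    'morph_open_rate': 0.5,      # probability of applying an opening instead of a closing
    'border_distortion': False,
    'background_denoising': False,
    'symmetrize': False,
}

noisy_depth = distort(depth, config)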
Example No. 15
def pre_process_image(img, path=None):
    if path: file_name, file_ext = os.path.splitext(os.path.basename(path))

    # MORPHOLOGY CLOSING
    # http://docs.scipy.org/doc/scipy/reference/generated/scipy.ndimage.morphology.grey_closing.html#scipy.ndimage.morphology.grey_closing
    # http://en.wikipedia.org/wiki/Mathematical_morphology
    #
    # Remove every letter and imperfection
    # Parameters: here I use the method in its basic form, where the second parameter is a rectangle (height, width)
    # In theory I should match the average size of a word (a single letter is too small, a whole line is too large)
    #
    #orig#im = morphology.grey_closing(img, (1, 101))
    im = morphology.grey_closing(img, (15, 105)) #odd numbers are better
    if path and SAVE_INTERMEDIATE_STEPS: imsave(os.path.join(END_FOLDER, '%s_step1%s' % (file_name, file_ext)), im)

    # OTSU THRESHOLDING (statistically optimal)
    # http://docs.opencv.org/modules/imgproc/doc/miscellaneous_transformations.html#threshold
    # http://en.wikipedia.org/wiki/Otsu%27s_Method
    #
    # Convert the image to two colors: black for the background, white for the foreground
    # Parameters: 0, 1 (background and foreground values) in production
    # 0, 255 if I want to view the image in black and white for debugging
    #
    #orig#t, im = cv.threshold(im, 0, 1, cv.THRESH_OTSU)
    t, im = cv.threshold(im, 0, 255, cv.THRESH_OTSU)
    if path and SAVE_INTERMEDIATE_STEPS: imsave(os.path.join(END_FOLDER, '%s_step2%s' % (file_name, file_ext)), im)
    
    # MORPHOLOGY OPENING
    # http://docs.scipy.org/doc/scipy/reference/generated/scipy.ndimage.morphology.grey_opening.html#scipy.ndimage.morphology.grey_opening
    # http://en.wikipedia.org/wiki/Mathematical_morphology
    #
    # Remove the thin white borders
    # Parameters: here I use the method in its basic form, where the second parameter is a rectangle (height, width)
    # The rectangle is a square whose side equals the minimum size of the largest extra white border
    #
    #origl# im = morphology.grey_opening(im, (51, 51))
    im = morphology.grey_opening(im, (51, 51)) #odd numbers are better
    if path and SAVE_INTERMEDIATE_STEPS: imsave(os.path.join(END_FOLDER, '%s_step3%s' % (file_name, file_ext)), im)
    
    # CONNECTED-COMPONENT LABELING
    # http://docs.scipy.org/doc/scipy/reference/generated/scipy.ndimage.measurements.label.html#scipy.ndimage.measurements.label
    # http://en.wikipedia.org/wiki/Connected-component_labeling
    #
    # Split the image (which by now should be clean of all text) into sub-images
    # Keep only the largest sub-image, since it should be the page (everything else is painted black)
    #
    # The result can be:
    #  1 sub-image: perfect case, the sub-image is the sheet (the rest is the background, i.e. the black border, which does not count)
    #  2, 3, 4, 5 sub-images: there is probably an extra white border interfering on 1, 2, 3 or 4 sides
    #  6+ sub-images: the page contains large pictures that have been split up
    #
    # label() splits the image into sub-images and assigns a number to every pixel (0 is the background)
    # All pixels with the same number belong to the same sub-image
    #
    # Returns:
    #  a matrix the same size as the source image, where every pixel has a label
    #  the number of sub-images identified
    #
    lbl, ncc = label(im)
    # Identify the largest sub-image
    largest = 0, 0
    for i in range(1, ncc + 1):
        size = len(numpy.where(lbl == i)[0]) #counts how many times the value i is present in the lbl array
        if size > largest[1]:
            largest = i, size
    # Set color 0 for all sub-images except the largest one
    for i in range(1, ncc + 1):
        if i == largest[0]:
            continue
        im[lbl == i] = 0
        #If I wanted to color the sub-images in grey scale
        #import math##
        #im[lbl == i] = math.floor(255/ncc-1)*ncc
    if path and SAVE_INTERMEDIATE_STEPS: imsave(os.path.join(END_FOLDER, '%s_step4%s' % (file_name, file_ext)), im)###################
    return im
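A hypothetical driver for pre_process_image, assuming the module-level imports the function relies on (cv2 as cv, numpy, scipy.ndimage's morphology and label, imsave) plus the SAVE_INTERMEDIATE_STEPS and END_FOLDER constants are available; the file name is a placeholder:

import cv2 as cv

page = cv.imread('scan_0001.png', cv.IMREAD_GRAYSCALE)    # placeholder scan
clean_page = pre_process_image(page, path='scan_0001.png')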
Example No. 16
# ===================== MANIPULATING THE IMAGE ==================
# renormalising the input.
# If needed, a nonlinear transformation can be performed by changing the exponent variable
# Larger exponents suppress the low intensity region of the spectrum
print("====> Starting the manipulations...\n")

if (exponent != 1):
    print("- Nonlinear Stretching...")
    # Data=((Data/float(Data.max()/theta) )**exponent*255/phi).astype(int)
    Data=((Data/float(Data.max()/theta) )**exponent*255/phi).astype(int)
    if(save):
        pl.save("Stretching", Data)

if(opening):
    print "- Morphological Opening..."
    Data=grey_opening(Data, structure=Cross)
    if(save):
        pl.save("Opening", Data)

if(erosion):
    print "- Morphological Erosion..."
    Data=grey_erosion(Data, structure=Cross)
    if(save):
        pl.save("Erosion", Data)
if(closing):
    print "- Morphological Closing..."
    Data=grey_closing(Data, structure=Cross)
    if(save):
        pl.save("Closing", Data)

# Remark: one could keep on with other transformations, other kernels and so on
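The Cross structuring element used above is not defined in this excerpt; a plausible stand-in (an assumption, shown only for completeness) is the flat 3x3 cross produced by scipy:

import numpy as np
from scipy.ndimage import generate_binary_structure

Cross = generate_binary_structure(2, 1).astype(int)   # 3x3 plus-shaped kernel:
# [[0, 1, 0],
#  [1, 1, 1],
#  [0, 1, 0]]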
Example No. 17
 def gray_opening(self, *args, **kw):
     '''see scipy.ndimage.morphology.grey_opening'''
     return Image(_morphology.grey_opening(self, *args, **kw)).convert_type(self.dtype)
Example No. 18
    for k in [10, 20]:
        m1.saveImage(outputFolder+ timeString() + m1.name + ".png")
        x = m.getKmeans(k =k, threshold = threshold)
        x['pattern'].saveImage(outputFolder+ getTimeString() + m.name + "threshold%d_clusters%d.png" % (threshold,k))
        res[(threshold, k)] = x['pattern']

"""
#########################################

threshold = 40
k =  10

m.load()
m1=m.threshold(0)
m1.show()
m1.matrix = mor.grey_opening(m1.matrix, 5) ####
m1.matrix = mor.grey_closing(m1.matrix, 5) ####

m1.show()
m1.backupMatrix(0)
m1.showWithCoast()
m1.saveImage(outputFolder+m.name+'grey_opening_closing5.png')
m1.restoreMatrix()

threshold=30
x = m1.getKmeans(k =k, threshold = threshold)
x['pattern'].saveImage(outputFolder+ getTimeString() + m.name + "threshold%d_clusters%d.png" % (threshold,k))
threshold=40

m.load()
m.show()
Example No. 19
 if True:
     '''Watershed segmentation of the objects'''
     
     #Selection of local maxima
     obj_height = obj_restored * im_max
     marks = skmorpho.h_maxima(obj_height,1, selem=None)
     #Labelling as markers
     marks = ndi.label(marks)[0]
     #watershed segmentation
     label_obj = skmorpho.watershed(-obj_height, marks)
     
     #Coarser segmentation
     #reference elevation image
     ref = im_accum * im_min * obj_restored
     #removal of small objects already segmented
     ref = morpho.grey_opening(ref, size = 1)
     #Downsizing factor
     DS_factor = 10
     #Resize image     
     image_resized = resize(ref, (ref.shape[0] // DS_factor, ref.shape[1] // DS_factor))
     #New local maxima
     marks2 = skmorpho.h_maxima(image_resized,10, selem=None)
     marks2 = ndi.label(marks2)[0]# + np.max(obj_labels) to avoid giving the same labels, 
     #but removed here for visualisation
     
     #Locate the local maxima
     (indx, indy) = np.where(marks2 != 0)
     marks_resized = obj_height * 0
     #Assign local maxima in an image with original size
     marks_resized[DS_factor*indx, DS_factor*indy] = marks2[indx, indy]
     #Dilate the maxima to ease the watershed
Example No. 20
def orthorectify(args_source_image, args_dsm, args_destination_image,
                 args_occlusion_thresh=1.0, args_denoise_radius=2,
                 args_raytheon_rpc=None, args_dtm=None):
    """
    Orthorectify an image given the DSM

    Args:
        source_image: Source image file name
        dsm: Digital surface model (DSM) image file name
        destination_image: Orthorectified image file name
        occlusion-thresh: Threshold on height difference for detecting
                          and masking occluded regions (in meters)
        denoise-radius: Apply morphological operations with this radius
                        to the DSM to reduce speckled noise
        raytheon-rpc: Raytheon RPC file name. If not provided
                      the RPC is read from the source_image

    Returns:
        COMPLETE_DSM_INTERSECTION = 0
        PARTIAL_DSM_INTERSECTION = 1
        EMPTY_DSM_INTERSECTION = 2
        ERROR = 10
    """
    returnValue = COMPLETE_DSM_INTERSECTION
    # open the source image
    sourceImage = gdal.Open(args_source_image, gdal.GA_ReadOnly)
    if not sourceImage:
        return ERROR
    sourceBand = sourceImage.GetRasterBand(1)

    if (args_raytheon_rpc):
        # read the RPC from raytheon file
        print("Reading RPC from Raytheon file: {}".format(args_raytheon_rpc))
        model = raytheon_rpc.read_raytheon_rpc_file(args_raytheon_rpc)
    else:
        # read the RPC from RPC Metadata in the image file
        print("Reading RPC Metadata from {}".format(args_source_image))
        rpcMetaData = sourceImage.GetMetadata('RPC')
        model = rpc.rpc_from_gdal_dict(rpcMetaData)
    if model is None:
        print("Error reading the RPC")
        return ERROR

    # open the DSM
    dsm = gdal.Open(args_dsm, gdal.GA_ReadOnly)
    if not dsm:
        return ERROR
    band = dsm.GetRasterBand(1)
    dsmRaster = band.ReadAsArray(
        xoff=0, yoff=0,
        win_xsize=dsm.RasterXSize, win_ysize=dsm.RasterYSize)
    dsm_nodata_value = band.GetNoDataValue()
    print("DSM raster shape {}".format(dsmRaster.shape))

    if args_dtm:
        dtm = gdal.Open(args_dtm, gdal.GA_ReadOnly)
        if not dtm:
            return ERROR
        band = dtm.GetRasterBand(1)
        dtmRaster = band.ReadAsArray(
            xoff=0, yoff=0,
            win_xsize=dtm.RasterXSize, win_ysize=dtm.RasterYSize)
        newRaster = numpy.where(dsmRaster != dsm_nodata_value, dsmRaster, dtmRaster)
        dsmRaster = newRaster

    # apply morphology to denoise the DSM
    if (args_denoise_radius > 0):
        morph_struct = circ_structure(args_denoise_radius)
        dsmRaster = morphology.grey_opening(dsmRaster, structure=morph_struct)
        dsmRaster = morphology.grey_closing(dsmRaster, structure=morph_struct)

    # create the rectified image
    driver = dsm.GetDriver()
    driverMetadata = driver.GetMetadata()
    destImage = None
    arrayX = None
    arrayY = None
    arrayZ = None
    if driverMetadata.get(gdal.DCAP_CREATE) == "YES":
        print("Create destination image of "
              "size:({}, {}) ...".format(dsm.RasterXSize, dsm.RasterYSize))
        # georeference information
        projection = dsm.GetProjection()
        transform = dsm.GetGeoTransform()
        gcpProjection = dsm.GetGCPProjection()
        gcps = dsm.GetGCPs()
        options = ["COMPRESS=DEFLATE"]
        # ensure that space will be reserved for geographic corner coordinates
        # (in DMS) to be set later
        if (driver.ShortName == "NITF" and not projection):
            options.append("ICORDS=G")
        # If I try to use AddBand with GTiff I get:
        # Dataset does not support the AddBand() method.
        # So I create all bands using the same type at the beginning
        destImage = driver.Create(
            args_destination_image, xsize=dsm.RasterXSize,
            ysize=dsm.RasterYSize,
            bands=sourceImage.RasterCount, eType=sourceBand.DataType,
            options=options)

        if (projection):
            # georeference through affine geotransform
            destImage.SetProjection(projection)
            destImage.SetGeoTransform(transform)
            pixels = numpy.arange(0, dsm.RasterXSize)
            pixels = numpy.tile(pixels, dsm.RasterYSize)
            lines = numpy.arange(0, dsm.RasterYSize)
            lines = numpy.repeat(lines, dsm.RasterXSize)
            arrayX = transform[0] + pixels * transform[1] + lines * transform[2]
            arrayY = transform[3] + pixels * transform[4] + lines * transform[5]
            arrayZ = dsmRaster[lines, pixels]
            validIdx = arrayZ != dsm_nodata_value
            pixels = pixels[validIdx]
            lines = lines[validIdx]
            arrayX = arrayX[validIdx]
            arrayY = arrayY[validIdx]
            arrayZ = arrayZ[validIdx]

        else:
            # georeference through GCPs
            destImage.SetGCPs(gcps, gcpProjection)
            # not implemented: compute arrayX, arrayY, arrayZ
            print("Not implemented yet")
            return ERROR
    else:
        print("Driver {} does not supports Create().".format(driver))
        return ERROR

    # convert coordinates to Long/Lat
    srs = osr.SpatialReference(wkt=projection)
    proj_srs = srs.ExportToProj4()
    inProj = pyproj.Proj(proj_srs)
    outProj = pyproj.Proj('+proj=longlat +datum=WGS84')
    arrayX, arrayY = pyproj.transform(inProj, outProj, arrayX, arrayY)

    # Sort the points by height so that higher points project last
    if (args_occlusion_thresh > 0):
        print("Sorting by Height")
        heightIdx = numpy.argsort(arrayZ)
        arrayX = arrayX[heightIdx]
        arrayY = arrayY[heightIdx]
        arrayZ = arrayZ[heightIdx]
        lines = lines[heightIdx]
        pixels = pixels[heightIdx]

    # project the points
    minZ = numpy.amin(arrayZ)
    maxZ = numpy.amax(arrayZ)
    # project points to get image indexes and save their height into the image
    print("Project {} points to destination image ...".format(len(arrayX)))
    print("Points min/max Z: {}/{}  ...".format(minZ, maxZ))

    print("Projecting Points")
    imgPoints = model.project(numpy.array([arrayX, arrayY, arrayZ]).transpose())
    intImgPoints = imgPoints.astype(int).transpose()

    # compute the bounds of the relevant AOI in the source image
    print("Source Image size: ", [sourceImage.RasterXSize, sourceImage.RasterYSize])
    minPoint = numpy.maximum([0, 0], numpy.min(intImgPoints, 1))
    print("AOI min: ", minPoint)
    maxPoint = numpy.minimum(numpy.max(intImgPoints, 1),
                             [sourceImage.RasterXSize,
                              sourceImage.RasterYSize])
    print("AOI max: ", maxPoint)
    cropSize = maxPoint - minPoint
    if numpy.any(cropSize < 1):
        print("DSM does not intersect source image")
        returnValue = EMPTY_DSM_INTERSECTION

    # shift the projected image point to the cropped AOI space
    intImgPoints[0] -= minPoint[0]
    intImgPoints[1] -= minPoint[1]

    # find indices of points that fall inside the image bounds
    print("Source raster shape {}".format(cropSize))
    validIdx = numpy.logical_and.reduce((intImgPoints[1] < cropSize[1],
                                         intImgPoints[1] >= 0,
                                         intImgPoints[0] < cropSize[0],
                                         intImgPoints[0] >= 0))
    intImgPoints = intImgPoints[:, validIdx]

    # keep only the points that are in the image
    numOut = numpy.size(validIdx) - numpy.count_nonzero(validIdx)
    if (numOut > 0 and not returnValue == EMPTY_DSM_INTERSECTION):
        print("Skipped {} points outside of image".format(numOut))
        returnValue = PARTIAL_DSM_INTERSECTION

    # use a height map to test for occlusion
    if (args_occlusion_thresh > 0):
        print("Mapping occluded points")
        valid_arrayZ = arrayZ[validIdx]
        # render a height map in the source image space
        height_map = numpy.full(cropSize[::-1], -numpy.inf, dtype=numpy.float32)
        height_map[intImgPoints[1], intImgPoints[0]] = valid_arrayZ

        # get a mask of points that locally are (approximately)
        # the highest point in the map
        is_max_height = height_map[intImgPoints[1], intImgPoints[0]] \
            <= valid_arrayZ + args_occlusion_thresh
        num_occluded = numpy.size(is_max_height) - numpy.count_nonzero(is_max_height)
        print("Skipped {} occluded points".format(num_occluded))

        # keep only non-occluded image points
        intImgPoints = intImgPoints[:, is_max_height]
        # disable occluded points in the valid pixel mask
        validIdx[numpy.nonzero(validIdx)[0][numpy.logical_not(is_max_height)]] = False

    for bandIndex in range(1, sourceImage.RasterCount + 1):
        print("Processing band {} ...".format(bandIndex))
        sourceBand = sourceImage.GetRasterBand(bandIndex)
        nodata_value = sourceBand.GetNoDataValue()
        # for now use zero as a no-data value if one is not specified
        # it would probably be better to add a mask (alpha) band instead
        if nodata_value is None:
            nodata_value = 0
        if numpy.any(cropSize < 1):
            # read one value for data type
            sourceRaster = sourceBand.ReadAsArray(
                xoff=0, yoff=0, win_xsize=1, win_ysize=1)
            destRaster = numpy.full(
                (dsm.RasterYSize, dsm.RasterXSize), nodata_value,
                dtype=sourceRaster.dtype)
        else:
            sourceRaster = sourceBand.ReadAsArray(
                xoff=int(minPoint[0]), yoff=int(minPoint[1]),
                win_xsize=int(cropSize[0]), win_ysize=int(cropSize[1]))

            print("Copying colors ...")
            destRaster = numpy.full(
                (dsm.RasterYSize, dsm.RasterXSize), nodata_value,
                dtype=sourceRaster.dtype)
            destRaster[lines[validIdx], pixels[validIdx]] = sourceRaster[
                intImgPoints[1], intImgPoints[0]]

        print("Write band ...")
        destBand = destImage.GetRasterBand(bandIndex)
        destBand.SetNoDataValue(nodata_value)
        destBand.WriteArray(destRaster)
    return returnValue
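The circ_structure helper called above is not included in this excerpt; a minimal sketch of a function with the assumed behaviour (a flat, disk-shaped structuring element of the given radius), not the project's actual implementation:

import numpy

def circ_structure(radius):
    # boolean disk of side 2*radius + 1, suitable for grey_opening / grey_closing
    n = 2 * radius + 1
    yy, xx = numpy.mgrid[:n, :n]
    return (xx - radius) ** 2 + (yy - radius) ** 2 <= radius ** 2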
Example No. 21
from iDISCO.Visualization.Plot import plotTiling, plotOverlayLabel

from iDISCO.Utils.Timer import Timer;


img = numpy.random.rand(2000,2000) * 65535;
img = img.astype('int')

dataraw = dataset[:,:,1160];

img = dataraw[:,:];
img.shape


t = Timer();
res = grey_opening(img, footprint=structureElement('Disk', (15,15)).astype('bool'));  # disk footprint assumed, mirroring the OpenCV test below
t.printElapsedTime('scipy');


#t.reset();
#res2 = open(img, structureElement('Disk', (30,30)).astype('bool'));
#t.printElapsedTime('mahotas');


t.reset();
se = structureElement('Disk', (15,15)).astype('uint8');
res2 = cv2.morphologyEx(img, cv2.MORPH_OPEN, se)
t.printElapsedTime('opencv');
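structureElement is an iDISCO helper that is not shown here; a rough stand-in (an assumption for illustration only) returning a boolean disk of the requested size:

import numpy

def structureElement(name, size):
    # only the 'Disk' form used above is sketched
    assert name == 'Disk'
    ry, rx = (size[0] - 1) / 2.0, (size[1] - 1) / 2.0
    yy, xx = numpy.mgrid[:size[0], :size[1]]
    return ((yy - ry) / (ry + 0.5)) ** 2 + ((xx - rx) / (rx + 0.5)) ** 2 <= 1.0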
Example No. 22
from matplotlib import pyplot as plt
import sys,os
import glob
import h5py
import numpy as np
from scipy.ndimage.morphology import grey_opening
import scipy as sp
import scipy.stats  # needed for sp.stats.mode below


flist = glob.glob('/home/rjonnal/data/Dropbox/Share/2g_aooct_data/Data/2016.04.12_2/*.hdf5')


for f in flist:
    h5 = h5py.File(f)
    offset_matrix = h5['model/z_offsets'][:]
    goodness_matrix = h5['model/z_offset_goodness'][:]

    om = offset_matrix[0,:,:]
    oms = grey_opening(om,(1,15))


    mode = sp.stats.mode(oms.ravel())[0][0]
    mask = np.zeros(oms.shape)
    lower_threshold = np.mean(oms)-2.0*np.std(oms)
    upper_threshold = np.mean(oms)+2.0*np.std(oms)
    cond = np.logical_and(oms>lower_threshold,oms<upper_threshold)
    mask[np.where(cond)] = 1


    
    plt.figure(figsize=(18,6))
    clim = np.min(om),np.max(om)
    plt.subplot(131)
    plt.imshow(om,interpolation='none',clim=clim,cmap='gray')