Example #1
def compressed_holographic_retrieval(data,ARGS):
  window_f = np.hamming
  kx,ky = find_carrier(data)
  Kx,Ky = np.fft.fftfreq(data.shape[0]),np.fft.fftfreq(data.shape[1])
  KX,KY = np.meshgrid(Kx,Ky)
  s2 = np.sqrt(kx**2 + ky**2)/4.
  G2 = gaussian(KX,KY,0,0,s2)

  nx,ny = data.shape
  X,Y = np.meshgrid(np.arange(nx),np.arange(ny))
  r = plane_wave(X,Y,kx,ky)
  i3 = data*r

#  print np.around(float(s2*256)/ax) - float(s2*256)/ax
  a3 = 14./256
  G3_kernel = ndi.zoom(G2,a3)
#  phplot.dBshow(G3_kernel)
  g3_kernel = np.fft.ifft2(G3_kernel)
  w3zoom = np.sqrt(np.outer(window_f(g3_kernel.shape[0]),window_f(g3_kernel.shape[1])))
  g3_kernel *= w3zoom
  o3 = (ndi.convolve(i3.real,g3_kernel.real) -\
        ndi.convolve(i3.imag,g3_kernel.imag) +\
        1j*ndi.convolve(i3.real,g3_kernel.imag) +\
        1j*ndi.convolve(i3.imag,g3_kernel.real))
  p3 = np.arctan2(o3.imag,o3.real)
#  phplot.imageshow(p3)
  return p3
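
The four real convolutions building o3 implement a single complex convolution (find_carrier, gaussian and plane_wave come from the snippet's parent module). A minimal self-contained sketch of that identity, assuming SciPy >= 1.6 for the direct complex comparison:

import numpy as np
import scipy.ndimage as ndi

rng = np.random.default_rng(0)
img = rng.standard_normal((32, 32)) + 1j * rng.standard_normal((32, 32))
ker = rng.standard_normal((5, 5)) + 1j * rng.standard_normal((5, 5))

# complex convolution assembled from four real convolutions, as in o3 above
out = (ndi.convolve(img.real, ker.real) - ndi.convolve(img.imag, ker.imag)
       + 1j * (ndi.convolve(img.real, ker.imag) + ndi.convolve(img.imag, ker.real)))

# SciPy >= 1.6 convolves complex arrays directly; both paths agree
assert np.allclose(out, ndi.convolve(img, ker))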
Example #2
def vesiclerf_feats(em):
    # return value
    xt = []
    num_features = 2

    # Kernels
    B0 = np.ones([5, 5, 1]) / (5 * 5 * 1)
    B1 = np.ones([15, 15, 3]) / (15 * 15 * 3)
    B2 = np.ones([25, 25, 5]) / (25 * 25 * 5)

    ### Intensity Feats ###
    # find weighted average of features
    I0 = ndimage.convolve(em, B0, mode="constant")
    I2 = ndimage.convolve(em, B1, mode="constant")

    # reshape data
    # I0 = [np.reshape(I0,(I0.size,1)).tolist(), num_features]
    # I2 = [np.reshape(I2,(I2.size,1)).tolist(), num_features]
    # I0 = np.reshape(I0,(I0.size,1))
    I2 = np.reshape(I2, (I2.size, 1))
    xt = I2
    # xt.append(I0)
    # xt.append(I2)

    return xt
Example #3
def compressed_wave_retrieval(data,ARGS):
  window_f = np.hamming

  kx,ky = find_carrier(data)
  nx,ny = data.shape
  X,Y = np.meshgrid(np.arange(nx),np.arange(ny))

  G = find_gaussian(data,kx,ky)
#  print float(ik[1])/256
#  print np.around(float(ik[1])/ax) - float(ik[1])/ax
  a = [11./256.,12./256.]
  G_kernel = ndi.zoom(G,a)
#  phplot.dBshow(G_kernel)
  g_kernel = np.fft.ifft2(G_kernel)
  wzoom = np.sqrt(np.outer(window_f(g_kernel.shape[0]),window_f(g_kernel.shape[1])))
  g_kernel *= wzoom
#  print g_kernel.shape
#  phplot.imageshow(g_kernel.real)
#  nx,ny = data.shape
#  X,Y = np.meshgrid(np.arange(nx),np.arange(ny))
  i = (ndi.convolve(data,g_kernel.real) + 1j*ndi.convolve(data,g_kernel.imag))
#  phplot.dBshow(np.fft.fft2(i))
  r = plane_wave(X,Y,kx,ky)
  o = i*r
  ph = np.arctan2(o.imag,o.real)
#  phplot.imageshow(p)
  return ph
Example #4
def compute_lima_on_off_image(n_on, n_off, a_on, a_off, kernel, exposure=None):
    """
    Compute Li&Ma significance and flux images for on-off observations.

    Parameters
    ----------
    n_on : `~numpy.ndarray`
        Counts image
    n_off : `~numpy.ndarray`
        Off counts image
    a_on : `~numpy.ndarray`
        Relative background efficiency in the on region
    a_off : `~numpy.ndarray`
        Relative background efficiency in the off region
    kernel : `astropy.convolution.Kernel2D`
        Convolution kernel
    exposure : `~numpy.ndarray`
        Exposure image

    Returns
    -------
    images : `~gammapy.image.SkyImageList`
        Results images container

    See also
    --------
    gammapy.stats.significance_on_off
    """
    from scipy.ndimage import convolve

    # The kernel is modified later; make a copy here
    kernel = deepcopy(kernel)

    if not kernel.is_bool:
        log.warning('Using weighted kernels can lead to biased results.')

    kernel.normalize('peak')
    conv_opt = dict(mode='constant', cval=np.nan)

    n_on_conv = convolve(n_on, kernel.array, **conv_opt)
    a_on_conv = convolve(a_on, kernel.array, **conv_opt)
    alpha_conv = a_on_conv / a_off
    background_conv = alpha_conv * n_off
    excess_conv = n_on_conv - background_conv
    significance_conv = significance_on_off(n_on_conv, n_off, alpha_conv, method='lima')

    images = SkyImageList([
        SkyImage(name='significance', data=significance_conv),
        SkyImage(name='n_on', data=n_on_conv),
        SkyImage(name='background', data=background_conv),
        SkyImage(name='excess', data=excess_conv),
        SkyImage(name='alpha', data=alpha_conv),
    ])

    # TODO: should we be doing this here?
    # Wouldn't it be better to let users decide if they want this,
    # and have it easily accessible as an attribute or method?
    _add_other_images(images, exposure, kernel, conv_opt)

    return images
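
A hypothetical usage sketch for the function above, with synthetic Poisson counts and an astropy Tophat2DKernel; it assumes the gammapy-era imports (SkyImageList, significance_on_off) that the function itself relies on:

import numpy as np
from astropy.convolution import Tophat2DKernel

rng = np.random.default_rng(42)
shape = (50, 50)
n_on = rng.poisson(12.0, shape).astype(float)    # on-region counts
n_off = rng.poisson(100.0, shape).astype(float)  # off-region counts
a_on = np.ones(shape)                            # relative background efficiency (on)
a_off = 10.0 * np.ones(shape)                    # relative background efficiency (off)

images = compute_lima_on_off_image(n_on, n_off, a_on, a_off,
                                   kernel=Tophat2DKernel(radius=5))
print(images['significance'].data.mean())  # assumes SkyImageList lookup by name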
Example #5
def skeletonize_mitochondria(mCh_channel):

    mch_collector = np.max(mCh_channel, axis=0)  # TODO: check max projection vs. sum
    labels = np.zeros(mch_collector.shape, dtype=np.uint8)

    # thresh = np.max(mch_collector)/2.
    thresh = threshold_otsu(mch_collector)
    # TODO: use adaptive threshold? => Otsu seems to be sufficient in this case
    # http://scikit-image.org/docs/dev/auto_examples/xx_applications/plot_thresholding.html#sphx
    # -glr-auto-examples-xx-applications-plot-thresholding-py
    #  log-transform? => Nope, does not work
    # TODO: hessian/laplacian of gaussian blob detection?

    labels[mch_collector > thresh] = 1
    skeleton2 = skeletonize(labels)
    skeleton, distance = medial_axis(labels, return_distance=True)
    active_threshold = np.mean(mch_collector[labels.astype(bool)]) * 5  # boolean mask, not integer indexing

    # print active_threshold
    transform_filter = np.zeros(mch_collector.shape, dtype=np.uint8)
    transform_filter[np.logical_and(skeleton > 0, mch_collector > active_threshold)] = 1
    skeleton = transform_filter * distance

    skeleton_ma = np.ma.masked_array(skeleton, skeleton > 0)
    skeleton_convolve = ndi.convolve(skeleton_ma, np.ones((3, 3)), mode='constant', cval=0.0)
    divider_convolve = ndi.convolve(transform_filter, np.ones((3, 3)), mode='constant', cval=0.0)
    skeleton_convolve[divider_convolve > 0] = skeleton_convolve[divider_convolve > 0] \
                                              / divider_convolve[divider_convolve > 0]
    new_skeleton = np.zeros_like(skeleton)
    new_skeleton[skeleton2] = skeleton_convolve[skeleton2]
    skeleton = new_skeleton

    return labels, mch_collector, skeleton, transform_filter
Example #6
def energy_image(im):
    gray_img = rgb2gray(im)
    double_img = im2double(gray_img)
    xconvolution = ndimage.convolve(double_img, xGrad, mode='constant', cval=0.0)
    yconvolution = ndimage.convolve(double_img, yGrad, mode='constant', cval=0.0)
    gradient_img = np.sqrt(np.square(xconvolution) + np.square(yconvolution))
    return im2double(gradient_img)
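
xGrad and yGrad are not defined in this snippet; a self-contained variant, assuming they are Sobel-style kernels and relying on rgb2gray's native [0, 1] floats in place of im2double:

import numpy as np
from scipy import ndimage
from skimage.color import rgb2gray

# assumed gradient kernels; the original's xGrad/yGrad are likely Sobel-like
xGrad = np.array([[-1, 0, 1], [-2, 0, 2], [-1, 0, 1]], dtype=float)
yGrad = xGrad.T

def energy_image_standalone(im):
    gray = rgb2gray(im)  # already float in [0, 1]
    gx = ndimage.convolve(gray, xGrad, mode='constant', cval=0.0)
    gy = ndimage.convolve(gray, yGrad, mode='constant', cval=0.0)
    return np.sqrt(gx**2 + gy**2)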
Example #7
 def optimal_extract(self, data, bin=0):        
     import scipy.ndimage as nd
     
     wave = (np.arange(self.shg[1])+1-self.cutout_dimensions[1])*(self.lam[1]-self.lam[0]) + self.lam[0]
     
     if not hasattr(self, 'opt_profile'):
         m = self.compute_model(self.thumb, id=self.id, in_place=False).reshape(self.shg)
         m[m < 0] = 0
         self.opt_profile = m/m.sum(axis=0)
         
     num = self.opt_profile*data*self.ivar.reshape(self.shg)
     den = self.opt_profile**2*self.ivar.reshape(self.shg)
     opt = num.sum(axis=0)/den.sum(axis=0)
     opt_var = 1./den.sum(axis=0)
     
     if bin > 0:
         kern = np.ones(bin, dtype=float)/bin
         opt = nd.convolve(opt, kern)[bin//2::bin]
         opt_var = nd.convolve(opt_var, kern**2)[bin//2::bin]
         wave = wave[bin//2::bin]
         
     opt_rms = np.sqrt(opt_var)
     opt_rms[opt_var == 0] = 0
     
     return wave, opt, opt_rms
     
     
Example #8
def skeletonize_mitochondria(mch_channel):
    mch_collector = np.max(mch_channel, axis=0)  # TODO: check max projection v.s. sum
    skeleton_labels = np.zeros(mch_collector.shape, dtype=np.uint8)

    # thresh = np.max(mch_collector)/2.
    thresh = threshold_otsu(mch_collector)
    # use adaptive threshold? => Otsu seems to be sufficient in this case

    skeleton_labels[mch_collector > thresh] = 1
    skeleton2 = skeletonize(skeleton_labels)
    skeleton, distance = medial_axis(skeleton_labels, return_distance=True)
    active_threshold = np.mean(mch_collector[skeleton_labels.astype(bool)]) * 5  # boolean mask, not integer indexing

    # print active_threshold
    transform_filter = np.zeros(mch_collector.shape, dtype=np.uint8)
    transform_filter[np.logical_and(skeleton > 0, mch_collector > active_threshold)] = 1
    skeleton = transform_filter * distance

    skeleton_ma = np.ma.masked_array(skeleton, skeleton > 0)
    skeleton_convolve = ndi.convolve(skeleton_ma, np.ones((3, 3)), mode='constant', cval=0.0)
    divider_convolve = ndi.convolve(transform_filter, np.ones((3, 3)), mode='constant', cval=0.0)
    skeleton_convolve[divider_convolve > 0] = skeleton_convolve[divider_convolve > 0] / \
                                              divider_convolve[divider_convolve > 0]
    new_skeleton = np.zeros_like(skeleton)
    new_skeleton[skeleton2] = skeleton_convolve[skeleton2]
    skeleton = new_skeleton

    return skeleton_labels, mch_collector, skeleton, transform_filter
Example #9
    def run_iteration(self, update_mask=True):
        """Run one iteration."""
        # Start with images from the last iteration
        images = self._data[-1]
        
        logging.info('*** INPUT IMAGES ***')
        images.print_info()

        # Compute new exclusion mask:
        if update_mask:
            logging.info('Computing new exclusion mask')
            mask = np.where(images.significance > self.significance_threshold, 0, 1)
            #print('===', (mask == 0).sum())
            mask = np.invert(binary_dilation_circle(mask == 0, radius=self.mask_dilation_radius))
            #print('===', (mask == 0).sum())
        else:
            mask = images.mask.copy()
        
        # Compute new background estimate:
        # Convolve old background estimate with background kernel,
        # excluding sources via the old mask.
        weighted_counts = convolve(images.mask * images.counts, self.background_kernel)
        weighted_counts_normalisation = convolve(images.mask.astype(float), self.background_kernel)
        background = weighted_counts / weighted_counts_normalisation
        
        # Store new images (counts are unchanged between iterations)
        images = GammaImages(images.counts, background, mask)
        logging.info('Computing source kernel correlated images.')
        images.compute_correlated_maps(self.source_kernel)

        logging.info('*** OUTPUT IMAGES ***')
        images.print_info()
        self._data.append(images)
Example #10
    def smooth(self,sigma,compute_var=False,summed=False):

        sigma /= 1.5095921854516636  # 68% containment radius of a 2D Gaussian -> sigma
        sigma /= np.abs(self._axes[0]._delta)  # convert to pixel units
        
        from scipy import ndimage
        im = SkyImage(copy.deepcopy(self.wcs),
                      copy.deepcopy(self.axes()),
                      copy.deepcopy(self._counts),
                      self.roi_radius,
                      copy.deepcopy(self._roi_msk))

        # Construct a kernel
        nk = 41
        fn = lambda t, s: 1./(2*np.pi*s**2)*np.exp(-t**2/(s**2*2.0))
        b = np.abs(np.linspace(0,nk-1,nk) - (nk-1)/2.)
        k = np.zeros((nk,nk)) + np.sqrt(b[np.newaxis,:]**2 +
                                        b[:,np.newaxis]**2)
        k = fn(k,sigma)
        k /= np.sum(k)

        im._counts = ndimage.convolve(self._counts,k,mode='nearest')
        
#        im._counts = ndimage.gaussian_filter(self._counts, sigma=sigma,
#                                             mode='nearest')

        if compute_var:
            var = ndimage.convolve(self._counts, k**2, mode='wrap')
            im._var = var
        else:
            im._var = np.zeros(im._counts.shape)
            
        if summed: im /= np.sum(k**2)
            
        return im
Example #11
 def update(self, t_end, sink, source):
     """ Solves the system over using the predetermined time step dt
         until the end time of the simulation is reached.
         t_end - the end time to solve the system towards
     """
     t = 0
     epsilon = 1E-10
     diff = epsilon  * 2
     zeros = np.zeros(self.Ci.shape)
     while(t <= t_end and diff >= epsilon):
         #solve for the gradients in each direction
         l_x = ndimage.convolve(self.Ci, self._lx, mode = "constant",
                                cval = self._c_out)
         l_y = ndimage.convolve(self.Ci, self._ly, mode = "constant",
                                cval = self._c_out)
         l_z = ndimage.convolve(self.Ci, self._lz, mode = "constant",
                                cval = self._c_out)
         #first diffusion
         self.C = self.Ci + (l_x + l_y + l_z)*self._D*self.dt
         #MUST BE normalized by unit VOLUME
         temp_sink = (-sink*self.dt) / self._grid_vol
         temp_source = source*self.dt / self._grid_vol
         self.C += temp_sink + temp_source
         #get the summed difference
         diff = np.sum(np.abs(self.Ci - self.C))
         # make sure it's positive
         self.C = self.C * (self.C > 0.0)
         #update the old
         self.Ci = self.C
         #update the time step
         t += self.dt
Example #12
def filter(data,filtType,par):

    if   filtType == "sobel":       filt_data = sobel(data)
    elif filtType == "roberts":     filt_data = roberts(data)
    elif filtType == "canny":       filt_data = canny(data)
    elif filtType == "lowpass_avg":
        from scipy import ndimage
        p=int(par)
        kernel = np.ones((p,p),np.float32)/(p*p)
        filt_data = ndimage.convolve(data, kernel)
    elif filtType == "highpass_avg":
        from scipy import ndimage
        p=int(par)
        kernel = np.ones((p,p),np.float32)/(p*p)
        lp_data = ndimage.convolve(data, kernel)
        filt_data = data - lp_data
    elif filtType == "lowpass_gaussian":
        filt_data = gaussian(data, sigma=float(par))
    elif filtType == "highpass_gaussian":
        lp_data   = gaussian(data, sigma=float(par))
        filt_data = data - lp_data

    #elif filtType ==  "gradient":
       
    return filt_data
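
A quick sketch of what the two average-filter branches compute, on hypothetical input with par=5:

import numpy as np
from scipy import ndimage

data = np.random.default_rng(1).standard_normal((64, 64))
kernel = np.ones((5, 5), np.float32) / 25  # "lowpass_avg" with par=5
lp = ndimage.convolve(data, kernel)
hp = data - lp                             # "highpass_avg" with par=5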
Example #13
        def con(k):
            c = convolve(convolve(fimage, k, mode='nearest'),
                         k,
                         mode='nearest')
            c = (c <= (0 + bias)) & ~lowerbound

            return cv2.morphologyEx(c.astype(np.uint8), cv2.MORPH_OPEN, np.ones((self.msize,self.msize),np.uint8))
Example #14
    def run(self, inputs, run_id):
        from functools import reduce  # Python 3: reduce lives in functools
        from operator import mul

        pstore = self.pstore(run_id)

        arr = inputs[0]
        kernel = inputs[1]

        ar, ac = arr.shape
        kr, kc = kernel.shape[0] // 2, kernel.shape[1] // 2

        start = time.time()
        if pstore.uses_mode(Mode.FULL_MAPFUNC):
            pstore.set_fanins([1,reduce(mul, kernel.shape)])
            pstore.set_inareas([1,reduce(mul, kernel.shape)])
            pstore.set_outarea(1)
            pstore.set_ncalls(reduce(mul, arr.shape))
            pstore.set_noutcells(reduce(mul, arr.shape))

        if pstore.uses_mode(Mode.PTR):
            for rid in range(ar):
                for cid in range(ac):
                    minr, maxr = (max(0, rid - kr), min(ar, rid + kr + 1))
                    minc, maxc = (max(0, cid - kc), min(ac, cid + kc + 1))
                    prov0 = [(px, py) for px in range(minr, maxr) for py in range(minc, maxc)]
                    prov1 = [(kx, ky) for kx in range(maxr - minr) for ky in range(maxc - minc)]
                    pstore.write(((rid, cid),), prov0, prov1)

        if pstore.uses_mode(Mode.PT_MAPFUNC):
            for x in range(ar):
                for y in range(ac):
                    pstore.write(((x, y),), '')
        end = time.time()

        output = np.empty(arr.shape, float)
        ndimage.convolve(arr, kernel, output=output, mode='constant', cval=0.0)
        return output, {'provoverhead' : end - start}
Example #15
def nudge_dataset(X, y):
    """
    This produces a dataset 8 times bigger than the original one,
    by moving the 8x8 images in X around by 8 directions
    """
    direction_vectors = [
        [[0, 1, 0],
         [0, 0, 0],
         [0, 0, 0]],

        [[0, 0, 0],
         [1, 0, 0],
         [0, 0, 0]],

        [[0, 0, 0],
         [0, 0, 1],
         [0, 0, 0]],

        [[0, 0, 0],
         [0, 0, 0],
         [0, 1, 0]],

        [[1, 0, 0],
         [0, 0, 0],
         [0, 0, 0]],

        [[0, 0, 1],
         [0, 0, 0],
         [0, 0, 0]],

        [[0, 0, 0],
         [0, 0, 0],
         [1, 0, 0]],

        [[0, 0, 0],
         [0, 0, 0],
         [0, 0, 1]]
    ]

    new_images = []
    for vectors in direction_vectors:
        new_images.append(convolve(X[0].reshape((28, 28)), vectors, mode='constant'))
    new_images.append(X[0].reshape((28, 28)))
    f, axarr = plt.subplots(3, 3)
    for i in range(3):
        for j in range(3):
            axarr[i, j].imshow(new_images[3 * i + j], cmap='gray')

    plt.show()

    shift = lambda x, w: convolve(x.reshape((28, 28)), mode='constant',
                                  weights=w).ravel()
    X = np.concatenate([X] +
                       [np.apply_along_axis(shift, 1, X, vector)
                        for vector in direction_vectors])
    print(X.shape)
    y = np.concatenate([y for _ in range(len(direction_vectors) + 1)], axis=0)
    print(y.shape)
    return X, y
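
Each direction kernel holds a single 1 off-center, so the convolution simply translates the image by one pixel. A tiny sketch of that mechanism:

import numpy as np
from scipy.ndimage import convolve

img = np.zeros((5, 5))
img[2, 2] = 1.0
kernel = [[0, 1, 0],
          [0, 0, 0],
          [0, 0, 0]]  # the first direction vector above
print(convolve(img, kernel, mode='constant'))  # the 1 moves up one row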
Example #16
    def compute_correlated_maps(self, kernel):
        """Compute significance image for a given kernel.
        """
        self.counts_corr = convolve(self.counts, kernel)
        self.background_corr = convolve(self.background, kernel)
        self.significance = significance(self.counts_corr, self.background_corr)

        return self
Example #17
    def convolve_raw(self, frame, frequency=0.5, theta=0):

        kernel = gabor_kernel(frequency, theta=theta)

        # convolve the 'frame' argument (the original referenced an undefined 'image')
        real = ndi.convolve(frame, np.real(kernel), mode='wrap')
        imag = ndi.convolve(frame, np.imag(kernel), mode='wrap')

        return real, imag
Example #18
def apply_robert(img):

	# Roberts cross kernels; assumes `from numpy import array, sqrt` (or a pylab import)
	Dx  = array([[1,0],[0,-1]])
	Dy  = array([[0,1],[-1,0]])
	imx = ndimage.convolve(img,Dx)
	imy = ndimage.convolve(img,Dy)
	grad = sqrt(imx**2+imy**2)
	return imx,imy,grad
Example #19
def compute_lima_on_off_map(n_on, n_off, a_on, a_off, kernel, exposure=None):
    """
    Compute Li&Ma significance and flux maps for on-off observations.

    Parameters
    ----------
    n_on : `~numpy.ndarray`
        Counts map.
    n_off : `~numpy.ndarray`
        Off counts map.
    a_on : `~numpy.ndarray`
        Relative background efficiency in the on region
    a_off : `~numpy.ndarray`
        Relative background efficiency in the off region
    kernel : `astropy.convolution.Kernel2D`
        convolution kernel.
    exposure : `~numpy.ndarray`
        Exposure map.

    Returns
    -------
    SkyImageCollection : `~gammapy.image.SkyImageCollection`
        Bunch of result maps.

    See also
    --------
    gammapy.stats.significance_on_off

    """
    from scipy.ndimage import convolve

    # The kernel is modified later; make a copy here
    kernel = deepcopy(kernel)

    if not kernel.is_bool:
        log.warning('Using weighted kernels can lead to biased results.')

    kernel.normalize('peak')
    n_on_ = convolve(n_on, kernel.array, mode='constant', cval=np.nan)
    a_ = convolve(a_on, kernel.array, mode='constant', cval=np.nan)
    alpha = a_ / a_off
    background = alpha * n_off

    significance_lima = significance_on_off(n_on_, n_off, alpha, method='lima')

    result = SkyImageCollection(significance=significance_lima,
                                n_on=n_on_,
                                background=background,
                                excess=n_on_ - background,
                                alpha=alpha)

    if exposure is not None:
        kernel.normalize('integral')
        exposure_ = convolve(exposure, kernel.array, mode='constant', cval=np.nan)
        flux = (n_on_ - background) / exposure_
        result.flux = flux

    return result
Example #20
 def coherence(self, kernel):
     numerator = convolve(self.power(), kernel)

     # no trailing comma here: it would turn the denominator into a 1-tuple
     denominator = (convolve(abs(self.wave[:,:,0])**2, kernel)
                 * convolve(abs(self.wave[:,:,1])**2, kernel))**0.5
     #denominator = convolve((abs(self.wave[:,:,0])**2 * abs(self.wave[:,:,1])**2)**0.5,
     #                kernel)

     return (numerator / denominator).reshape((self.scales.size, self.series.shape[0]))
Example #21
    def __bigF(self, c, f, win):

        arg1 = 1 / c
        arg2 = f * (1 / c)

        avg1 = ndimage.convolve(arg1, win, mode="nearest")
        avg2 = ndimage.convolve(arg2, win, mode="nearest")

        return (1 / avg1) * avg2
Example #22
def bigF(c, f, win):

	arg1 = 1 / c
	arg2 = f * (1 / c)

	avg1 = ndimage.convolve(arg1, win, mode='nearest')
	avg2 = ndimage.convolve(arg2, win, mode='nearest')

	return (1 / avg1) * avg2
Example #23
 def computeGradients(self, pixelBlock, props):
     # pixel size in input raster SR...
     p = props['cellSize'] if self.sr is None else projectCellSize(props['cellSize'], props['spatialReference'], self.sr, self.proj)
     if p is not None and len(p) == 2:
         p = np.multiply(p, 1.11e5 if isGeographic(self.sr) else 1.)   # conditional degrees to meters conversion
         xs, ys = (self.zf + (np.power(p, self.ce) * self.cf)) / (8*p)
     else:
         xs, ys = 1., 1.         # degenerate case. shouldn't happen.
     return (ndimage.convolve(pixelBlock, self.xKernel)*xs, ndimage.convolve(pixelBlock, self.yKernel)*ys)
Example #24
def roberts_cross( infilename, outfilename ) :
    image = load_image( infilename )

    vertical = ndimage.convolve( image, roberts_cross_v )
    horizontal = ndimage.convolve( image, roberts_cross_h )

    output_image = np.sqrt( np.square(horizontal) + np.square(vertical))

    save_image( output_image, outfilename )
Example #25
    def __bigA(self, a, f, c, l, win):

        arg1 = a - (f ** 2) * (1 / c)
        arg2 = 1 / c
        arg3 = f * (1 / c)
        avg1 = ndimage.convolve(arg1, win, mode="nearest")
        avg2 = ndimage.convolve(arg2, win, mode="nearest")
        avg3 = ndimage.convolve(arg3, win, mode="nearest")
        return avg1 + (1 / avg2) * avg3 ** 2
Example #26
    def update(self):
        """Update network output"""

        sh = self.weights.shape

        ndimage.convolve(self.inputVolume,
                         self.weights / float(sh[0] * sh[1] * sh[2]),
                         output=self.outputVolume)
        self.outputVolume[:] = tanh(self.outputVolume)[:]
Example #27
def compute_lima_image(counts, background, kernel, exposure=None):
    """
    Compute Li&Ma significance and flux images for known background.

    If exposure is given the corresponding flux image is computed and returned.

    Parameters
    ----------
    counts : `~numpy.ndarray`
        Counts image
    background : `~numpy.ndarray`
        Background image
    kernel : `astropy.convolution.Kernel2D`
        Convolution kernel
    exposure : `~numpy.ndarray`
        Exposure image

    Returns
    -------
    images : `~gammapy.image.SkyImageList`
        Results images container

    See Also
    --------
    gammapy.stats.significance
    """
    from scipy.ndimage import convolve

    wcs = counts.wcs.copy()
    # The kernel is modified later; make a copy here
    kernel = deepcopy(kernel)

    if not kernel.is_bool:
        log.warning('Using weighted kernels can lead to biased results.')

    kernel.normalize('peak')
    conv_opt = dict(mode='constant', cval=np.nan)

    counts_conv = convolve(counts, kernel.array, **conv_opt)
    background_conv = convolve(background, kernel.array, **conv_opt)
    excess_conv = counts_conv - background_conv
    significance_conv = significance(counts_conv, background_conv, method='lima')

    images = SkyImageList([
        SkyImage(name='significance', data=significance_conv, wcs=wcs),
        SkyImage(name='counts', data=counts_conv, wcs=wcs),
        SkyImage(name='background', data=background_conv, wcs=wcs),
        SkyImage(name='excess', data=excess_conv, wcs=wcs),
    ])

    # TODO: should we be doing this here?
    # Wouldn't it be better to let users decide if they want this,
    # and have it easily accessible as an attribute or method?
    _add_other_images(images, exposure, kernel, conv_opt)

    return images
Example #28
    def update(self):
        """
        Update the model for a single time step.
        """
        ndimage.convolve(self._elevation, self._stencil,
                         output=self._temp_elevation)
        self._set_bc(self._temp_elevation)
        self._elevation[:] = self._temp_elevation

        self._time += self._time_step
Example #29
def next_step(state):
    # Conway's Game of Life step, done in place with preallocated buffers
    neighbors = game_tmp
    ndimage.convolve(state, kernel, output=neighbors)
    np.greater_equal(neighbors, 2, out=game_bool1)
    np.less_equal(neighbors, 3, out=game_bool2)
    np.multiply(game_bool1, game_bool2, out=game_bool1)
    np.multiply(state, game_bool1, out=state)  # survivors: live cells with 2 or 3 neighbours
    np.equal(neighbors, 3, out=game_bool1)
    np.add(state, game_bool1, out=state)       # births: any cell with exactly 3 neighbours
    np.clip(state, 0, 1, out=state)            # clip once; adding game_bool1 again double-counted births
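
next_step relies on module-level buffers; a minimal setup that runs it on a Game of Life "blinker" (the names below are assumed to match the snippet's globals):

import numpy as np
from scipy import ndimage

kernel = np.array([[1, 1, 1], [1, 0, 1], [1, 1, 1]])  # counts the 8 neighbours
state = np.zeros((8, 8), dtype=int)
state[1, 1:4] = 1  # a "blinker" oscillator
game_tmp = np.empty_like(state)
game_bool1 = np.empty_like(state, dtype=bool)
game_bool2 = np.empty_like(state, dtype=bool)

next_step(state)
print(state)  # the blinker has rotated from a row into a column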
Example #30
    def _get_variables(self, variables, profiles, profiles_depth,
                       time, x, y, z, block):
        """Wrapper around reader-specific function get_variables()

        Performs some common operations which should not be duplicated:
        - monitor time spent by this reader
        - convert any numpy arrays to masked arrays
        """

        logging.debug('Fetching variables from ' + self.name)
        self.timer_start('reading')
        if profiles is not None and block is True:
            # If profiles are requested for any parameters, we
            # add two fake points at the end of array to make sure that the
            # requested block has the depth range required for profiles
            x = np.append(x, [x[-1], x[-1]])
            y = np.append(y, [y[-1], y[-1]])
            z = np.append(z, [profiles_depth[0], profiles_depth[1]])
        env = self.get_variables(variables, time, x, y, z, block)

        # Make sure x and y are floats (and not e.g. int64)
        if 'x' in env.keys():
            env['x'] = np.array(env['x'], dtype=float)
            env['y'] = np.array(env['y'], dtype=float)

        # Convert any masked arrays to NumPy arrays
        for variable in env.keys():
            if isinstance(env[variable], np.ma.MaskedArray):
                env[variable] = env[variable].filled(np.nan)

        # Convolve arrays with a kernel, if reader.convolve is set
        if hasattr(self, 'convolve'):
            from scipy import ndimage
            N = self.convolve
            if isinstance(N, (int, np.integer)):
                kernel = np.ones((N, N))
                kernel = kernel/kernel.sum()
            else:
                kernel = N
            logging.debug('Convolving variables with kernel: %s' % kernel)
            for variable in env.keys():
                if variable in ['x', 'y', 'z', 'time']:
                    pass
                else:
                    if env[variable].ndim == 2:
                        env[variable] = ndimage.convolve(
                            env[variable], kernel, mode='nearest')
                    elif env[variable].ndim == 3:
                        env[variable] = ndimage.convolve(
                            env[variable], kernel[:,:,None],
                            mode='nearest')

        self.timer_end('reading')

        return env
Example #31
def filter_data(data, kernel, mode='constant', fill_value=0.0,
                check_normalization=False):
    """
    Convolve a 2D image with a 2D kernel.

    The kernel may either be a 2D `~numpy.ndarray` or a
    `~astropy.convolution.Kernel2D` object.

    Parameters
    ----------
    data : array_like
        The 2D array of the image.

    kernel : array-like (2D) or `~astropy.convolution.Kernel2D`
        The 2D kernel used to filter the input ``data``. Filtering the
        ``data`` will smooth the noise and maximize detectability of
        objects with a shape similar to the kernel.

    mode : {'constant', 'reflect', 'nearest', 'mirror', 'wrap'}, optional
        The ``mode`` determines how the array borders are handled.  For
        the ``'constant'`` mode, values outside the array borders are
        set to ``fill_value``.  The default is ``'constant'``.

    fill_value : scalar, optional
        Value to fill data values beyond the array borders if ``mode``
        is ``'constant'``.  The default is ``0.0``.

    check_normalization : bool, optional
        If `True` then a warning will be issued if the kernel is not
        normalized to 1.
    """

    from scipy import ndimage

    if kernel is not None:
        if isinstance(kernel, Kernel2D):
            kernel_array = kernel.array
        else:
            kernel_array = kernel

        if check_normalization:
            if not np.allclose(np.sum(kernel_array), 1.0):
                warnings.warn('The kernel is not normalized.',
                              AstropyUserWarning)

        # scipy.ndimage.convolve currently strips units, but be explicit
        # in case that behavior changes
        unit = None
        if isinstance(data, Quantity):
            unit = data.unit
            data = data.value

        # NOTE:  astropy.convolution.convolve fails with zero-sum
        # kernels (used in findstars) (cf. astropy #1647)
        # NOTE: if data is int and kernel is float, ndimage.convolve
        # will return an int image - here we make the data float so
        # that a float image is always returned
        result = ndimage.convolve(data.astype(float), kernel_array,
                                  mode=mode, cval=fill_value)

        if unit is not None:
            result = result * unit  # can't use *= with older astropy

        return result
    else:
        return data
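
A short usage sketch with a plain normalized box kernel on synthetic data:

import numpy as np

data = np.random.default_rng(0).poisson(5.0, size=(32, 32))
box = np.ones((3, 3)) / 9.0
smoothed = filter_data(data, box, mode='constant', fill_value=0.0,
                       check_normalization=True)  # kernel sums to 1, so no warning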
Example #32

import matplotlib.pyplot as plt
import pandas as pd
from skimage.io import imread
from scipy.ndimage import convolve
from skimage.morphology import disk
import numpy as np
import os

bone_img = imread(os.path.join("..", "common", "figures",
                               "tiny-bone.png")).astype(np.float32)
# simulate measured image
conv_kern = np.pad(disk(2), 1, "constant", constant_values=0)
meas_img = convolve(bone_img[::-1], conv_kern)
# run deconvolution
dekern = np.fft.ifft2(1 / np.fft.fft2(conv_kern))
rec_img = convolve(meas_img, dekern)[::-1]
# show result
fig, (ax_orig, ax1, ax2) = plt.subplots(1, 3, figsize=(12, 4))
ax_orig.imshow(bone_img, cmap="bone")
ax_orig.set_title("Original Object")

ax1.imshow(meas_img, cmap="bone")
ax1.set_title("Measurement")

ax2.imshow(rec_img, cmap="bone", vmin=0, vmax=255)
ax2.set_title("Reconstructed")

Example #33
def make_structure_mask_border(structure_id, ref_space):
    mask = ref_space.make_structure_mask(structure_id)
    mask = convolve(mask, np.ones((3, 3, 3)), mode='constant')
    mask = ((mask < 8) & (mask > 1))
    return mask
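
Convolving the binary mask with a 3x3x3 cube of ones counts each voxel's 27-neighbourhood, and values strictly between the two thresholds lie on the boundary shell. The same idea in 2D, with thresholds adjusted for a 3x3 neighbourhood (hypothetical data):

import numpy as np
from scipy.ndimage import convolve

mask = np.zeros((7, 7), dtype=int)
mask[2:5, 2:5] = 1  # a 3x3 blob
counts = convolve(mask, np.ones((3, 3), dtype=int), mode='constant')
border = (counts < 9) & (counts > 1)  # neither fully inside nor fully outside
print(border.astype(int))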
Example #34
def smooth(y, box_pts=20):
    ker = np.ones(box_pts)/box_pts
    y_smooth = convolve(y,ker,mode='reflect')
    return y_smooth
Example #35
def spatially_correlated(geometry, weights=None, strel=None, **kwargs):
    r"""
    Generates pore seeds that are spatially correlated with their neighbors.

    Parameters
    ----------
    geometry : OpenPNM Geometry object
        The Geometry object with which this model is associated.  This is
        needed to determine the size of the array to create.

    weights : list of ints, optional
        The [Nx,Ny,Nz] distances (in number of pores) in each direction that
        should be correlated.

    strel : array_like, optional (in place of weights)
        The option allows full control over the spatial correlation pattern by
        specifying the structuring element to be used in the convolution.

        The array should be a 3D array containing the strength of correlations
        in each direction.  Nonzero values indicate the strength, direction
        and extent of correlations.  The following would achieve a basic
        correlation in the z-direction:

        strel = sp.array([[[0, 0, 0], [0, 0, 0], [0, 0, 0]], \
                          [[0, 0, 0], [1, 1, 1], [0, 0, 0]], \
                          [[0, 0, 0], [0, 0, 0], [0, 0, 0]]])

    Notes
    -----
    This approach uses image convolution to replace each pore seed in the
    geometry with a weighted average of those around it.  It then converts the
    new seeds back to a random distribution by assuming the new seeds are
    normally distributed.

    Because it uses image analysis tools, it only works on Cubic networks.

    This is the approach used by Gostick et al [2]_ to create an anisotropic
    gas diffusion layer for fuel cell electrodes.

    References
    ----------
    .. [2] J. Gostick et al, Pore network modeling of fibrous gas diffusion
           layers for polymer electrolyte membrane fuel cells. J Power Sources
           v173, pp277–290 (2007)

    Examples
    --------
    >>> import OpenPNM
    >>> pn = OpenPNM.Network.Cubic(shape=[50, 50, 50])
    >>> geom = OpenPNM.Geometry.GenericGeometry(network=pn,
    ...                                         pores=pn.Ps,
    ...                                         throats=pn.Ts)
    >>> mod = OpenPNM.Geometry.models.pore_seed.spatially_correlated
    >>> geom.add_model(propname='pore.seed',
    ...                model=mod,
    ...                weights=[2, 2, 2])
    >>> im = pn.asarray(geom['pore.seed'])

    Visualizing the end result can be done with:

    .. code-block:: python

        matplotlib.pyplot.imshow(im[:, 25, :],interpolation='none')

    """
    import scipy.ndimage as spim
    import scipy.stats as spst
    network = geometry._net
    # The following will only work on Cubic networks
    x = network._shape[0]
    y = network._shape[1]
    z = network._shape[2]
    im = _sp.rand(x, y, z)
    if strel is None:  # Then generate a strel
        if sum(weights) == 0:
            # If weights of 0 are sent, then skip everything and return rands.
            return im.flatten()
        w = _sp.array(weights)
        strel = _sp.zeros(w * 2 + 1)
        strel[:, w[1], w[2]] = 1
        strel[w[0], :, w[2]] = 1
        strel[w[0], w[1], :] = 1
    im = spim.convolve(im, strel)
    # The convolved values are no longer uniformly distributed, so fit a
    # Gaussian and map them back onto [0, 1) seeds via its CDF
    temp = im.flatten()
    x_mean = _sp.mean(temp)
    x_sigma = _sp.sqrt(1 / (temp.size - 1) * _sp.sum((temp - x_mean)**2))
    fn1 = spst.norm(loc=x_mean, scale=x_sigma)
    values = fn1.cdf(temp)
    values = values[geometry.map_pores(target=network, pores=geometry.Ps)]
    return values
Example #36
def gdal_slope(dem, srs, slope, unit='DEGREES'):
    """
    Create Slope Raster
    
    TODO: Test and see if it is running correctly
    """
    
    import numpy;         import math
    from osgeo            import gdal
    from scipy.ndimage    import convolve
    from glass.g.rd.rst   import rst_to_array
    from glass.g.wt.rst   import obj_to_rst
    from glass.g.prop.rst import get_cellsize, get_nodata
    
    # ################ #
    # Global Variables #
    # ################ #
    cellsize = get_cellsize(dem, gisApi='gdal')
    # Get Nodata Value
    NoData = get_nodata(dem)
    
    # #################### #
    # Produce Slope Raster #
    # #################### #
    # Get Elevation array
    arr_dem = rst_to_array(dem)
    # We need an array counting the nearest cells that have values
    with_data = numpy.zeros((arr_dem.shape[0], arr_dem.shape[1]))
    numpy.place(with_data, arr_dem!=NoData, 1.0)
    mask = numpy.array([[1,1,1],
                        [1,0,1],
                        [1,1,1]])
    arr_neigh = convolve(with_data, mask, mode='constant')
    numpy.place(arr_dem, arr_dem==NoData, 0.0)
    # The rate of change in the x direction for the center cell e is:
    kernel_dz_dx_left = numpy.array([[0,0,1],
                                     [0,0,2],
                                     [0,0,1]])
    kernel_dz_dx_right = numpy.array([[1,0,0],
                                     [2,0,0],
                                     [1,0,0]])
    dz_dx = (convolve(arr_dem, kernel_dz_dx_left, mode='constant')-convolve(arr_dem, kernel_dz_dx_right, mode='constant')) / (arr_neigh * cellsize)
    # The rate of change in the y direction for cell e is:
    kernel_dz_dy_left = numpy.array([[0,0,0],
                                    [0,0,0],
                                    [1,2,1]])
    kernel_dz_dy_right = numpy.array([[1,2,1],
                                    [0,0,0],
                                    [0,0,0]])
    dz_dy = (convolve(arr_dem, kernel_dz_dy_left, mode='constant')-convolve(arr_dem, kernel_dz_dy_right, mode='constant')) / (arr_neigh * cellsize)
    # Taking the rate of change in the x and y direction, the slope for the center cell e is calculated using
    rise_run = ((dz_dx)**2 + (dz_dy)**2)**0.5
    if unit=='DEGREES':
        arr_slope = numpy.arctan(rise_run) * 57.29578
    elif unit =='PERCENT_RISE':
        arr_slope = numpy.tan(numpy.arctan(rise_run)) * 100.0
    # Estimate the slope for the cells with less than 8 neigh
    aux_dem = rst_to_array(dem)
    index_vizinhos = numpy.where(arr_neigh<8)
    for idx in range(len(index_vizinhos[0])):
        # Get Value of the cell
        lnh = index_vizinhos[0][idx]
        col = index_vizinhos[1][idx]
        e = aux_dem[lnh][col]
        a = aux_dem[lnh-1][col-1]
        if a == NoData:
            a = e
        if lnh==0 or col==0:
            a=e
        b = aux_dem[lnh-1][col]
        if b == NoData:
            b = e
        if lnh==0:
            b=e
        try:
            c = aux_dem[lnh-1][col+1]
            if c == NoData:
                c=e
            if lnh==0:
                c=e
        except IndexError:
            c = e
        d = aux_dem[lnh][col-1]
        if d == NoData:
            d = e
        if col==0:
            d=e
        try:
            f = aux_dem[lnh][col+1]
            if f == NoData:
                f=e
        except IndexError:
            f=e
        try:
            g = aux_dem[lnh+1][col-1]
            if g == NoData:
                g=e
            if col==0:
                g=e
        except IndexError:
            g=e
        try:
            h = aux_dem[lnh+1][col]
            if h ==NoData:
                h = e
        except IndexError:
            h=e
        try:
            i = aux_dem[lnh+1][col+1]
            if i == NoData:
                i = e
        except IndexError:
            i=e
        dz_dx = ((c + 2*f + i) - (a + 2*d + g)) / (8 * cellsize)
        dz_dy = ((g + 2*h + i) - (a + 2*b + c)) / (8 * cellsize)
        rise_run = ((dz_dx)**2 + (dz_dy)**2)**0.5
        if unit == 'DEGREES':
            arr_slope[lnh][col] = math.atan(rise_run) * 57.29578
        elif unit == 'PERCENT_RISE':
            arr_slope[lnh][col] = math.tan(math.atan(rise_run)) * 100.0
    # Restore cells that were originally NoData as NaN
    numpy.place(arr_slope, aux_dem==NoData, numpy.nan)
    #arr_slope[lnh][col] = slope_degres
    obj_to_rst(arr_slope, slope, dem)
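
The kernel pairs above implement Horn's third-order finite differences. A small check on a synthetic tilted plane (hypothetical 10 m cell size; interior cells have all 8 neighbours, so the divisor is 8 * cellsize):

import numpy as np
from scipy.ndimage import convolve

cellsize = 10.0
dem = np.tile(0.5 * np.arange(20) * cellsize, (20, 1))  # rises 0.5 m per metre in x

k_left = np.array([[0, 0, 1], [0, 0, 2], [0, 0, 1]])
k_right = np.array([[1, 0, 0], [2, 0, 0], [1, 0, 0]])
dz_dx = (convolve(dem, k_left, mode='constant')
         - convolve(dem, k_right, mode='constant')) / (8 * cellsize)

# interior cells recover the 0.5 gradient (the sign depends on kernel orientation)
print(abs(dz_dx[10, 10]))                        # -> 0.5
print(np.degrees(np.arctan(abs(dz_dx[10, 10]))))  # -> ~26.57 degrees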
Example #37
    def track_shm(self, debug=False):
        if self.sphere_coverage > 7 or self.sphere_coverage < 1:
            raise ValueError("sphere coverage must be between 1 and 7")
        verts, edges, faces = create_half_unit_sphere(self.sphere_coverage)
        verts, pot = disperse_charges(verts, 10, .3)

        data, voxel_size, affine, fa, bvec, bval = self.all_inputs.read_data()
        self.voxel_size = voxel_size
        self.affine = affine
        self.shape = fa.shape

        model_type = all_shmodels[self.model_type]
        model = model_type(self.sh_order, bval, bvec, self.Lambda)
        model.set_sampling_points(verts, edges)

        data = np.asarray(data, dtype='float', order='C')
        if self.smoothing_kernel is not None:
            kernel = self.smoothing_kernel.get_kernel()
            convolve(data, kernel, output=data)  # scipy.ndimage.convolve takes 'output', not 'out'

        normalize_data(data, bval, self.min_signal, out=data)
        dmin = data.min()
        data = data[..., lazy_index(bval > 0)]
        if self.bootstrap_input:
            if self.bootstrap_vector.size == 0:
                n = data.shape[-1]
                self.bootstrap_vector = np.random.randint(n, size=n)
            H = hat(model.B)
            R = lcr_matrix(H)
            data = bootstrap_data_array(data, H, R, self.bootstrap_vector)
            data.clip(dmin, out=data)

        mask = fa > self.fa_threshold
        targets = [read_roi(tgt, shape=self.shape) for tgt in self.targets]
        if self.stop_on_target:
            for target_mask in targets:
                mask = mask & ~target_mask

        seed_mask = read_roi(self.seed_roi, shape=self.shape)
        seeds = seeds_from_mask(seed_mask, self.seed_density, voxel_size)

        if ((self.interpolator == 'NearestNeighbor' and not self.probabilistic
             and not debug)):
            using_optimze = True
            peak_finder = NND_ClosestPeakSelector(model, data, mask,
                                                  voxel_size)
        else:
            using_optimze = False
            interpolator_type = all_interpolators[self.interpolator]
            interpolator = interpolator_type(data, voxel_size, mask)
            peak_finder = ClosestPeakSelector(model, interpolator)

        # Set peak_finder parameters for start steps
        peak_finder.angle_limit = 90
        model.peak_spacing = self.min_peak_spacing
        if self.seed_largest_peak:
            model.min_relative_peak = 1
        else:
            model.min_relative_peak = self.min_relative_peak

        data_ornt = nib.io_orientation(self.affine)
        best_start = reorient_vectors(self.start_direction, 'ras', data_ornt)
        start_steps = closest_start(seeds, peak_finder, best_start)

        if self.probabilistic:
            interpolator = ResidualBootstrapWrapper(interpolator,
                                                    model.B,
                                                    min_signal=dmin)
            peak_finder = ClosestPeakSelector(model, interpolator)
        elif using_optimze and self.seed_largest_peak:
            peak_finder.reset_cache()

        # Reset peak_finder parameters for tracking
        peak_finder.angle_limit = self.max_turn_angle
        model.peak_spacing = self.min_peak_spacing
        model.min_relative_peak = self.min_relative_peak

        integrator = BoundryIntegrator(voxel_size, overstep=.1)
        streamlines = generate_streamlines(peak_finder, integrator, seeds,
                                           start_steps)
        if self.track_two_directions:
            start_steps = -start_steps
            streamlinesB = generate_streamlines(peak_finder, integrator, seeds,
                                                start_steps)
            streamlines = merge_streamlines(streamlines, streamlinesB)

        for target_mask in targets:
            streamlines = target(streamlines, target_mask, voxel_size)

        return streamlines
Example #38
def ttFindSpec(xdisp, xtract_info, life_adj_offset, xd_range, box):
    """Find the location in the cross-dispersion direction.

    Parameters
    ----------
    xdisp: array_like
        The cross-dispersion profile, 1-D array of time-tag data collapsed
        along the dispersion axis, but taking into account the tilt of the
        spectrum.

    xtract_info: array_like
        Data block (but just one row) from the xtractab.

    life_adj_offset: float
        Normally this will be 0.  If the LIFE_ADJ keyword is -1, however,
        indicating that the aperture block is not at one of the recognized
        "lifetime positions," life_adj_offset will be the expected offset
        (in pixels) of the wavecal spectrum from lifetime position 1.

    xd_range: int
        Search within + or - xd_range from the nominal location for the
        peak in xdisp.

    box: int
        Smooth xdisp with a box of this width before looking for the
        maximum.

    Returns
    -------
    (shift2, y): tuple of two floats
        shift2 is the shift from nominal in the cross-dispersion
        direction (or None), and y is the location of the spectrum.
        The location is based on fitting a quadratic to points near the
        maximum.  Note that the data were collapsed to the left edge to
        get xdisp, so the location is the intercept on the edge, rather
        than where the spectrum crosses the middle of the detector.
    """

    y_nominal = xtract_info.field("b_spec")[0] + life_adj_offset
    segment = xtract_info.field("segment")[0]  # for possible warning message

    # The values of y_nominal and xd_range should be such that neither
    # y0 nor y1 will be less than zero or greater than 1023.
    y0 = int(round(y_nominal - xd_range))
    y1 = int(round(y_nominal + xd_range)) + 1
    if y0 < 0 or y1 >= len(xdisp):
        cosutil.printWarning("XD_RANGE in WCPTAB is too large.")
        y0 = max(y0, 0)
        y1 = min(y1, len(xdisp) - 1)

    boxcar_kernel = scipysignal.boxcar(box) / box  # np.ones(box) / box on SciPy >= 1.13, where boxcar was removed
    xdisp_sm = ndimage.convolve(xdisp, boxcar_kernel, mode="nearest")
    len_xdisp_sm = len(xdisp_sm)

    if y0 >= y1:
        return (None, 0.)
    index = np.argsort(xdisp_sm[y0:y1])
    y = y0 + index[-1]
    signal = xdisp_sm[y]  # value in smoothed array
    # Check for duplicate values.
    y_min = y
    y_max = y
    while y_min > 0 and xdisp_sm[y_min] == signal:
        y_min -= 1
    while y_max < len_xdisp_sm and xdisp_sm[y_max] == signal:
        y_max += 1
    y_float = float(y_min + y_max) / 2.
    y = int(round(y_float))

    # Fit a quadratic to the smoothed curve near the peak.
    fit_range = (y_max - y_min) + box
    if fit_range < xd_range:
        r0 = y - fit_range // 2
        r1 = r0 + fit_range
        r0 = max(r0, 0)
        r1 = min(r1, len_xdisp_sm)
        r0 = r1 - fit_range
        x = np.arange(fit_range, dtype=np.float64)
        (coeff, var) = cosutil.fitQuadratic(x, xdisp_sm[r0:r1])
        (y_temp, y_float_sigma) = cosutil.centerOfQuadratic(coeff, var)
        if y_temp is None:
            return (None, 0.)
        y_float = y_temp + r0

    # Find the background level.
    i = index[(y1 - y0) // 2]
    background = xdisp_sm[y0 + i]  # median of smoothed array

    sigma_s = math.sqrt(signal * box)
    sigma_b = math.sqrt(background * box)
    sigma_s_b = math.sqrt(sigma_s**2 + sigma_b**2)
    if sigma_s_b > 0.:
        signal_to_noise = (signal - background) * box / sigma_s_b
    else:
        signal_to_noise = 0.

    if signal_to_noise >= 5.:
        shift2 = y_float - y_nominal + life_adj_offset
    else:
        shift2 = None

    return (shift2, y_float)
Example #39
img = Image.open(
    './data/angela.jpg')  # opens the file using Pillow - it's not an array yet
x = np.asfortranarray(im2nparray(img))
x = np.mean(x, axis=2)
x = np.maximum(x, 0.0)

# Kernel
K = Image.open('./data/kernel_snake.png'
               )  # opens the file using Pillow - it's not an array yet
K = np.mean(np.asfortranarray(im2nparray(K)), axis=2)
K = np.maximum(cv2.resize(K, (15, 15), interpolation=cv2.INTER_LINEAR), 0)
K /= np.sum(K)

# Generate observation
sigma_noise = 0.01
b = ndimage.convolve(x, K,
                     mode='wrap') + sigma_noise * np.random.randn(*x.shape)

# Display data
plt.ion()
plt.figure()
imgplot = plt.imshow(x, interpolation="nearest", clim=(0.0, 1.0))
imgplot.set_cmap('gray')
plt.title('Original Image')
# plt.show()

plt.figure()
imgplot = plt.imshow(K / np.amax(K), interpolation="nearest", clim=(0.0, 1.0))
imgplot.set_cmap('gray')
plt.title('K')
# plt.show()
Example #40
def show_MultiAccum_reads(raw='ibp329isq_raw.fits', flatten_ramp=False, verbose=True, stats_region=[[0,1014], [0,1014]]):
    """
    Make a figure (.ramp.png) showing the individual reads of an 
    IMA or RAW file.
    """    
    import scipy.ndimage as nd

    import matplotlib.pyplot as plt
    
    from matplotlib.figure import Figure
    from matplotlib.backends.backend_agg import FigureCanvasAgg
    
    if verbose:
        logger.setLevel(logging.DEBUG)
    else:
        logger.setLevel(logging.WARN)

    status = fetch_calibs(raw) #, ftpdir='ftp://ftp.stsci.edu/cdbs/iref/')
    if not status:
        return False
        
    img = pyfits.open(raw)
    
    if 'raw' in raw:
        gains = [2.3399999, 2.3699999, 2.3099999, 2.3800001]
        gain = np.zeros((1024,1024))
        gain[512: ,0:512] += gains[0]
        gain[0:512,0:512] += gains[1]
        gain[0:512, 512:] += gains[2]
        gain[512: , 512:] += gains[3]
    else:
        gain=1
    
    logger.info('Make MULTIACCUM cube')
        
    #### Split the multiaccum file into individual reads    
    cube, dq, times, NSAMP = split_multiaccum(img, scale_flat=False)
    
    if 'raw' in raw:
        dark_file = img[0].header['DARKFILE'].replace('iref$', os.getenv('iref')+'/')
        dark = pyfits.open(dark_file)
        dark_cube, dark_dq, dark_time, dark_NSAMP = split_multiaccum(dark, scale_flat=False)

        diff = np.diff(cube-dark_cube[:NSAMP,:,:], axis=0)*gain
        dt = np.diff(times)
    
        #### Need flat for Poisson
        flat_im, flat = get_flat(img)
        diff /= flat
    else:
        diff = np.diff(cube, axis=0)
        dt = np.diff(times)
    
    ####  Average ramp
    slx = slice(stats_region[0][0], stats_region[0][1])
    sly = slice(stats_region[1][0], stats_region[1][1])
    ramp_cps = np.median(diff[:, sly, slx], axis=1)
    avg_ramp = np.median(ramp_cps, axis=1)
    
    #### Initialize the figure
    logger.info('Make plot')
    
    plt.ioff()
    #fig = plt.figure(figsize=[10,10])
    fig = Figure(figsize=[10,10])

    ## Smoothing
    smooth = 1
    kernel = np.ones((smooth,smooth))/smooth**2
    
    ## Plot the individual reads
    for j in range(1,NSAMP-1):
        ax = fig.add_subplot(4,4,j)
        smooth_read = nd.convolve(diff[j,:,:],kernel)
        ax.imshow(smooth_read[5:-5:smooth, 5:-5:smooth]/dt[j], 
                  vmin=0, vmax=4, origin='lower', cmap=plt.get_cmap('cubehelix'))
        
        ax.set_xticklabels([]); ax.set_yticklabels([])
        ax.text(20,5,'%d' %(j), ha='left', va='bottom', backgroundcolor='white')
    
    ## Show the ramp
    fig.tight_layout(h_pad=0.3, w_pad=0.3, pad=0.5)
    
    ax = fig.add_axes((0.6, 0.05, 0.37, 0.18))
    #ax = fig.add_subplot(428)
    ax.plot(times[2:], (ramp_cps[1:,16:-16:4].T/np.diff(times)[1:]).T, 
            alpha=0.1, color='black')
    ax.plot(times[2:], avg_ramp[1:]/np.diff(times)[1:], alpha=0.8, 
            color='red', linewidth=2)
    ax.set_xlabel('time'); ax.set_ylabel('background [e/s]')

    #fig.tight_layout(h_pad=0.3, w_pad=0.3, pad=0.5)
    root=raw.split('_')[0]
    #plt.savefig(root+'_ramp.png')
    
    canvas = FigureCanvasAgg(fig)
    canvas.print_figure(root+'_ramp.png', dpi=200)
    
    #### Same ramp data file    
    np.savetxt('%s_ramp.dat' %(root), np.array([times[1:], avg_ramp/np.diff(times)]).T, fmt='%.3f')
    
    if flatten_ramp:
        #### Flatten the ramp by setting background countrate to the average.  
        #### Output saved to "*x_flt.fits" rather than the usual *q_flt.fits.
        
        flux = avg_ramp/np.diff(times)
        avg = avg_ramp.sum()/times[-1]
        min_flux = flux[1:].min()  # renamed from `min` to avoid shadowing the builtin
        subval = np.cumsum((flux-avg)*np.diff(times))
        
        imraw = pyfits.open(raw.replace('ima','raw'))
        for i in range(1, NSAMP):
            logger.info('Remove excess %.2f e/s from read #%d (t=%.1f)' %(flux[-i]-min_flux, NSAMP-i+1, times[-i]))
            
            imraw['SCI',i].data = imraw['SCI',i].data - (subval[-i]/2.36*flat).astype(int)
                
        files=glob.glob(raw.split('q_')[0]+'x_*')
        for file in files:
            os.remove(file)
            
        imraw[0].header['CRCORR'] = 'PERFORM'
        imraw.writeto(raw.replace('q_raw', 'x_raw'), overwrite=True)
        
        ## Run calwf3
        #wfc3tools.calwf3(raw.replace('q_raw', 'x_raw'))
        utils.run_calwf3(raw.replace('q_raw', 'x_raw'), clean=True)
        
    return fig
Example #41
import numpy as np
import cv2
from scipy import ndimage

kernel33 = np.array([[-1, -1, -1], [-1, 8, -1], [-1, -1, -1]])

kernel33_D = np.array([[1, 1, 1], [1, -8, 1], [1, 1, 1]])

img = cv2.imread("lena.jpg", 0)
linghtImg = ndimage.convolve(img, kernel33_D)
cv2.imshow("img", linghtImg)
cv2.waitKey()
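
One caveat: cv2.imread returns uint8, and convolving uint8 data with a signed kernel wraps around modulo 256. A hedged variant that works in float, reusing img and kernel33 from above:

import numpy as np
from scipy import ndimage

img_f = img.astype(float)  # avoid uint8 wrap-around with signed kernels
edges = ndimage.convolve(img_f, kernel33, mode='nearest')
edges_u8 = np.clip(edges, 0, 255).astype(np.uint8)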
Example #42
def interpolation(noise, screen_size, res_size):
    tr = np.sqrt(res_size).astype('int64')
    data = noise[:res_size].reshape(tr, tr)
    screen = np.random.rand(screen_size, screen_size)
    res = convolve(screen, data)
    return res
Example #43
def smoother(data, kernel_size=3):
    kernel = np.ones([kernel_size, kernel_size]) / float(
        kernel_size * kernel_size)
    tmp = ndimage.convolve(data, kernel)
    tmp = ndimage.convolve(tmp, kernel)
    return (tmp)
Example #44
def power(image, kernel):
    # Normalize images for better comparison.
    image = (image - image.mean()) / image.std()
    return np.sqrt(
        ndi.convolve(image, np.real(kernel), mode='wrap')**2 +
        ndi.convolve(image, np.imag(kernel), mode='wrap')**2)
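
A short usage sketch pairing power() with a scikit-image Gabor kernel (parameter values are illustrative only):

import numpy as np
from scipy import ndimage as ndi
from skimage import data
from skimage.filters import gabor_kernel

image = data.camera().astype(float)
kernel = gabor_kernel(frequency=0.15, theta=0)
response = power(image, kernel)  # magnitude of the complex Gabor response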
Example #45
# import data from scikit-image library
from skimage import data
# import ndimage from scipy library
from scipy import ndimage
# import numpy library as np
import numpy as np
# from matplotlib import pyplot
import matplotlib.pyplot as plt

image = data.camera()

# create filter
# note: convolution flips the kernel 180 degrees relative to correlation
kernel = np.array([[1, 1, 1], [1, 1, 1], [1, 1, 1]])

kernel = kernel / np.sum(kernel)

# apply convolution on image
newImage = ndimage.convolve(image, kernel, mode='constant', output=np.uint8)

# display image
plt.imshow(image, cmap='gray')
plt.title('before')
plt.show()
plt.imshow(newImage, cmap='gray')
plt.title('after')
plt.show()
Example #46
def filteredimg_gray(img, filt):
    return ndimage.convolve(img, filt, mode='constant', cval=1.0)
Example #47
plt.grid(True)
fig.savefig(rnnName, dpi=fig.dpi)
plt.show()

# Test the network
testLen = 10000
testErr = rnnTheano.testNetwork(testLen)
print "Test error: ", testErr

# Image edge detection test
M = np.array([[-1, -1, -1], [1, 1, 1]]).transpose()
s = 32
im = np.zeros((s, s))
s4 = s // 4
im[s4:-s4, s4:-s4] = 1
sx = ndimage.convolve(im, M)
res = np.zeros((s, s))
sm = np.zeros((s, s))
for col in range(1, s):
    colSet = np.reshape(im[:, col - 1:col + 1], (s, 1, nin))
    _, y = rnnTheano.train_fn(np.zeros((1, rnnTheano.nh)), colSet,
                              np.zeros((s, 1, 1)), 0)
    res[:, col] = np.reshape(y, (s, ))
    for row in range(1, s - 1):
        sm[row - 1:row + 2,
           col] = np.multiply(M, im[row - 1:row + 2, col - 1:col + 1]).sum()

fig = plt.figure(figsize=(16, 5))
curTime = datetime.now().strftime("%Y-%m-%d-%H-%M-%S")
filePrefix = 'rnn_2DGradientX_XSq'
rnnName = resDir + filePrefix + '-%d-%d-%d-%s.png' % (nh, nin, nout, curTime)
Example #48
import numpy as np  # import the numpy library
import cv2  # import the opencv library
import matplotlib.pyplot as plt  # import the matplotlib library
from scipy import ndimage  # import ndimage from the scipy library

im = cv2.imread('E:\kuliah\mawar.jpg')
gray = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)
data = np.array(gray, dtype=float)

kernel = np.array([[-1, -1, -1, -1, -1], [-1, 1, 2, 1, -1], [-1, 2, 4, 2, -1],
                   [-1, 1, 2, 1, -1], [-1, -1, -1, -1, -1]])
highpass_5x5 = ndimage.convolve(data, kernel)

hist1, bins1 = np.histogram(highpass_5x5.flatten(), 256, [0, 256])
cdf1 = hist1.cumsum()  # cumulative histogram of the high-pass filtered image
norm1 = cdf1 * hist1.max() / cdf1.max()

cv2.imshow('Grayscale', gray)
cv2.imshow('Highpass_5x5', highpass_5x5)
plt.plot(norm1, color='g')  # plot the cumulative curve in green
plt.hist(highpass_5x5.flatten(), 256, [0, 256],
         color='b')  # plot the histogram in blue
plt.xlim([0, 256])
plt.legend(('cdf', 'histogram'), loc='upper left')

plt.show()

cv2.waitKey(0)
cv2.destroyAllWindows()
Example #49
    leaf_pos_dict[leaf]=np.array([])
    leaf_pos_error_dict[leaf]=np.array([])
    leaf_n_points_dict[leaf]=np.array([])


  for frame in range(0,frames):
    if (frame%15==0):
      print("frame = "+str(frame))

    startFrame=0+(1024*1024)*frame
    endFrame=0+(1024*1024)*(frame+1)

    singleFrameIm=im[startFrame:endFrame]
    singleFrameIm=singleFrameIm.reshape([1024,1024])
    sobel_kernel=np.array([[1,2,1],[0,0,0],[-1,-2,-1]])
    sobelSigned = ndimage.convolve(singleFrameIm, sobel_kernel)
    sobelAbs=np.absolute(sobelSigned)
    sobelAbs2 = np.array(sobelAbs, dtype = np.float32)

    sobelAbs2=cv2.bilateralFilter(sobelAbs2,24,5000,5000)
  
    #iterate over each leaf in a frame
    start=54 #196/4
    leafWidth=53
    nSlicesUsed=25


    for leaf in range(0,nLeaves):

      #choose y slices to use for each leaf
      sliceStart=start+leaf*leafWidth
Пример #50
0
def perimeter(image, neighbourhood=4):
    """Calculate total perimeter of all objects in binary image.

    Parameters
    ----------
    image : (N, M) ndarray
        2D binary image.
    neighbourhood : 4 or 8, optional
        Neighborhood connectivity for border pixel determination. It is used to
        compute the contour. A higher neighbourhood widens the border on which
        the perimeter is computed.

    Returns
    -------
    perimeter : float
        Total perimeter of all objects in binary image.

    References
    ----------
    .. [1] K. Benkrid, D. Crookes. Design and FPGA Implementation of
           a Perimeter Estimator. The Queen's University of Belfast.
           http://www.cs.qub.ac.uk/~d.crookes/webpubs/papers/perimeter.doc

    Examples
    --------
    >>> from skimage import data, util
    >>> from skimage.measure import label
    >>> # coins image (binary)
    >>> img_coins = data.coins() > 110
    >>> # total perimeter of all objects in the image
    >>> perimeter(img_coins, neighbourhood=4)  # doctest: +ELLIPSIS
    7796.867...
    >>> perimeter(img_coins, neighbourhood=8)  # doctest: +ELLIPSIS
    8806.268...

    """
    if image.ndim != 2:
        raise NotImplementedError('`perimeter` supports 2D images only')

    if neighbourhood == 4:
        strel = STREL_4
    else:
        strel = STREL_8
    image = image.astype(np.uint8)
    eroded_image = ndi.binary_erosion(image, strel, border_value=0)
    border_image = image - eroded_image

    perimeter_weights = np.zeros(50, dtype=np.double)
    perimeter_weights[[5, 7, 15, 17, 25, 27]] = 1
    perimeter_weights[[21, 33]] = sqrt(2)
    perimeter_weights[[13, 23]] = (1 + sqrt(2)) / 2

    perimeter_image = ndi.convolve(border_image, np.array([[10, 2, 10],
                                                           [ 2, 1,  2],
                                                           [10, 2, 10]]),
                                   mode='constant', cval=0)

    # You can also write
    # return perimeter_weights[perimeter_image].sum()
    # but that was measured as taking much longer than bincount + np.dot (5x
    # as much time)
    perimeter_histogram = np.bincount(perimeter_image.ravel(), minlength=50)
    total_perimeter = perimeter_histogram @ perimeter_weights
    return total_perimeter
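# The function above references module-level names defined outside this excerpt;
# a minimal sketch of compatible definitions (matching skimage's 4- and
# 8-connectivity masks) plus the imports it needs:
import numpy as np
from math import sqrt
from scipy import ndimage as ndi

STREL_4 = np.array([[0, 1, 0],
                    [1, 1, 1],
                    [0, 1, 0]], dtype=np.uint8)
STREL_8 = np.ones((3, 3), dtype=np.uint8)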
Пример #51
0
def sharpen(image, factor):
    # Unsharp-style kernel: identity plus a Laplacian scaled by `factor`, so
    # factor=0 returns the image unchanged and larger values boost edges.
    kernel = [[0, -factor, 0], [-factor, 1 + 4 * factor, -factor], [0, -factor, 0]]
    return ndi.convolve(np.array(image), kernel)
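# Hedged usage sketch (assumes the module's imports: numpy as np and
# scipy.ndimage as ndi). Convolving a unit impulse reveals the effective
# kernel: center 1 + 4*factor, cross neighbours -factor.
probe = np.zeros((5, 5))
probe[2, 2] = 1.0
print(sharpen(probe, 0.5))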
Пример #52
0
    imagen = imagen/255.  # normalize to [0, 1]


tools.pltImg(imagen, 'Input image')

# Convert to YIQ to extract the luminance channel
imagYIQ = tools.convert_to("YIQ",imagen)
imagY = imagYIQ[:,:,0]


# Edge-detection filter kernels
sobelX = np.array([[-1, 0, 1], [-2, 0, 2], [-1, 0, 1]])  # X axis pointing right
sobelY = np.array([[-1, -2, -1], [0, 0, 0], [1, 2, 1]])  # Y axis pointing down

# Sobel filtering in X and Y
imgSobelX = ndimage.convolve(imagY,sobelX)
imgSobelY = ndimage.convolve(imagY,sobelY)

# Gradient magnitude
magnitudSobel = np.sqrt(np.square(imgSobelX) + np.square(imgSobelY))
tools.pltImg(tools.clipImg(magnitudSobel), 'Image energy')



###### SEAM CARVING BEGINS ######
# Column removal
deletedCols = 0
procImg = imagen
print('Removing columns...')
while deletedCols<colsToDelete:
    print(np.round(deletedCols/(colsToDelete+rowsToDelete)*100,3),'%')
Пример #53
0
            val_fin_s_1 = min(i, m)  # Taking the final value of r.
            while r < val_fin_s_1 + 1:  # Iterate r up to its final value.
                s = max(0, j - q + 1)  # Taking the initial value of s.
                val_fin_s_2 = min(j, n)  # Taking the final value of s.
                while s < val_fin_s_2 + 1:  # Iterate s up to its final value.
                    # Check that r, s, i-r and j-s fall within the range of the
                    # resulting image.
                    if 0 <= r < m and 0 <= s < n and 0 <= i - r < p and 0 <= j - s < q:
                        # Accumulate the convolution sum.
                        C[i][j] += A[r][s] * B[i - r][j - s]
                        s += 1
                    else:
                        break
                r += 1
    return C


# Testing the convolution.
A = imageio.imread("boat.jpg")
B = [[1, 2, 1], [0, 0, 0], [-1, -2, -1]]
C = convolucion_2D(A, B)
C2 = ndimage.convolve(A, B, mode='constant', cval=1.0)
plt.figure()
plt.subplot(121)
plt.title("Manual convolution")
plt.imshow(C, cmap='gray')
plt.subplot(122)
plt.title("Python convolution")
plt.imshow(C2, cmap='gray')
plt.show()
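# Hedged cross-check: for an odd-sized kernel with zero padding, ndimage.convolve
# agrees with the 'same'-sized output of scipy.signal.convolve2d:
import numpy as np
from scipy import ndimage, signal

a_small = np.random.rand(8, 8)
k_small = np.array(B, dtype=float)
assert np.allclose(signal.convolve2d(a_small, k_small, mode='same', boundary='fill', fillvalue=0),
                   ndimage.convolve(a_small, k_small, mode='constant', cval=0.0))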
Пример #54
0
def neighborhood_filter(image,
                        objects,
                        max_diff=0.1,
                        gap=4,
                        neighborhood_depth=4,
                        colorspace='rgb',
                        band=2,
                        return_band=False):
    """
    Calculate the difference between values on either side of a long, skinny object.

    For pyroots, the application is differentiating hyphae or roots from the edges
    of particles. These edges sometimes pass through other filters. True objects
    (roots and hyphae) should have more or less the same value on either side. Edges
    of larger objects, in comparison, should have a higher value on one side than the other.

    This function compares the values on the left and right sides, and upper and lower sides,
    of candidate objects in a grayscale image or band. Based on this difference, the candidate
    object is flagged as real or spurious.
    
    Parameters
    ----------
    image : array
        1-band, grayscale image, or RGB color image. Converted to float automatically. 
    objects : array
        binary array of candidate objects.
    max_diff : float
        Maximum difference between values in `image` on each side of the candidate objects 
        in `objects`. The magnitude of this value varies with the `colorspace` chosen. For `'rgb'`, 
        the range is [0, 1]. 
    gap : int
        Number of pixels *beyond* each object to start measuring the neighborhood. The width
        of region between the object and the neighborhood. Useful for objects that may not fully
        capture the true object underneath. Default = 4.
    neighborhood_depth : int
        Number of pixels deep that the neighborhood should be. In intervals of 2. Default = 4.
    colorspace : float
        For accessing other colorspaces than RGB. Used to convert a color image to HSV, LAB, etc.
        See `skimage.color`. Ignored if given a 1-band image.
    band : int [0,2]
        Band index for colorspace. Ex. in RGB R=0, G=1, B=2. Ignored if `image` is 1-band. 
    return_band : bool
        Return the colorspace band as well? For diagnostics. 
        
    Returns
    -------
    A binary array of filtered objects
        
    
    """
    if colorspace.lower() == 'grey':
        colorspace = 'gray'
    if len(image.shape) == 3:
        if colorspace.lower() != 'rgb':
            image = getattr(color, 'rgb2' + colorspace)(image)
        if len(image.shape) == 3:
            image = img_split(image)[band]

    image = img_as_float(image)
    its = int((neighborhood_depth + 2) / 2)
    gap = int(gap)
    total_dilation = 2 * its
    dims = image.shape

    # neighborhood expansion kernels
    kernel_ls = [
        np.array([[0, 0, 0, 0, 0], [1, 1, 1, 0, 0], [0, 0, 0, 0, 0]]),  # left
        np.array([[0, 0, 0, 0, 0], [0, 0, 1, 1, 1], [0, 0, 0, 0, 0]]),  # right
        np.array([[0, 1, 0], [0, 1, 0], [0, 1, 0], [0, 0, 0], [0, 0,
                                                               0]]),  # up
        np.array([[0, 0, 0], [0, 0, 0], [0, 1, 0], [0, 1, 0], [0, 1,
                                                               0]])  # down
    ]

    labels, labels_ls = ndimage.label(objects)
    props = measure.regionprops(labels)

    decision_ls = [False]
    for i in range(1, labels_ls + 1):
        ###############
        #### Slice ####
        ###############
        # Bounds of slice to only the object of interest
        # include a gap. Stay within bounds of image.
        a, b, c, d = props[i - 1].bbox
        a = max(a - total_dilation, 0)
        b = max(b - total_dilation, 0)
        c = min(c + total_dilation, dims[0])  # max row, clamped to image height
        d = min(d + total_dilation, dims[1])  # max col, clamped to image width

        # slice
        obj_slice = labels[a:c, b:d] == i
        img_slice = image[a:c, b:d]

        ########################
        ### Local expansion ####
        ########################
        expanded = ~morphology.binary_dilation(obj_slice, morphology.disk(gap))

        nb_ls = []
        median = []
        area = []
        for k in range(4):
            t = obj_slice.copy()
            for _ in range(its):  # avoid shadowing the object index `i`
                t = ndimage.convolve(t, kernel_ls[k])
            nb_ls.append(t * expanded)

            ###############################
            #### Select largest object ####
            ###############################
            nb_labels, nb_labels_ls = ndimage.label(nb_ls[k])
            nb_areas = [0] + [
                i['area'] for i in measure.regionprops(nb_labels)
            ]  # regionprops skips index 0, annoyingly
            if len(nb_areas) == 1:
                nb_areas = nb_areas + [0]
            max_area = np.max(nb_areas)
            nb_areas = nb_areas == max_area  # sometimes (rarely) more than one subregion will have the same (max) area.
            nb_ls[k] = nb_areas[nb_labels]
            area.append(max_area)

            ##############################################
            #### Find median values of largest object ####
            ##############################################
            masked = np.ma.masked_array(img_slice, ~nb_ls[k]).compressed()
            median.append(np.median(masked))

        ###############################################
        #### Calc difference (left-right, up-down) ####
        ###############################################
        area = area == np.max(area)
        if area[0] or area[1]:
            diff = np.abs(median[0] - median[1])
        else:
            diff = np.abs(median[2] - median[3])

        ###################################
        #### Test if exceeds threshold ####
        ##################################
        diff = diff < max_diff
        decision_ls.append(diff)

    out = np.array(decision_ls)[labels]

    if return_band:
        out = [out, image]

    return out
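# A hedged, synthetic usage sketch (assumes this module's own imports -- numpy,
# scipy.ndimage, skimage color/measure/morphology -- are in scope). The skinny
# object sits in a uniform region, so it should be kept; an object hugging the
# bright edge would see different medians on its two sides and be dropped.
demo_img = np.full((40, 40), 0.5)
demo_img[:, 25:] = 0.9
demo_obj = np.zeros((40, 40), dtype=bool)
demo_obj[19:21, 5:20] = True
kept = neighborhood_filter(demo_img, demo_obj, max_diff=0.1)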
Пример #55
0
def tecplot2data(f, oszb, st_sz, filtering, filter):
    print(f'Generating data from {f} with stencil {st_sz[0]}x{st_sz[1]}')
    if os.path.isfile(os.path.join(f, 'res', 'oscillation.dat')):
        file_name = os.path.join(f, 'res', 'oscillation.dat')
    elif os.path.isfile(os.path.join(f, 'res', 'staticbubble.dat')):
        file_name = os.path.join(f, 'res', 'staticbubble.dat')
    elif os.path.isfile(os.path.join(f, 'res', 'gravitational.dat')):
        file_name = os.path.join(f, 'res', 'gravitational.dat')
    else:
        raise FileNotFoundError('no known .dat file found in ' + os.path.join(f, 'res'))

    with open(file_name, 'r') as myfile:
        data = myfile.read()
        # Append 'zone t' to file for capturing blocks later
        data = data + '\nZONE T'
        # Get variables
        variables = re.split(
            r'"\n"',
            re.search(r'(?<=VARIABLES = ")\w+("\n"\w+)+(?=")', data)[0])
        # For StaticBubble the next two lines must be commented out
        # variables.remove('X')
        # variables.remove('Y')
        n_vars = len(variables)
        # Get gs
        gs = [
            int(i)
            for i in re.findall(r'\d+',
                                re.search(r'I=\d+, J=\d+, K=\d+', data)[0])
        ]
        [gs[0], gs[1], gs[2]] = [gs[1], gs[0], gs[2]]
        # Get all timesteps (blocks)
        blocks = re.findall(r'ZONE\sT[\d\D]+?(?=ZONE\sT)', data)
        print(f'len(blocks):\t{len(blocks)}')
        # Remove first block (no information)
        # blocks = blocks[1:]

        # Get x/y from first block
        # coordinates = {}
        # block = blocks[1]
        block = blocks[0]
        numbers = np.array(re.findall(r'(\-?\d\.\d+E[\+\-]\d{2})', block))
        # print(f'len(numbers):\t{len(numbers)}')
        print(f'gs:\t{gs}')
        coordinates = np.empty((2, gs[0], gs[1], gs[2]))
        # Get x coordinates
        coordinates[0, :, :, :] = np.reshape(numbers[:np.prod(gs)],
                                             (gs[0], gs[1], gs[2]))
        # Get y coordinates
        coordinates[1, :, :, :] = np.reshape(
            numbers[np.prod(gs):2 * np.prod(gs)], (gs[0], gs[1], gs[2]))
        max_x = np.max(coordinates[0, :, :, :])
        max_y = np.max(coordinates[1, :, :, :])
        delta = max_x / (gs[0] - 1)
        coordinates = np.reshape(coordinates[:, :, :, 0], (2, gs[0], gs[1]))

        # Coordinates are messed up, make own coordinate matrix
        coordinates = np.empty((2, gs[0] - 1, gs[1] - 1))
        cord_vec = np.reshape(np.arange(gs[0] - 1) * delta,
                              (gs[0] - 1, 1))
        print(f'cord_vec.shape:\t{cord_vec.shape}')
        coordinates[0, :, :] = cord_vec
        cord_vec = np.reshape(np.arange(gs[1] - 1) * delta,
                              (gs[1] - 1, 1))
        coordinates[1, :, :] = np.flip(cord_vec.T)
        '''
        cord_vec = np.reshape(np.array(range(0, gs[0]-1)*delta), (gs[0]-1, 1))
        coordinates[0, :, :] = cord_vec.T
        coordinates[1, :, :] = np.flip(cord_vec)
        '''

        # print('Blocks truncated!')
        # blocks = blocks[:1]
        print(f'len(numbers):\t{len(numbers)}')
        print(f'2*np.prod(gs):\t{2*np.prod(gs)}')

        values = np.empty((n_vars - 2, gs[0] - 1, gs[1] - 1))
        for v in variables:
            # j = 0: concentration, j = 1: curvature
            j = variables.index(v)
            # Assign next 128*128 values to variable v
            if j >= 2:
                values[j-2, :, :] = np.reshape(numbers[
                    2*np.prod(gs) + (j-2)*(gs[0]-1)*(gs[1]-1) :\
                    2*np.prod(gs) + (j-2)*(gs[0]-1)*(gs[1]-1)+(gs[0]-1)*(gs[1]-1)
                ], (gs[0]-1, gs[1]-1))

        # Filtering & weighting
        # Initialize kernel
        kernel = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]])
        # kernel = kernel/np.sum(kernel)
        # mask = np.where(((values[0, :, :] <= 0.03) | (values[0, :, :] >= 0.97)) & (values[1, :, :] != 0), 1, 0)
        values[1, :, :] = np.where(
            (values[0, :, :] > 0.05) & (values[0, :, :] < 0.95),
            values[1, :, :], 0)
        # '''
        # Weight the curvature
        # Get weights in every cell
        weights = (1 - 2 * np.abs(0.5 - values[0, :, :])) * np.where(
            values[1, :, :] != 0, 1, 0)
        # Get sum of weights by folding with kernel
        weight_array = ndimage.convolve(weights,
                                        kernel,
                                        mode='constant',
                                        cval=0.0)

        # Weight the curvature by convolving values*weights with the kernel
        values[1, :, :] = np.where(
            (values[0, :, :] > 0.05) & (values[0, :, :] < 0.95),
            ndimage.convolve(
                values[1, :, :] * weights, kernel, mode='constant', cval=0.0) /
            weight_array, 0)

        # '''
        '''
        # Filter curvature and expand to 0.0075 - 0.9925
        # Get weights in every cell
        weights = np.where(values[1, :, :] != 0, 1, 0)
        # Get sum of weights by folding with kernel
        weight_array = ndimage.convolve(weights, kernel, mode='constant', cval=0.0)

        # Weight the curvature by convolving values*weights with the kernel
        [lower, upper] = [0.0075, 0.9925]
        # [lower, upper] = [0.05, 0.95]
        values[1, :, :] = np.where((values[0, :, :] > lower) & (values[0, :, :] < upper),
                                   ndimage.convolve(values[1, :, :], kernel, mode='constant', cval=0.0)/weight_array,
                                   0)
        # '''

        if False:  # debug toggle: no export
            fig, ax = plt.subplots()
            # ax.imshow(values[0, :, :], cmap='Greys_r')
            ax.imshow(values[1, :, :],
                      cmap='viridis',
                      norm=plt.Normalize(-30, 100))
            plt.show()
        else:
            # Make figure without border
            fig = plt.figure(frameon=False)
            fig.set_size_inches(10, 10)  # Square
            # fig.set_size_inches(5,10) # For rising bubble
            # fig.set_size_inches(10,5)
            ax = plt.Axes(fig, [0., 0., 1., 1.])
            ax.set_axis_off()
            fig.add_axes(ax)
            ''' # Artifact
            values = np.rot90(values, k=3, axes=(1, 2)) 
            sqrsize = 45
            xlower = 17
            ylower = 68
            # '''
            ''' # Zebra
            values = np.rot90(values, k=1, axes=(1, 2)) 
            sqrsize = 45
            xlower = 10
            ylower = 42
            # '''
            ''' # Wrong values
            values = np.rot90(values, k=1, axes=(1, 2)) 
            sqrsize = 16
            xlower = 64-16
            ylower = 128-25
            limits = [[xlower, xlower+sqrsize], [ylower, ylower+sqrsize]] # x, y
            # limits = [[0, 80], [0, 160]] # x, y
            # ''' # Horn
            '''
            values = np.rot90(values, k=1, axes=(1, 2)) 
            sqrsize = 20
            xlower = 22
            ylower = 47
            limits = [[xlower, xlower+sqrsize], [ylower, ylower+int(sqrsize/2)]] # x, y
            # '''
            # values = np.rot90(values, k=1, axes=(1, 2))
            sqrsize = 128
            xlower = 0
            ylower = 0
            limits = [[xlower, xlower + sqrsize], [ylower,
                                                   ylower + sqrsize]]  # x, y
            # '''
            y, x = np.meshgrid(
                np.linspace(limits[1][0], limits[1][1],
                            limits[1][1] - limits[1][0]),
                np.linspace(limits[0][0], limits[0][1],
                            limits[0][1] - limits[0][0]))
            # For rising bubble
            # x = x.T
            # y = y.T
            # Curvature, oscillating bubble (-30 to 100)
            # pcm = ax.pcolormesh(x, y, values[1, limits[0][0]:limits[0][1], limits[1][0]:limits[1][1]], cmap='RdBu', norm=colors.TwoSlopeNorm(vmin=-30, vcenter=0, vmax=100))
            # Curvature, oscillating bubble (-30 to 100), distorted
            # pcm = ax.pcolormesh(x, y, values[1, limits[0][0]:limits[0][1], limits[1][0]:limits[1][1]], cmap='Blues', vmin=50, vmax=100)
            # Curvature, static bubble (-0.3 to 1)
            # pcm = ax.pcolormesh(y, x, values[1, limits[0][0]:limits[0][1], limits[1][0]:limits[1][1]], cmap='RdBu', norm=colors.TwoSlopeNorm(vmin=-0.3, vcenter=0, vmax=1))

            # Concentration, distorted scale
            # pcm = ax.pcolormesh(x, y, values[0, limits[1][0]:limits[1][1], limits[0][0]:limits[0][1]], cmap='Greys_r', norm=colors.TwoSlopeNorm(vmin=0, vcenter=0.1, vmax=1))
            # Concentration, linear scale
            # pcm = ax.pcolormesh(x, y, values[0, limits[0][0]:limits[0][1], limits[1][0]:limits[1][1]], cmap='Greys_r', norm=colors.TwoSlopeNorm(vmin=0, vcenter=0.5, vmax=1))
            pcm = ax.pcolormesh(y,
                                x,
                                weights[limits[1][0]:limits[1][1],
                                        limits[0][0]:limits[0][1]],
                                cmap='Greys_r',
                                norm=colors.TwoSlopeNorm(vmin=0,
                                                         vcenter=0.5,
                                                         vmax=1))
            # Concentration, strongly distorted around 0.5
            # pcm = ax.pcolormesh(x, y, values[0, limits[0][0]:limits[0][1], limits[1][0]:limits[1][1]], cmap='Greys_r', norm=colors.TwoSlopeNorm(vmin=0.485, vcenter=0.5, vmax=0.505))

            # tkz.save('result2d.tex', axis_height='7cm', axis_width='7cm')
            # '''
            plt.savefig('result2d.eps')

            with open('result2d.eps', 'r') as myfile:
                data = myfile.read()
                data = re.sub(r'fill', 'gsave fill grestore stroke', data)

            with open('result2d.eps', 'w') as myfile:
                myfile.write(data)
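# The weighting above is a normalized convolution: convolve value*weight and the
# weights separately, then divide. A standalone miniature of the same idea:
import numpy as np
from scipy import ndimage

vals = np.array([[0.0, 10.0, 0.0], [0.0, 0.0, 0.0], [0.0, 20.0, 0.0]])
w = (vals != 0).astype(float)
kern = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]], dtype=float)
num = ndimage.convolve(vals * w, kern, mode='constant', cval=0.0)
den = ndimage.convolve(w, kern, mode='constant', cval=0.0)
smoothed = np.divide(num, den, out=np.zeros_like(num), where=den > 0)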
Пример #56
0
kc /= np.sum(kc)

km = rv.pdf(np.dstack(np.meshgrid(t_mid, t_mid)))
km /= np.sum(km)

kv = rv.pdf(np.dstack(np.meshgrid(t_grid, t_mid)))
kv /= np.sum(kv)

kh = kv.T

z = np.random.randn(2, 2)
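# Each pass below roughly doubles the grid resolution: the coarse field z is
# interpolated onto the finer grid through the four kernels defined above
# (kc: coincident nodes, km: cell midpoints, kv/kh: the two edge orientations),
# and fresh noise with decaying amplitude is added at every level.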
for ni in range(1, args.num_iter + 1):
    z_new = np.random.randn(2**ni + 1, 2**ni + 1)
    z_new /= args.decay**(ni - 1)

    z_new[::2, ::2] += convolve(z, kc)
    z_new[1::2, 1::2] += convolve(z, km)[:-1, :-1]
    z_new[1::2, ::2] += convolve(z, kv)[:-1, :]
    z_new[::2, 1::2] += convolve(z, kh)[:, :-1]

    z = z_new

m = (args.max - args.min) / (z.max() - z.min())
b = args.min - m * z.min()
z = m * z + b

fig = plt.figure(2, figsize=(10, 8))
ax = fig.add_subplot(1, 1, 1)
cax = ax.imshow(z)
plt.colorbar(cax)
Пример #57
0
def hessian_matrix(image, sigma=1, mode='constant', cval=0):
    """Compute Hessian matrix.

    The Hessian matrix is defined as::

        H = [Hxx Hxy]
            [Hxy Hyy]

    which is computed by convolving the image with the second derivatives
    of the Gaussian kernel in the respective x- and y-directions.

    Parameters
    ----------
    image : ndarray
        Input image.
    sigma : float
        Standard deviation used for the Gaussian kernel, which is used as
        weighting function for the auto-correlation matrix.
    mode : {'constant', 'reflect', 'wrap', 'nearest', 'mirror'}, optional
        How to handle values outside the image borders.
    cval : float, optional
        Used in conjunction with mode 'constant', the value outside
        the image boundaries.

    Returns
    -------
    Hxx : ndarray
        Element of the Hessian matrix for each pixel in the input image.
    Hxy : ndarray
        Element of the Hessian matrix for each pixel in the input image.
    Hyy : ndarray
        Element of the Hessian matrix for each pixel in the input image.

    Examples
    --------
    >>> from skimage.feature import hessian_matrix, hessian_matrix_eigvals
    >>> square = np.zeros((5, 5))
    >>> square[2, 2] = 1
    >>> Hxx, Hxy, Hyy = hessian_matrix(square, sigma=0.1)
    >>> Hxx
    array([[ 0.,  0.,  0.,  0.,  0.],
           [ 0.,  0.,  0.,  0.,  0.],
           [ 0.,  0.,  1.,  0.,  0.],
           [ 0.,  0.,  0.,  0.,  0.],
           [ 0.,  0.,  0.,  0.,  0.]])

    """

    image = _prepare_grayscale_input_2D(image)

    # window extent to the left and right, which covers > 99% of the normal
    # distribution
    window_ext = max(1, np.ceil(3 * sigma))

    ky, kx = np.mgrid[-window_ext:window_ext + 1, -window_ext:window_ext + 1]

    # second derivative Gaussian kernels
    gaussian_exp = np.exp(-(kx**2 + ky**2) / (2 * sigma**2))
    kernel_xx = 1 / (2 * np.pi * sigma**4) * (kx**2 / sigma**2 - 1)
    kernel_xx *= gaussian_exp
    kernel_xx /= kernel_xx.sum()
    kernel_xy = 1 / (2 * np.pi * sigma**6) * (kx * ky)
    kernel_xy *= gaussian_exp
    kernel_xy /= kernel_xx.sum()
    kernel_yy = kernel_xx.transpose()

    Hxx = ndimage.convolve(image, kernel_xx, mode=mode, cval=cval)
    Hxy = ndimage.convolve(image, kernel_xy, mode=mode, cval=cval)
    Hyy = ndimage.convolve(image, kernel_yy, mode=mode, cval=cval)

    return Hxx, Hxy, Hyy
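# The returned arrays form a symmetric 2x2 matrix per pixel, so its eigenvalues
# have a closed form (this is what hessian_matrix_eigvals computes). A hedged
# sketch using the function above:
import numpy as np

square = np.zeros((5, 5))
square[2, 2] = 1
Hxx, Hxy, Hyy = hessian_matrix(square, sigma=0.1)
half_trace = (Hxx + Hyy) / 2
root = np.sqrt(((Hxx - Hyy) / 2) ** 2 + Hxy ** 2)
l1, l2 = half_trace + root, half_trace - root  # eigenvalues, l1 >= l2 at each pixel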
Пример #58
0
''' Write a program to detect points in a given gray-level image with a suitable mask '''

import cv2
import numpy as np
from scipy import ndimage

image = cv2.imread('img3.jpeg', 0)
cv2.imshow("original", image)

kernel_laplace1 = np.array([[-1, -1, -1],
                            [-1, 8, -1],
                            [-1, -1, -1]])
print(kernel_laplace1, 'point detection')

# cast to a signed type so the negative kernel weights don't wrap around in uint8
out_h = ndimage.convolve(image.astype(np.int16), kernel_laplace1, mode='reflect')
print(out_h)
h = image.shape[0]
w = image.shape[1]

image_h = np.empty([h, w], dtype="uint8")

T = 0.9 * np.max(np.abs(out_h))  # hypothetical threshold: flag only strong responses
for x in range(0, w):
    for y in range(0, h):
        if abs(out_h[y, x]) >= T:
            image_h[y, x] = 255
        else:
            image_h[y, x] = 0

cv2.imshow("Point ", image_h)
cv2.waitKey(0)
Пример #59
0
import cv2
import numpy as np
from scipy import ndimage

kernel_3x3 = np.array([[-1,-1,-1],
                       [-1,8,-1],
                       [-1,-1,-1]])
img = cv2.imread('1.jpg', 0)
k3 = ndimage.convolve(img, kernel_3x3)
blurred = cv2.GaussianBlur(img, (11,11), 0)
g_hpf = img - blurred
cv2.imshow('3x3', k3)
cv2.imshow('g_hpf', g_hpf)
cv2.waitKey()
cv2.destroyAllWindows()
Пример #60
0
import cv2
import numpy as np
from scipy import ndimage

kl_3x3 = np.array([[-1, -1, -1], [-1, 8, -1], [-1, -1, -1]])

kl_5x5 = np.array([[-1, -1, -1, -1, -1], [-1, 1, 2, 1, -1], [-1, 2, 4, 2, -1],
                   [-1, 1, 2, 1, -1], [-1, -1, -1, -1, -1]])

img = cv2.imread("1.jpg", 0)

k3 = ndimage.convolve(img, kl_3x3)
k5 = ndimage.convolve(img, kl_5x5)

blurred = cv2.GaussianBlur(img, (11, 11), 0)
gf = img - blurred

cv2.imshow("private", img)
cv2.imshow("k3", k3)
cv2.imshow("k5", k5)
cv2.imshow("gf", gf)
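# Hedged caveat: convolving a uint8 image with a kernel containing negative
# weights wraps around modulo 256 rather than clipping; casting to a signed
# type first avoids that artifact:
k3_safe = np.clip(ndimage.convolve(img.astype(np.int16), kl_3x3), 0, 255).astype(np.uint8)
cv2.imshow("k3 no wraparound", k3_safe)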

cv2.waitKey()
cv2.destroyAllWindows()