Example #1
    def _apply_median_filter(self, kernel=(3, 3)):
        """
        apply a median filter to the data to remove extreme outliers
        
        kernel is (station, frequency)
        
        """

        if self.ellipse_colorby == "phimin":
            color_array = np.array([rpt.residual_pt.phimin[0] for rpt in self.residual_pt_list])
            filt_array = sps.medfilt2d(color_array, kernel_size=kernel)
            for ss in range(filt_array.shape[0]):
                self.residual_pt_list[ss].residual_pt.phimin[0] = filt_array[ss]

        elif self.ellipse_colorby == "phimax":
            color_array = np.array([rpt.residual_pt.phimax[0] for rpt in self.residual_pt_list])
            filt_array = sps.medfilt2d(color_array, kernel_size=kernel)
            for ss in range(filt_array.shape[0]):
                self.residual_pt_list[ss].residual_pt.phimax[0] = filt_array[ss]

        elif self.ellipse_colorby == "skew":
            color_array = np.array([rpt.residual_pt.beta[0] for rpt in self.residual_pt_list])
            filt_array = sps.medfilt2d(color_array, kernel_size=kernel)
            for ss in range(filt_array.shape[0]):
                self.residual_pt_list[ss].residual_pt.beta[0] = filt_array[ss]

        # --> need to do azimuth for all
        color_array = np.array([rpt.residual_pt.azimuth[0] for rpt in self.residual_pt_list])
        filt_array = sps.medfilt2d(color_array, kernel_size=kernel)
        for ss in range(filt_array.shape[0]):
            self.residual_pt_list[ss].residual_pt.azimuth[0] = filt_array[ss]
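Examples #1 and #2 are two versions of the same method; the core operation in both is a 2-D median filter over a (station, frequency) array. A minimal sketch of that call with made-up data (the names here are illustrative, not from the original class):

import numpy as np
import scipy.signal as sps

phimin = np.random.rand(20, 15) * 90   # 20 stations x 15 frequencies
phimin[4, 7] = 1e6                     # plant an extreme outlier
filtered = sps.medfilt2d(phimin, kernel_size=(3, 3))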
Example #2
    def _apply_median_filter(self, kernel=(3, 3)):
        """
        apply a median filter to the data to remove extreme outliers
        
        kernel is (station, frequency)
        
        """

                                   
        filt_phimin_arr = sps.medfilt2d(self.rpt_array['phimin'], 
                                        kernel_size=kernel)                            
        filt_phimax_arr = sps.medfilt2d(self.rpt_array['phimax'], 
                                        kernel_size=kernel)                            
        filt_skew_arr = sps.medfilt2d(self.rpt_array['skew'],
                                      kernel_size=kernel)                            
        filt_azimuth_arr = sps.medfilt2d(self.rpt_array['azimuth'], 
                                         kernel_size=kernel) 
        
        self.rpt_array['phimin'] = filt_phimin_arr
        self.rpt_array['phimax'] = filt_phimax_arr
        self.rpt_array['skew'] = filt_skew_arr
        self.rpt_array['azimuth'] = filt_azimuth_arr
        self.rpt_array['geometric_mean'] = np.sqrt(abs(filt_phimin_arr *
                                                       filt_phimax_arr))

        print('Applying Median Filter with kernel {0}'.format(kernel))
Example #3
    def spatial_smooth(
            self,
            kernel=None,
            convbeam=True,
            spatial_smooth=None,
            spectral_smooth=None,
            niter=1
            ):
        """
        Smooth the noise estimate in the spatial dimension. Two
        components: median smoothing and convolving with the beam.
        """

        # Manually median filter (square box)
        if kernel is not None:
            print "Median filtering"
            self.spatial_norm = ssig.medfilt2d(self.spatial_norm,
                                               kernel_size=kernel)

        data = self.cube.filled_data[:].astype('=f')

        if self.spatial_norm is None:
            self.spatial_norm = np.ones(data.shape[-2:])
            self.spectral_norm = np.ones((data.shape[0]))
        for count in range(niter):
            scale = self.scale_cube
            snr = data/scale
            self.spatial_norm = nanstd(snr,axis=0)*self.spatial_norm
            if self.beam is not None:
                if self.astropy_beam_flag:
                    beam = self.beam
                else:
                    beam = self.beam.as_kernel(get_pixel_scales(self.cube.wcs))

                self.spatial_norm = convolve_fft(self.spatial_norm,
                                                 beam,
                                                 interpolate_nan=True,
                                                 normalize_kernel=True)
            if spatial_smooth is not None:
                self.spatial_norm = ssig.medfilt2d(self.spatial_norm,
                    kernel_size=spatial_smooth)

            snr = data/self.scale_cube
            self.spectral_norm = nanstd(snr.reshape((snr.shape[0],
                                                     snr.shape[1]*
                                                     snr.shape[2])),
                                        axis=1)*self.spectral_norm
            if spectral_smooth is not None:
                self.spectral_norm = ssig.medfilt(self.spectral_norm,
                    kernel_size=spectral_smooth)
        self.spectral_norm[np.isnan(self.spectral_norm) | (self.spectral_norm==0)]=1.
        self.spatial_norm[np.isnan(self.spatial_norm) | (self.spatial_norm==0)]=1.
        self.spatial_norm[~self.spatial_footprint]=np.nan
        ### THIS IS ALREADY SET IN calculate_scale
        # self.distribution_shape=(0,self.scale)
        return
Example #4
def convert_cloud_to_fmask(raster):
    from numpy.core.numerictypes import byte
    z, y, x = raster.shape
    fmask = numpy.zeros((y, x)).astype(byte)  # FMASK_LAND
    fmask = numpy.where(raster[0, :, :] > 100, FMASK_CLOUD, fmask)
    fmask = numpy.where(raster[0, :, :] < 100, FMASK_CLOUD_SHADOW, fmask)
    fmask = numpy.where(raster[0, :, :] == 0, FMASK_LAND, fmask)
    fmask = numpy.where(raster[0, :, :] == -999.0, FMASK_OUTSIDE, fmask)
    fmask = signal.medfilt2d(fmask * 1.0, kernel_size=3)
    fmask = signal.medfilt2d(fmask * 1.0, kernel_size=5)    
    return fmask
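A hypothetical invocation of the routine above, assuming numpy and scipy's signal module are imported at module level as the function requires; the FMASK_* codes are module constants in the original, so placeholder values are defined here only to make the sketch self-contained:

import numpy
from scipy import signal

FMASK_LAND, FMASK_CLOUD, FMASK_CLOUD_SHADOW, FMASK_OUTSIDE = 1, 4, 2, 255
raster = numpy.random.uniform(-1.0, 200.0, size=(1, 64, 64))  # (band, y, x)
fmask = convert_cloud_to_fmask(raster)  # float array of median-filtered class codes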
Example #5
 def speckle_filter(self, filter_name, ws):
     """
     Filter the image using 'median' filtering methods
     """
     if filter_name == 'median':
         # filter the image using median filter 
         self.SigmaHHmf = medfilt2d(self.SigmaHH, kernel_size=ws)
         self.SigmaHVmf = medfilt2d(self.SigmaHV, kernel_size=ws)
         self.SigmaVHmf = medfilt2d(self.SigmaVH, kernel_size=ws)
         self.SigmaVVmf = medfilt2d(self.SigmaVV, kernel_size=ws)
         return self.SigmaHHmf, self.SigmaHVmf, \
             self.SigmaVHmf, self.SigmaVVmf
         
     else:
          print('Please specify the name of the filter: "median"')
Example #6
def rc_focus_check(files):
    

    fpos = []
    metrics = []
    for fn in files:
        F = pf.open(fn)
        
        dat,hdr = F[0].data, F[0].header
        dat = dat[1200:1900,1200:1900]
        dat -= np.median(dat)
        
        md = signal.medfilt2d(dat)
        bad = (dat-md)/md > 5
        dat[bad]=md[bad]
        
        sort = np.sort(dat.flatten())
        a, b = int(np.floor(len(sort)*.03)), int(np.ceil(len(sort)*.97))
        metric = (sort[b]-sort[a])/sort[b]
        metric = np.max(dat) - np.min(dat)  # overwrites the percentile metric above
        fpos.append(hdr["secfocus"])
        metrics.append(metric)
        
        print(fn, hdr["secfocus"], hdr["EXPTIME"], metric)
        

    x = np.argmax(metrics)
    return fpos[x], fpos, metrics
Example #7
    def test_basic(self):
        f = [[50, 50, 50, 50, 50, 92, 18, 27, 65, 46],
             [50, 50, 50, 50, 50,  0, 72, 77, 68, 66],
             [50, 50, 50, 50, 50, 46, 47, 19, 64, 77],
             [50, 50, 50, 50, 50, 42, 15, 29, 95, 35],
             [50, 50, 50, 50, 50, 46, 34,  9, 21, 66],
             [70, 97, 28, 68, 78, 77, 61, 58, 71, 42],
             [64, 53, 44, 29, 68, 32, 19, 68, 24, 84],
             [ 3, 33, 53, 67,  1, 78, 74, 55, 12, 83],
             [ 7, 11, 46, 70, 60, 47, 24, 43, 61, 26],
             [32, 61, 88,  7, 39,  4, 92, 64, 45, 61]]

        d = signal.medfilt(f, [7, 3])
        e = signal.medfilt2d(np.array(f, float), [7, 3])
        assert_array_equal(d, [[ 0, 50, 50, 50, 42, 15, 15, 18, 27,  0],
                               [ 0, 50, 50, 50, 50, 42, 19, 21, 29,  0],
                               [50, 50, 50, 50, 50, 47, 34, 34, 46, 35],
                               [50, 50, 50, 50, 50, 50, 42, 47, 64, 42],
                               [50, 50, 50, 50, 50, 50, 46, 55, 64, 35],
                               [33, 50, 50, 50, 50, 47, 46, 43, 55, 26],
                               [32, 50, 50, 50, 50, 47, 46, 45, 55, 26],
                               [ 7, 46, 50, 50, 47, 46, 46, 43, 45, 21],
                               [ 0, 32, 33, 39, 32, 32, 43, 43, 43,  0],
                               [ 0,  7, 11,  7,  4,  4, 19, 19, 24,  0]])
        assert_array_equal(d, e)
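The zeros along the border of the expected array come from medfilt's implicit zero padding; a quick standalone check of that boundary behavior:

import numpy as np
from scipy import signal

out = signal.medfilt2d(np.ones((5, 5)), 3)
assert out[0, 0] == 0.0  # the corner window is mostly zero padding, so the median is 0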
Example #8
def speckle_filter(ifile, ofile):
    """
    ifile : path to the input GeoTIFF
    ofile : path for the filtered output GeoTIFF
    """
    # read the Digital Number (DNp)
    dataset = gdal.Open(ifile,GA_ReadOnly)
    sigma = dataset.GetRasterBand(1).ReadAsArray()
    inci = dataset.GetRasterBand(2).ReadAsArray()
    RasterXSize = dataset.RasterXSize
    RasterYSize = dataset.RasterYSize
    GT = dataset.GetGeoTransform()
    projection = dataset.GetProjection()
    dataset = None
    
    # filter
    sigma = medfilt2d(sigma, kernel_size=7)
    #sigma = wiener(sigma, mysize=(7,7),noise=None)
    
    # save as GeoTIFF
    driver = gdal.GetDriverByName('GTiff')
    output_dataset = driver.Create(ofile, RasterXSize, RasterYSize,2,gdal.GDT_Float32)
    output_dataset.SetGeoTransform(GT)
    output_dataset.SetProjection(projection)
    output_dataset.GetRasterBand(1).WriteArray(sigma, 0, 0)
    output_dataset.GetRasterBand(2).WriteArray(inci, 0, 0)
    output_dataset = None
Example #9
def sigmaVV(dataset, xOff=0, yOff=0, xS=None, yS=None, \
    xBufScale=None, yBufScale=None, s='abs', filter_name='wiener', ws=7, que=[]):
    # calculate the sinclair matrix
    S_VV = dataset.GetRasterBand(2).ReadAsArray(xoff=xOff, yoff=yOff, \
    win_xsize=xS, win_ysize=yS, \
    buf_xsize=xBufScale, buf_ysize=yBufScale)
    # calculate the magnitude
    S_VV_ABS = absolute(S_VV)
    # calculate the linear sigma_naught
    if s == 'abs':
        SigmaVV = S_VV_ABS**2
    # calculate the sigma_naught in dB
    if s == 'sigma0': 
        SigmaVV = 2*10*log10(S_VV_ABS)
#    SigmaVVwnr = wiener(SigmaVV,mysize=(7,7),noise=None)
    #we're putting return value into queue
    que.put(SigmaVV)
    #Filter the image using 'median' or Lee 'wiener' filtering methods
    if filter_name == 'median':
        # filter the image using median filter
        SigmaVVmed = medfilt2d(SigmaVV, kernel_size=ws)
        que.put(SigmaVVmed)
    elif filter_name == 'wiener':
        # filter the image using wiener filter
        SigmaVVwnr = wiener(SigmaVV,mysize=(ws,ws),noise=None)
        que.put(SigmaVVwnr)
    else:
        print('Please specify the name of the filter: "median" or "wiener"')
Example #10
def medfiltnan(a, kernel, thresh=0):
    """
    Do a running median filter of the data.

    Regions where more than *thresh* fraction of the points are NaN
    are set to NaN.

    Currently only works for vectors.
    """
    flag_1D = False
    if a.ndim == 1:
        a = a[None, :]
        flag_1D = True
    try:
        len(kernel)
    except TypeError:
        kernel = [1, kernel]
    out = medfilt2d(a, kernel)
    if thresh > 0:
        out[convolve2d(np.isnan(a),
                       np.ones(kernel) / np.prod(kernel),
                       'same') > thresh] = np.nan
    if flag_1D:
        return out[0]
    return out
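A hypothetical use of medfiltnan above (it expects medfilt2d, convolve2d, and np to be in scope in its module): a 5-point running median over a vector with a gap, where windows that are more than half NaN stay NaN:

import numpy as np

v = np.sin(np.linspace(0, 10, 100))
v[40:44] = np.nan
smoothed = medfiltnan(v, 5, thresh=0.5)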
Example #11
 def replace_nans(self, inplace=False):
     nans = np.isnan(self)
     tmp = medfilt2d(self, 3)
     if inplace:
         self[nans] = tmp[nans]
     else:
         y = self.copy()
         y[nans] = tmp[nans]
         return y
Example #12
def main():
    f = np.hstack((np.ones((200, 100))*0.3, np.ones((200, 100))*0.7))
    lena, camera = get_data()
    gauss_noised = f + np.random.normal(size=f.shape, scale=0.1)
    sp_noised = salt_and_pepper(f, density=0.05)
    specked = speckle_noise(f)

    f1 = array_with_hist(f)
    f2 = array_with_hist(gauss_noised)
    f3 = array_with_hist(sp_noised)
    f4 = array_with_hist(specked)

    noisy_lena = lena + np.random.normal(scale=0.002**.5, size=lena.shape)

    f5 = array_with_hist(noisy_lena)

    # Various Kernels
    k = np.ones((3,3), dtype=np.double) * 1. / 9.
    k7 = np.ones((7,7), dtype=np.double)*1./49.
    g = gauss_kernel(3)

    denoised = convolve2d(noisy_lena, k, mode='same')
    denoised2 = convolve2d(noisy_lena, k7, mode='same')

    f5 = array_with_hist(lena)
    f6 = array_with_hist(noisy_lena)
    f7 = array_with_hist(denoised)
    f8 = array_with_hist(denoised2)
    zz = convolve2d(noisy_lena, g, mode='same')
    f15 = array_with_hist(zz)  # Apologies for order change.

    # Salt and Pepper
    salted = salt_and_pepper(lena)
    avg_salt = convolve2d(salted, k7, mode='same')
    norm_salt = convolve2d(salted, g, mode='same')
    med_lena = medfilt2d(salted, 7)

    f9 = array_with_hist(salted)
    f10 = array_with_hist(avg_salt)
    f11 = array_with_hist(norm_salt)
    f12 = array_with_hist(med_lena)

    # Sharpening:
    # 2x2 Camera: Original, Blurred, Residual, Boosted
    g_camera = convolve2d(camera, g, mode='same')
    r = camera - g_camera  # residual (original minus blurred)
    sharper, axes = plt.subplots(2, 2, sharey=True, figsize=(8,8))
    ims = (camera, g_camera, r, camera + r)
    labels = ('original', 'gaussian blurred', 'residual', 'boosted')
    for i, e in enumerate(axes.flat):
        e.imshow(ims[i], cmap='gray', vmin=0., vmax=1.)
        e.set_title(labels[i])
    axes.flat[2].imshow(r, cmap='gray_r')

    f13 = sharper
    f14 = three_by_three_unsharp(camera, r)

    print('fin')
Example #13
def XPADcalcFlatFieldDarkMask(dark,flat,medfilt_width=21):
  print "XPADcalcFlatFieldDarkMask - median filter will take a few seconds..."
  # flat field calculated using a large median filter
  flatmed=signal.medfilt2d(flat,medfilt_width)
  #Include in the dark mask pixels counting less than half or more than 50%
  #than the median filtered value
  mask=(flat<(.5*flatmed))+(flat>(1.5*flatmed))
  # Also include pixels counting in the dark...
  mask+=(dark>5)
  return flatmed/(flat+1e-8)*(mask==0),mask>0
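An example call with synthetic detector frames (real inputs would be dark and flat-field exposures; medfilt_width must be odd for medfilt2d). The shapes and count rates here are made up:

import numpy as np

flat = np.random.poisson(1000, size=(120, 120)).astype(np.float64)
dark = np.random.poisson(1, size=(120, 120)).astype(np.float64)
flatfield, badmask = XPADcalcFlatFieldDarkMask(dark, flat, medfilt_width=21)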
Example #14
    def speckle_filter(self, filter_name='wiener', ws=7):
        """
        Filter the image using 'median' or 'wiener' filtering methods
        """
        if filter_name == 'median':
            # filter the image using median filter
            self.SigmaHHmed = medfilt2d(self.SigmaHH, kernel_size=ws)
            self.SigmaHVmed = medfilt2d(self.SigmaHV, kernel_size=ws)
            self.SigmaVHmed = medfilt2d(self.SigmaVH, kernel_size=ws)
            self.SigmaVVmed = medfilt2d(self.SigmaVV, kernel_size=ws)

        elif filter_name == 'wiener':
            # filter the image using wiener filter
            self.SigmaHHwnr = wiener(self.SigmaHH,mysize=(ws,ws),noise=None)
            self.SigmaHVwnr = wiener(self.SigmaHV,mysize=(ws,ws),noise=None)
            self.SigmaVHwnr = wiener(self.SigmaVH,mysize=(ws,ws),noise=None)
            self.SigmaVVwnr = wiener(self.SigmaVV,mysize=(ws,ws),noise=None)

        else:
            print('Please specify the name of the filter: "median" or "wiener"')
Example #15
def median(img, kernel_size):
    """
    Median filter.

    Parameters :
        img         : input 2D image
        kernel_size : (length, width) of kernel
    """
    # Convert to numpy array if necessary
    if not isinstance(img, np.ndarray): img = np.array(img)

    return sig.medfilt2d(img, kernel_size)
Example #16
def denoise_image(inp):
    # estimate 'background' color by a median filter
    bg = signal.medfilt2d(inp, 11)
    save('background.png', bg)

    # compute 'foreground' mask as anything that is significantly darker than
    # the background
    mask = inp < bg - 0.1
    save('foreground_mask.png', mask)

    # return the input value for all pixels in the mask or pure white otherwise
    return np.where(mask, inp, 1.0)
Example #17
    def measureData(self):        
        roiDataFilt = medfilt2d(np.double(self.roiData), 5)
        self.spectrumData = np.sum(self.roiData, 1) / self.roiData.shape[1]
        if self.wavelengths is None:
            self.generateWavelengths()
        if self.wavelengths.shape[0] == self.spectrumData.shape[0]:
            self.plot1.setData(x=self.wavelengths, y=self.spectrumData)
            self.plot1.update()

#         goodInd = np.arange(self.signalStartIndex.value(), self.signalEndIndex.value() + 1, 1)
#         bkgInd = np.arange(self.backgroundStartIndex.value(), self.backgroundEndIndex.value() + 1, 1)
#         bkg = self.waveform1[bkgInd].mean()
#         bkgPump = self.waveform2[bkgInd].mean()
#         autoCorr = (self.waveform1[goodInd] - bkg).sum()
#         pump = (self.waveform2[goodInd] - bkgPump).sum()
# #        pump = 1.0
#         if self.normalizePumpCheck.isChecked() == True:
#             try:
#                 self.trendData1 = np.hstack((self.trendData1[1:], autoCorr / pump))
#             except:
#                 pass
#         else:
#             self.trendData1 = np.hstack((self.trendData1[1:], autoCorr))
#         self.plot3.setData(y=self.trendData1)        

        # Evaluate the fps
        t = time.time()
        if self.measureUpdateTimes.shape[0] > 10:
            self.measureUpdateTimes = np.hstack((self.measureUpdateTimes[1:], t))
        else:
            self.measureUpdateTimes = np.hstack((self.measureUpdateTimes, t))
        fps = 1 / np.diff(self.measureUpdateTimes).mean()
        self.fpsLabel.setText(QtCore.QString.number(fps, 'f', 1))

        # If we are running a scan, update the scan data
        if self.running == True:
            if self.moving == False and self.moveStart == False:
                if self.trendData1 is None:
                    self.trendData1 = self.spectrumData      
                    self.timeMarginalTmp = self.spectrumData.sum()          
                else:
                    self.trendData1 += self.spectrumData
                    self.timeMarginalTmp += self.spectrumData.sum()
                self.currentSample += 1
                if self.currentSample >= self.avgSamples:
                    self.running = False
                    self.measureScanData()
                    self.trendData1 = None
                    self.currentSample = 0
                    self.scanUpdateAction()
Example #18
def fix_medfilt2d(array, width):
    """Wrap medfilt2d so that the results more closely resemble IDL MEDIAN().
    """
    import numpy as np
    from scipy.signal import medfilt2d
    medarray = medfilt2d(array, min(width, array.size))
    istart = int((width-1)/2)
    iend = (array.shape[0] - int((width+1)/2), array.shape[1] - int((width+1)/2))
    i = np.arange(array.shape[0])
    j = np.arange(array.shape[1])
    w = ((i < istart) | (i > iend[0]), (j < istart) | (j > iend[1]))
    # restore the raw values along the border rows and columns,
    # since IDL MEDIAN() leaves the edges unfiltered
    medarray[w[0], :] = array[w[0], :]
    medarray[:, w[1]] = array[:, w[1]]
    return medarray
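With the border handling above, the outermost (width-1)/2 rows and columns pass through unfiltered, mimicking IDL MEDIAN(); a quick sanity check:

import numpy as np

img = np.random.rand(10, 10)
out = fix_medfilt2d(img, 5)
assert np.array_equal(out[0, :], img[0, :])  # border row is left untouched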
Example #19
def Medfilt( array, axis, kernel_size, Nprocess=None ) : 
	'''
	array:
		N-d array

	axis:
		None | int number
		(1) ==None: use scipy.signal.medfilt(array, kernel_size)
		(2) ==int number: mediam filter along this axis
	'''
	Nprocess = NprocessCPU(Nprocess, False)[0]
	array, kernel_size =npfmt(array), npfmt(kernel_size).flatten()
	shape, dtype = array.shape, array.dtype

	tf = (kernel_size % 2 == 0)
	kernel_size[tf] += 1
	kernel_size = kernel_size[:len(shape)]
	kernel_size = np.append(kernel_size, [kernel_size[0] for i in range(len(shape)-len(kernel_size))])

	if (axis is None) : 
		if (len(shape) == 2) : 
			array = spsn.medfilt2d(1.*array, kernel_size)
		else : array = spsn.medfilt(array, kernel_size)

	else : 
		axis = int(round(axis))
		if (axis < 0) : axis = len(shape) + axis
		if (axis >= len(shape)) : Raise(Exception, 'axis='+str(axis)+' out of '+str(len(shape))+'D array.shape='+str(shape))
		kernel_size = kernel_size[axis]

		if (len(shape) == 1) : 
			array = spsn.medfilt(array, kernel_size)
		else : 
			array = ArrayAxis(array, axis, -1)
			shape = array.shape
			array = array.reshape(np.prod(shape[:-1]), shape[-1])
			sent = array
			bcast = kernel_size
			if (Nprocess == 1) : 
				array = _Multiprocess_medfile([None, sent, bcast])
			else : 
				pool = PoolFor(0, len(array), Nprocess)
				array = pool.map_async(_Multiprocess_medfile, sent, bcast)
				array = np.concatenate(array, 0)

			array = array.reshape(shape)
			array = ArrayAxis(array, -1, axis)

	array = array.astype(dtype)
	return array
Example #20
def median_filter(s_or_a, order=11, p=2):

    if hasattr(s_or_a, 'signal'):
        a = s_or_a
        s = get_spectrogram(a, a.sample_rate / 10, a.sample_rate / 20, return_angles=True)
    else:
        a = None
        s = s_or_a

    harmonic = medfilt2d(np.abs(s), (abs(order), 1))
    percussive = medfilt2d(np.abs(s), (1, abs(order)))
    if order > 0:
        mask = harmonic ** p / (harmonic ** p + percussive ** p)
    else:
        mask = percussive ** p / (harmonic ** p + percussive ** p)
    masked = s * mask
    masked[np.isnan(masked)] = 0

    if a:
        masked_audio = get_audio(masked, a.sample_rate, a.sample_rate / 10, a.sample_rate / 20)
        return masked_audio
    else:
        return masked
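A sketch of driving the function above with a plain spectrogram array rather than an audio object, assuming scipy.signal.stft as a stand-in for the original's get_spectrogram/get_audio helpers (not reproduced here). scipy returns (freq, time), so the array is transposed to the (time, freq) orientation the kernel shapes imply:

import numpy as np
from scipy.signal import stft

fs = 22050
x = np.random.randn(2 * fs)               # stand-in for an audio signal
s = stft(x, fs=fs, nperseg=1024)[2].T     # complex spectrogram, (time, freq)
harmonic = median_filter(s, order=11)     # positive order keeps the harmonic part
percussive = median_filter(s, order=-11)  # negative order keeps the percussive part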
Example #21
def calculate_water(data):
    z, y, x = data.shape
    pot_water = numpy.zeros((y, x))
    for b in range(z - 1, 1, -1):
        pot_water[:, :] = numpy.where(data[b, :, :] < data[b - 1, :, :], pot_water + b * 1, pot_water)
    for b in range(z - 1, 1, -1):
        pot_water[:, :] = numpy.where(data[b, :, :] > data[b - 1, :, :], pot_water - b * 1, pot_water)
    # NDWI bands
    b = 4
    pot_water[:, :] = numpy.where(data[b, :, :] > data[b - 3, :, :], pot_water - 1, pot_water) 
    # NIR-RE diff vs. R-RE diff, aka sun glint
    dark_nir = numpy.where(data[b, :, :] < 0.12, data, data * 0)
    pot_water[:, :] = numpy.where(numpy.abs(dark_nir[b, :, :] - dark_nir[b - 1, :, :]) < abs(dark_nir[b - 1, :, :] - dark_nir[b - 2, :, :]), pot_water + 1.5, pot_water - 1.5) 
    pot_water = signal.medfilt2d(pot_water, kernel_size=5)
    pot_water = numpy.where(data[0, :, :] == 0, -999, pot_water)
    return pot_water
Example #22
 def smooth(self, size = 4):
     assert size > 0
     def phaseComplement(value):
         value -= (value > np.pi)*2*np.pi
         value += (value < - np.pi)*2*np.pi
         return value
     new_data = np.zeros_like(self.data)
     for frame in range( self.data.shape[0]):
         if frame % 10 == 0: print(frame)
         for n in range(self.data.shape[1])[size:-size]:
             for m in range(self.data.shape[2])[size:-size]:
                 base = self.data[frame, n, m]
                 target = self.data[frame, n-size:n+size+1, m-size:m+size+1]
                 difference = phaseComplement(target-base)
                 diff = signal.medfilt2d(difference, kernel_size=size*2+1)[size,size]
                 new_data[frame, n, m] = phaseComplement( base +  diff)
                 #new_data[frame, n, m] = phaseComplement( base + np.mean(difference.flatten()) )
     self.data = new_data
Example #23
def denoise_im_with_back(inp):
    # estimate 'background' color by a median filter
    bg = signal.medfilt2d(inp, 11)
    save('background.png', bg)

    # compute 'foreground' mask as anything that is significantly darker than
    # the background
    mask = inp < bg - 0.1    
    save('foreground_mask.png', mask)
    back = np.average(bg)

    # Let's remove some splattered ink
    mod = ndimage.median_filter(mask, 2)
    mod = ndimage.grey_closing(mod, size=(2, 2))

    # either return the foreground or the average background value
    out = np.where(mod, inp, back)  # 1 is pure white
    return out
Example #24
			def update(val):
				thres = sthres.val
				newmatrix = signal.medfilt2d(thematrix, kernel_size=kernel_size)
				belowthres_indices = newmatrix < thres
				newmatrix[belowthres_indices] = 0			
				labelarray,numfoundrois = measurements.label(newmatrix)
				print(str(numfoundrois) + ' ROIs found!')
				# organize rois
				therois = []
				for n in range(numfoundrois):
					rawindices = np.nonzero(labelarray == n+1)
					eachroi = []
					for m in range(len(rawindices[0])):
						eachroi.append((rawindices[1][m],rawindices[0][m]))
					therois.append(eachroi)
				theimage.set_data(newmatrix)
				plt.draw()
				roi_result.therois =  therois
				roi_result.numfoundrois = numfoundrois
Example #25
def get_spots_mask( A, median_size=None, nofroi=12,  give_borders=False) :


      A = signal.medfilt2d(A, kernel_size=median_size)
  

      mask=np.zeros(A.shape,"i")
      cerchi = CercaAnelli(A)
      for c in cerchi[:]:
        if len(c)<LENMIN: continue
        for y,x in c:
          mask[y,x] = 1

      filled=morph.binary_fill_holes(mask)

      newmask = relabelise(filled,A, nofroi)

      if give_borders:
        return newmask, mask
      else:
        return newmask
Example #26
def volumeMask(vol):
    """
    :param vol: a 3-dimensional numpy array
    :return: mask, a binary mask with the same shape as vol, and mCoords, a list of (x,y,z) indices representing the
    masked coordinates.
    """
    from numpy import array, where
    from scipy.signal import medfilt2d
    from skimage.filters import threshold_otsu
    from skimage import morphology as morph

    filtVol = array([medfilt2d(x.astype('float32')) for x in vol])

    thr = threshold_otsu(filtVol.ravel())
    mask = filtVol > thr
    strel = morph.disk(3)
    mask = array([morph.binary_closing(x, strel) for x in mask])
    mask = array([morph.binary_opening(x, strel) for x in mask])

    z, y, x = where(mask)
    mCoords = list(zip(x, y, z))

    return mask, mCoords
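A hypothetical call on a synthetic volume; with random data the Otsu threshold yields a mask of the right shape, though not a meaningful one:

import numpy as np

vol = np.random.rand(12, 64, 64)
mask, mCoords = volumeMask(vol)
assert mask.shape == vol.shape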
Example #27
def _enhance_img(img, median_ks, normalized=True):
    """
    Enhance the projection image from APS 1ID to counteract its weak
    contrast

    Parameters
    ----------
    img : ndarray
        original projection image collected at APS 1ID
    median_ks: int
        kernel size of the 2D median filter, must be odd
    normalized: bool, optional
        specify whether the enhanced image is normalized between 0 and 1,
        default is True

    Returns 
    -------
    ndarray
        enhanced projection image
    """
    wgt = _calc_histequal_wgt(img)
    img = medfilt2d(img, kernel_size=median_ks).astype(np.float64)
    img = ne.evaluate('(img**2)*wgt', out=img)
    return img/img.max() if normalized else img
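A hedged usage sketch, assuming the module imports numpy as np, numexpr as ne, and medfilt2d, and provides the _calc_histequal_wgt helper; the projection here is synthetic:

import numpy as np

proj = np.random.rand(512, 512).astype(np.float32)
enhanced = _enhance_img(proj, median_ks=5)  # normalized to [0, 1] by default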
Example #28
    def calculate_std(self,niter=1,spatial_smooth=None,spectral_smooth=None):
        """
        Calculates the naive values for the scale and norms under the
        assumption that the median absolute deviation is a rigorous method.
        """

        data = self.cube.get_filled_data().astype('=f')
        self.scale = nanstd(data)
        if self.spatial_norm is None:
            self.spatial_norm = np.ones((data.shape[1],data.shape[2]))
            self.spectral_norm = np.ones((data.shape[0]))
        for count in range(niter):
            scale = self.get_scale_cube()
            snr = data/scale
            self.spatial_norm = nanstd(snr,axis=0)*self.spatial_norm
            if self.beam is not None:
                self.spatial_norm = convolve_fft(self.spatial_norm, 
                    self.beam.as_kernel(get_pixel_scales(self.cube.wcs)),
                    interpolate_nan=True,normalize_kernel=True)
            if spatial_smooth is not None:
                self.spatial_norm = ssig.medfilt2d(self.spatial_norm,
                    kernel_size=spatial_smooth)

            snr = data/self.get_scale_cube()
            self.spectral_norm = nanstd(snr.reshape((snr.shape[0],
                                                     snr.shape[1]*
                                                     snr.shape[2])),
                                        axis=1)*self.spectral_norm
            if spectral_smooth is not None:
                self.spectral_norm = ssig.medfilt(self.spectral_norm,
                    kernel_size=spectral_smooth)
        self.spectral_norm[np.isnan(self.spectral_norm) | (self.spectral_norm==0)]=1.
        self.spatial_norm[np.isnan(self.spatial_norm) | (self.spatial_norm==0)]=1.
        self.spatial_norm[~self.spatial_footprint]=np.nan
        self.distribution_shape=(0,self.scale)    
        return
Example #29
    def updateFrogRoi(self):
        root.debug(''.join(('Roi pos: ', str(self.frogRoi.pos()))))
        root.debug(''.join(('Roi size: ', str(self.frogRoi.size()))))
        if self.scanData.size != 0: 
            root.debug(''.join(('Scan data: ', str(self.scanData.shape))))
            
            bkg = self.scanData[0,:]
            bkgImg = self.scanData - np.tile(bkg, (self.scanData.shape[0], 1))
            roiImg = self.frogRoi.getArrayRegion(bkgImg, self.frogImageWidget.getImageItem(), axes=(1,0))
            roiImg = roiImg / roiImg.max()
            
            root.debug('Slice complete')
            thr = self.frogThresholdSpinbox.value()
            kernel = self.frogKernelSpinbox.value()
            root.debug('Starting medfilt...')
            filteredImg = medfilt2d(roiImg, kernel) - thr
#             filteredImg = roiImg - thr
            root.debug('Filtering complete')
            filteredImg[filteredImg<0.0] = 0.0
            root.debug('Threshold complete')
            self.frogRoiImageWidget.setImage(filteredImg)
            root.debug('Set image complete')
            self.frogRoiImageWidget.autoRange()
            root.debug('Autorange complete')
Example #30
        os.makedirs(SAVE_F)
        os.makedirs(SAVE_M)
    except os.error:
        pass

    files = os.listdir(PATH)
    for i in range(len(files)):
        file = files[i]
        input_x = SimpleITK.GetArrayFromImage(
            SimpleITK.ReadImage(PATH + "/" + file))
        input_x = np.asarray(input_x).reshape([512, 512, 3])

        sx1_, mask_x1_ = sess.run(
            [sx, mask_x],
            feed_dict={x: np.asarray([input_x[:, :, 0:1]]).astype('float32')})
        sx1_ = signal.medfilt2d(np.asarray(sx1_)[0, :, :, 0, ],
                                kernel_size=k_size1)
        mask_x1_ = signal.medfilt2d(np.asarray(mask_x1_)[0, :, :, 0, ],
                                    kernel_size=k_size2)

        sx2_, mask_x2_ = sess.run(
            [sx, mask_x],
            feed_dict={x: np.asarray([input_x[:, :, 1:2]]).astype('float32')})
        sx2_ = signal.medfilt2d(np.asarray(sx2_)[0, :, :, 0, ],
                                kernel_size=k_size1)
        mask_x2_ = signal.medfilt2d(np.asarray(mask_x2_)[0, :, :, 0, ],
                                    kernel_size=k_size2)

        sx3_, mask_x3_ = sess.run(
            [sx, mask_x],
            feed_dict={x: np.asarray([input_x[:, :, 2:3]]).astype('float32')})
        sx3_ = signal.medfilt2d(np.asarray(sx3_)[0, :, :, 0, ],
Example #31
def BiomassForestHeightSKPD(
    data_stack,
    cov_est_window_size,
    pixel_spacing_slant_rg,
    pixel_spacing_az,
    incidence_angle_rad,
    carrier_frequency_hz,
    range_bandwidth_hz,
    kz_stack,
    vertical_vector,
    proc_conf,
):

    power_threshold = proc_conf.power_threshold

    # data_stack is a dictionary of two nested dictionaries composed as:
    # data_stack[ acquisition_name ][ polarization ]

    num_acq = len(data_stack)
    acq_names = list(data_stack.keys())
    first_acq_dict = data_stack[acq_names[0]]
    pol_names = list(first_acq_dict.keys())
    num_pols = len(pol_names)
    Nrg, Naz = first_acq_dict[pol_names[0]].shape
    Nz = np.size(vertical_vector)

    # Covariance estimation
    (
        MPMB_correlation,
        rg_vec_subs,
        az_vec_subs,
        subs_F_r,
        subs_F_a,
    ) = main_correlation_estimation_SR(
        data_stack,
        cov_est_window_size,
        pixel_spacing_slant_rg,
        pixel_spacing_az,
        incidence_angle_rad,
        carrier_frequency_hz,
        range_bandwidth_hz,
    )

    Nrg_subs = rg_vec_subs.size
    Naz_subs = az_vec_subs.size

    # Initialization of the SKPD routine
    class SKPD_kernel_opt_str:
        pass

    SKPD_kernel_opt_str.num_acq = num_acq
    SKPD_kernel_opt_str.num_pols = num_pols
    SKPD_kernel_opt_str.Nsubspaces = 2
    SKPD_kernel_opt_str.Nparam = 2
    SKPD_kernel_opt_str.error = np.zeros((Nrg_subs, Naz_subs, 4, 2))

    # Single polarimetric channel selector
    wpol = (np.kron(np.eye(num_pols), np.ones((1, num_acq)))) > 0

    tomo_cube = np.zeros((Nrg_subs, Naz_subs, Nz))
    Nrg_subs_string = str(Nrg_subs)
    for rg_sub_idx in np.arange(Nrg_subs):

        logging.info("   Heigth step " + str(rg_sub_idx + 1) + " of " +
                     Nrg_subs_string)
        for az_sub_idx in np.arange(Naz_subs):

            # Spectra estimation initialization
            class spectra:
                pass

            spectra.temp = np.zeros((Nz, 4))
            current_MPMB_correlation = MPMB_correlation[:, :, rg_sub_idx,
                                                        az_sub_idx]

            # SKPD processing
            SKPD_kernel_out_str = SKPD_processing(current_MPMB_correlation,
                                                  SKPD_kernel_opt_str)

            if np.any(SKPD_kernel_out_str.error):
                # Calibrating with respect to the linked phases of the
                # best polarimetric channel
                Rcoh_thin = Covariance2D2Correlation2D(
                    np.reshape(
                        current_MPMB_correlation[wpol[0, :][:, np.newaxis] *
                                                 wpol[0, :][np.newaxis, :]],
                        (num_acq, num_acq),
                    ))
                Rcoh_fat = Covariance2D2Correlation2D(
                    np.reshape(
                        current_MPMB_correlation[wpol[1, :][:, np.newaxis] *
                                                 wpol[1, :][np.newaxis, :]],
                        (num_acq, num_acq),
                    ))
                Rincoh_thin = Covariance2D2Correlation2D(
                    np.reshape(
                        current_MPMB_correlation[wpol[2, :][:, np.newaxis] *
                                                 wpol[2, :][np.newaxis, :]],
                        (num_acq, num_acq),
                    ))
                Rincoh_fat = np.random.randn(
                    num_acq, num_acq) + 1j * np.random.randn(num_acq, num_acq)

            else:
                # Scattering mechanisms
                Rcoh_thin = Covariance2D2Correlation2D(
                    SKPD_kernel_out_str.Rcoh_thin)
                Rcoh_fat = Covariance2D2Correlation2D(
                    SKPD_kernel_out_str.Rcoh_fat)
                Rincoh_thin = Covariance2D2Correlation2D(
                    SKPD_kernel_out_str.Rincoh_thin)
                Rincoh_fat = Covariance2D2Correlation2D(
                    SKPD_kernel_out_str.Rincoh_fat)

            # Steering matrix
            current_kz = np.zeros((num_acq, 1))
            for b_idx, stack_curr in enumerate(kz_stack.values()):
                current_kz[b_idx] = stack_curr[rg_vec_subs[rg_sub_idx],
                                               az_vec_subs[az_sub_idx]]

            A = np.exp(1j * current_kz * vertical_vector) / num_acq

            # Spectra estimation
            for m in np.arange(4):
                currR = (m == 0) * Rcoh_thin + (m == 1) * Rcoh_fat + (
                    m == 2) * Rincoh_thin + (m == 3) * Rincoh_fat
                if proc_conf.enable_super_resolution:
                    # Capon
                    currR = currR + proc_conf.regularization_noise_factor * np.eye(
                        currR.shape[0])
                    spectra.temp[:, m] = 1 / np.diag(
                        np.abs(
                            A.conj().transpose() @ np.linalg.inv(currR) @ A))
                else:
                    spectra.temp[:, m] = np.diag(
                        np.abs(A.conj().transpose() @ currR @ A))

            # Volume mechanism recognized thanks to its higher elevation
            max_index = np.argmax(spectra.temp, axis=0)
            max_m = np.argmax(max_index)
            tomo_cube[rg_sub_idx, az_sub_idx, :] = spectra.temp[:, max_m]

    # Estimating canopy elevation by looking at the decay
    class opt_str:
        pass

    opt_str.z = vertical_vector
    opt_str.thr = power_threshold  # Power decay threshold With respect to the peak value
    out_str = UpperThresholdForestHeight(tomo_cube, opt_str)
    canopy_height = out_str.z
    power_peak = out_str.peak
    canopy_height = medfilt2d(canopy_height.astype("float64"), kernel_size=5)

    return canopy_height, power_peak, rg_vec_subs, az_vec_subs, subs_F_r, subs_F_a
Example #32
def pyzapspec(infile, 
              outfile='',
              maskfile='', 
              WRITE_OUTFILE=False,
              DEBUG_DIR='../test_data/',DEBUG=False,
              boxsize=9,nsigma=15.,subsigma=2.8,sfactor=1.0,
              nzap=0,mask=0,writemodel=0,verbose=0,skysubtract=0,
              zero=0,method=0,usamp=0,ybin=0,nan=-999,inmaskname=0,**kwargs):
    # defaults
    if len(outfile) == 0:
        dirpath,infile_base = os.path.split(infile)
        outfile_base = 'cr{}'.format(infile_base)
        outfile = os.path.join(dirpath,outfile_base)

    if len(maskfile) == 0:
        dirpath,infile_base = os.path.split(infile)
        maskfile_base = 'cr{}_mask.fits'.format(infile_base.split('.')[0])
        maskfile = os.path.join(dirpath,maskfile_base)

    tstart = time.time()

    # read in a copy of the input image to construct the output instance
    outimgHDU = fits.open(infile)
    outimg = outimgHDU[0].data
    header = outimgHDU[0].header

    dims = outimg.shape
    nx = dims[1] #spectral
    ny = dims[0] #spatial
    outmask = np.full((ny,nx),0)
    ymedimage = np.zeros((ny,nx))
    xmedimage = np.zeros((ny,nx))
    zapimage = np.zeros((ny,nx))

    nzapCount = 0
    iterCount = 0
    nbadCount = 1

    # first do a crude median subtraction of the sky lines
    for x in range(nx):
        ymedimage[:,x] = np.median(outimg[:,x])
    ysubimage = outimg - ymedimage

    # now subtract traces so we can do a better sky-line subtraction
    # otherwise traces will be killed/wings oversubtracted
    sourceblocksize = 100
    nxblock = int(np.ceil(1.*nx/sourceblocksize))
    realsourcefiltsize = 1*sourceblocksize
    x0 = (np.arange(0,nxblock)*realsourcefiltsize).astype(int)
    x1 = np.append(x0[1:]-1,nx-1)
    xs0 = np.insert(x0[0:nxblock-1],x0[0],0)
    xs1 = np.append(x1[1:nxblock],x1[nxblock-1])

    for b in range(nxblock):
        for y in range(ny):
            xmedimage[y, x0[b]:x1[b]+1] = np.median(ysubimage[y, xs0[b]:xs1[b]+1])

    kernel = 1.*np.arange(0,realsourcefiltsize/2)
    kernelRev = kernel[::-1]
    kernel = np.append(kernel,kernelRev)
    kernel = kernel / np.sum(kernel)

    # pzap is convolving the 2D xmedimage with a 1D kernel, 
    # I'm pretty sure this is just a row by row convolution.
    for xrow in range(ny):
        newRow = np.convolve(xmedimage[xrow,:],kernel,mode='same')
        xmedimage[xrow,:] = 1.*newRow
    xsubimage = outimg - xmedimage

    # here I'm assuming the method=0 (pzapspec line 226),
    # since I don't feel like coding up the alternative right now
    skyblocksize = 40
    nyblock = int(np.ceil(1.*ny/skyblocksize))
    realskyblocksize = 1.*skyblocksize
    y0 = (np.arange(0,nyblock)*realskyblocksize).astype(int)
    y1 = np.append(y0[1:]-1,ny-1)
    ys0 = np.insert(y0[0:nyblock-1],y0[0],0)
    ys1 = np.append(y1[1:nyblock],y1[nyblock-1])

    for b in range(nyblock):
        for x in range(nx):
            scm = sigclipmedian(xsubimage[ys0[b]:ys1[b]+1,x])
            ymedimage[y0[b]:y1[b]+1,x] = scm

    kernel = 1.*np.arange(0,realskyblocksize/2)
    kernelRev = kernel[::-1]
    kernel = np.append(kernel,kernelRev)
    kernel = kernel / np.sum(kernel)

    # pzap is convolving the 2D xmedimage with a 1D kernel, 
    # I'm pretty sure this is just a column by column convolution.
    for ycol in range(nx):
        newCol = np.convolve(ymedimage[:,ycol],kernel,mode='same')
        ymedimage[:,ycol] = 1.*newCol
    ysubimage = outimg - ymedimage


    skysubimage = outimg - ymedimage - xmedimage  # actually subtracts sky AND sources.

    filterimage = signal.medfilt2d(skysubimage,[boxsize,boxsize])
    residualimage = skysubimage - filterimage

    sigmaimage = np.zeros((ny,nx)) + np.nan
    nyb = round(ny / 200)
    yint = np.ceil(ny*1./nyb)

    for yb in range(int(nyb)):
        for x in range(nx):
            # select the pixels in this chunk of rows
            selectRowIndsLo = int(yb*yint)
            selectRowIndsHi = int(np.min([(yb+1)*yint,ny])) # this indexing is potentially hazardous
            s = residualimage[selectRowIndsLo : selectRowIndsHi, x]
            goodInds = np.where(np.isfinite(s))[0] # select the pixels with finite values
            goodIndsCount = len(goodInds)
            # make sure we have enough pixels to process
            if goodIndsCount < 2:
                continue
            s = np.sort(s[goodInds]) #now actually select and sort them
            ns = len(s)
            # exclude the tails of the distribution
            keepIndsLo = int(np.floor(0.03*ns))
            keepIndsHi = int(np.ceil(0.97*ns))
            s = s[keepIndsLo:keepIndsHi]
            sigmaimage[selectRowIndsLo : selectRowIndsHi, x] = np.std(s,ddof=1)

    sigmaimage = np.sqrt(sigmaimage**2 + (np.fabs(filterimage*sfactor) + np.fabs(xmedimage)))  # this scaling assumes gain ~ 1
    residualnsigmaimage = residualimage / sigmaimage


    residualnsigmaimage_ravel = residualnsigmaimage.ravel() # this is a view of residualnsigmaimage
    zapimage_ravel = zapimage.ravel() # this is a view of zapimage
    skysubimage_ravel = skysubimage.ravel() # this is a view of zapimage

    crcores = np.where(residualnsigmaimage_ravel > nsigma)[0]
    newzaps = len(crcores)
    if newzaps > 0:
        zapimage_ravel[crcores] = 1


    # #this is hacky but MIGHT needed for host analysis 
    # zapimage[50:80, 1280:1360] = 0 #halpha keck
    # zapimage_ravel = zapimage.ravel()


    # res = writefits(zapimage,'zapimage1.fits',CLOBBER=True)
    if DEBUG:
        res = writefits(zapimage,'{}/zapimage.fits'.format(DEBUG_DIR),CLOBBER=True)


    outStr = 'Flagged {} initial affected pixels before percolation.'.format(newzaps)
    print(outStr)

    nperczap = 0
    # iterCount = 0
    iterCount = 1 #testing
    d0 = nx
    d1 = ny

    # percolate outward to get all pixels covered by each cosmic ray.
    while iterCount < 32:
        nextperc = np.where(zapimage_ravel == iterCount)[0]
        ct = len(nextperc)
        iterCount += 1
        newzaps = 0

        if ct <= 3:
            break

        nrays = len(nextperc)
        for c in range(nrays):
            ci = nextperc[c]

            # avoid the detector edges
            if ci < d0-1 or ci > nx*ny-d0-2:
                continue

                 # here's the structure assumed in the IDL
                 # ci is the CR pixel we're looking at, 
                 # this is trying to check the neighboring pixels

                 # ci-d0-1,    ci-d0,    ci-d0+1, 
                 #    ci-1,      ci      ci+1, 
                 # ci+d0-1,    ci+d0,    ci+d0+1 

                 # the way the IDL where() function works in the original pzap code
                 # makes indexing the arrays this way trivial since it does the
                 # ravel implicitly, but np.where() works differently. The easiest way
                 # to handle this is to retain this neighbor indexing from the
                 # original IDL pzap code and just run the np.where() on ravel'd
                 # numpy arrays.

                 # just to be explicit, if you have an N-D numpy array x, then
                 # x.ravel() will return a VIEW of that array mapped to a
                 # 1-D array, so a 2x2 array becomes a 4 element 1-D array. The
                 # important thing to remember is that if you modify a VIEW of
                 # an array, it will modify your original array too.

            coarseblockpos = np.array([       ci-d0, 
                                        ci-1,        ci+1, 
                                              ci+d0])
            newzap = np.where( (np.fabs(residualnsigmaimage_ravel[coarseblockpos]) > subsigma) &
                               (zapimage_ravel[coarseblockpos] == 0) )[0]
            addzap = len(newzap)

            if addzap > 0:
                zapimage_ravel[coarseblockpos[newzap]] = iterCount
                newzaps += addzap
            nperczap += addzap


    # finally, zap anything hiding in a cosmic ray "corner" (three neighbors are cosmic ray pixels)
    countneighbor = np.zeros((ny,nx))
    countneighbor_ravel = countneighbor.ravel()
    nextperc = np.where(zapimage_ravel > 0)[0]
    ct = len(nextperc)
    if ct > 3:
        nrays = len(nextperc)
        for c in range(nrays):
            ci = nextperc[c]

            # avoid the detector edges
            # this wasn't present in the IDL code, but I believe it's necessary
            if ci < d0-1 or ci > nx*ny-d0-2:
                continue
            coarseblockpos = np.array([ci-d0-1,    ci-d0,    ci-d0+1, 
                                          ci-1,              ci+1, 
                                       ci+d0-1,    ci+d0,    ci+d0+1])
            countneighbor_ravel[coarseblockpos] = countneighbor_ravel[coarseblockpos] + 1

        newzap = np.where((countneighbor_ravel > 3) &
                          (zapimage_ravel == 0))[0]
        newzaps = len(newzap)
        if newzaps > 0:
            zapimage_ravel[newzap] = iterCount +1


    # actually do the zapping by replacing with the sky plus local median value.
    ibad = np.where(zapimage_ravel != 0)[0]
    nbad = len(ibad)

    if nbad > 0:
        skysubimage_ravel[ibad] = np.nan # NaNs ignored by median() and derivative image modelers 

        # filterimage is really the replacement image (without sky and sources)
        filterimage = signal.medfilt2d(skysubimage,[boxsize,boxsize])

        witer = 0
        ctnan = -1

        while ctnan != 0 and witer <= 2:
            if ctnan > 0:
                # note not active 0th iteration, since we already did the median above
                # so its ok that filterbad is not yet defined...
                filterimage.ravel()[filterbad] = signal.medfilt2d(filterimage, [boxsize,boxsize]).ravel()[filterbad]

            filterbad = np.where( ~np.isfinite(filterimage.ravel()) & 
                                   np.isfinite(xmedimage.ravel()) &
                                   np.isfinite(ymedimage.ravel()))[0]
            ctnan = len(filterbad)
            witer += 1
            if witer == 2 and ctnan > 0:
                filterimage.ravel()[filterbad] = 0   # give up

        outimg.ravel()[ibad] = filterimage.ravel()[ibad] + ymedimage.ravel()[ibad] + xmedimage.ravel()[ibad]
        outmask.ravel()[ibad] = 1

    nzap += nbad

    outStr = 'Zapped {} pixels in {} seconds'.format(nzap,int(time.time()-tstart))
    print(outStr)

    histStr = 'Processed by pyzapspec UT {}'.format(datetime.datetime.now())
    header.add_history(histStr)

    # res = writefits(zapimage,'zapimage2.fits',CLOBBER=True)
    if DEBUG:
        res = writefits(ymedimage,'{}/ymedimage.fits'.format(DEBUG_DIR),CLOBBER=True)
        res = writefits(xmedimage,'{}/xmedimage.fits'.format(DEBUG_DIR),CLOBBER=True)
        res = writefits(ysubimage,'{}/ysubimage.fits'.format(DEBUG_DIR),CLOBBER=True)
        res = writefits(xsubimage,'{}/xsubimage.fits'.format(DEBUG_DIR),CLOBBER=True)
        res = writefits(skysubimage,'{}/fullsubimage.fits'.format(DEBUG_DIR),CLOBBER=True)
        res = writefits(zapimage,'{}/zapimage.fits'.format(DEBUG_DIR),CLOBBER=True)
        res = writefits(outimg,outfile,header=header,CLOBBER=True)
        res = writefits(outmask,maskfile,CLOBBER=True)
        pdb.set_trace()

    if WRITE_OUTFILE:
        res = writefits(outimg,outfile,header=header,CLOBBER=True)
        # res = writefits(outmask,maskfile,CLOBBER=True)

    return outimg,outmask,header

Example #33
def get_image_features(data_type, block):
    """
    Method which returns the data type expected
    """

    if 'filters_statistics' in data_type:

        img_width, img_height = 200, 200

        lab_img = transform.get_LAB_L(block)
        arr = np.array(lab_img)

        # compute all filters statistics
        def get_stats(arr, I_filter):

            e1 = np.abs(arr - I_filter)
            L = np.array(e1)
            mu0 = np.mean(L)
            A = L - mu0
            H = A * A
            E = np.sum(H) / (img_width * img_height)
            P = np.sqrt(E)

            return mu0, P
            # return np.mean(I_filter), np.std(I_filter)

        stats = []

        kernel = np.ones((3, 3), np.float32) / 9
        stats.append(get_stats(arr, cv2.filter2D(arr, -1, kernel)))

        kernel = np.ones((5, 5), np.float32) / 25
        stats.append(get_stats(arr, cv2.filter2D(arr, -1, kernel)))

        stats.append(get_stats(arr, cv2.GaussianBlur(arr, (3, 3), 0.5)))

        stats.append(get_stats(arr, cv2.GaussianBlur(arr, (3, 3), 1)))

        stats.append(get_stats(arr, cv2.GaussianBlur(arr, (3, 3), 1.5)))

        stats.append(get_stats(arr, cv2.GaussianBlur(arr, (5, 5), 0.5)))

        stats.append(get_stats(arr, cv2.GaussianBlur(arr, (5, 5), 1)))

        stats.append(get_stats(arr, cv2.GaussianBlur(arr, (5, 5), 1.5)))

        stats.append(get_stats(arr, medfilt2d(arr, [3, 3])))

        stats.append(get_stats(arr, medfilt2d(arr, [5, 5])))

        stats.append(get_stats(arr, wiener(arr, [3, 3])))

        stats.append(get_stats(arr, wiener(arr, [5, 5])))

        wave = w2d(arr, 'db1', 2)
        stats.append(get_stats(arr, np.array(wave, 'float64')))

        data = []

        for stat in stats:
            data.append(stat[0])

        for stat in stats:
            data.append(stat[1])

        data = np.array(data)

    if 'statistics_extended' in data_type:

        data = get_image_features('filters_statistics', block)

        # add kolmogorov complexity
        bytes_data = np.array(block).tobytes()
        compress_data = gzip.compress(bytes_data)

        mo_size = sys.getsizeof(compress_data) / 1024.
        go_size = mo_size / 1024.
        data = np.append(data, go_size)

        lab_img = transform.get_LAB_L(block)
        arr = np.array(lab_img)

        # add of svd entropy
        svd_entropy = utils.get_entropy(compression.get_SVD_s(arr))
        data = np.append(data, svd_entropy)

        # add sobel complexity (kernel size of 3)
        sobelx = cv2.Sobel(arr, cv2.CV_64F, 1, 0, ksize=3)
        sobely = cv2.Sobel(arr, cv2.CV_64F, 0, 1, ksize=3)

        sobel_mag = np.array(np.hypot(sobelx, sobely), 'uint8')  # magnitude

        data = np.append(data, np.std(sobel_mag))

        # add sobel complexity (kernel size of 5)
        sobelx = cv2.Sobel(arr, cv2.CV_64F, 1, 0, ksize=5)
        sobely = cv2.Sobel(arr, cv2.CV_64F, 0, 1, ksize=5)

        sobel_mag = np.array(np.hypot(sobelx, sobely), 'uint8')  # magnitude

        data = np.append(data, np.std(sobel_mag))

    if 'lab' in data_type:

        data = transform.get_LAB_L_SVD_s(block)

    return data
Example #34
def pretty2D(dataset):
    # split data into positive and negative sets for plotting contours
    pos_data = zeros([ptsMax1, ptsMax2])
    neg_data = zeros([ptsMax1, ptsMax2])
    someNegData = False
    for i in range(ptsMax1):
        for j in range(ptsMax2):
            if dataset[i, j] > 0:
                pos_data[i, j] = dataset[i, j]
            else:
                someNegData = True
                neg_data[i, j] = abs(dataset[i, j])

    # filtering
    pos_data = medfilt2d(pos_data, kernel_size=(5, 5))
    neg_data = medfilt2d(neg_data, kernel_size=(5, 5))

    # note: transposing
    extreme = 0.5 * max(abs(dataset).flatten())
    ax_main.contour(pos_data.T,
                    levels,
                    extent=[min(wn2), max(wn2),
                            min(wn1), max(wn1)],
                    origin='lower',
                    colors='r',
                    linewidths=0.5)
    if someNegData:
        ax_main.contour(neg_data.T,
                        levels,
                        extent=[min(wn2),
                                max(wn2),
                                min(wn1),
                                max(wn1)],
                        origin='lower',
                        colors='b',
                        linewidths=0.5)

    ax_main.set_xlabel(xname)
    ax_main.set_ylabel(yname)

    for p in peaks:
        ax_main.axvline(p, color='k', ls='-', lw=0.5)
        ax_main.axhline(p, color='k', ls='-', lw=0.5)

    ax_main.xaxis.set_major_locator(MaxNLocator(5))
    ax_main.yaxis.set_major_locator(MaxNLocator(5))

    if sys.argv[1] == sys.argv[2]:
        ax_main.plot([min(wn), max(wn)], [min(wn), max(wn)], 'k-', lw=0.5)

    divider = make_axes_locatable(ax_main)

    ax_right = divider.append_axes('right', 0)
    #   ax_right = divider.append_axes('right', 0.4, pad=0.05, sharey=ax_main)
    #   ax_right.fill_betweenx(wn2, 0, ref2, where=ref2<0, facecolor='0.9', lw=0)
    #   ax_right.fill_betweenx(wn2, ref2, 0, where=ref2>0, facecolor='0.9', lw=0)
    #   ax_right.plot(ref2, wn2, 'k-')
    for p in peaks:
        ax_right.axhline(p, color='k', ls='-', lw=0.5)
    if min(ref2) < 0. and max(ref2) > 0:
        ax_right.axvline(0., color='k', ls='--', lw=0.5, dashes=[2, 2])
    pylab.setp(ax_right.get_xticklabels() + ax_right.get_yticklabels(),
               visible=False)
    pylab.setp(ax_right.get_xticklines() + ax_right.get_yticklines(),
               visible=False)

    ax_top = divider.append_axes('top', 0)
    #   ax_top = divider.append_axes('top', 0.4, pad=0.05, sharex=ax_main)
    #   ax_top.fill_between(wn1, 0, ref1, where=ref1<0, facecolor='0.9', lw=0)
    #   ax_top.fill_between(wn1, ref1, 0, where=ref1>0, facecolor='0.9', lw=0)
    #   ax_top.plot(wn1, ref1, 'k-')
    for p in peaks:
        ax_top.axvline(p, color='k', ls='-', lw=0.5)
    if min(ref1) < 0. and max(ref1) > 0:
        ax_top.axhline(0., color='k', ls='--', lw=0.5, dashes=[2, 2])
    pylab.setp(ax_top.get_xticklabels() + ax_top.get_yticklabels(),
               visible=False)
    pylab.setp(ax_top.get_xticklines() + ax_top.get_yticklines(),
               visible=False)
    ax_top.set_ylim(0.9 * min(ref1), 1.1 * max(ref1))

    ax_main.set_xlim(min(wn1), max(wn1))
    ax_main.set_ylim(min(wn2), max(wn2))
    return ax_top
Example #35
    # mask_x = get_mask(x, p=2, beta=0.2)

    fx = get_f(x, j=alpha)
    fx = gaussian_blur(fx, sigma=0.3, alpha=0.05, bin=True)
    mask_x = get_mask(x, p=2, beta=beta)

with tf.Session(graph=graph,
                config=tf.ConfigProto(allow_soft_placement=True)) as sess:
    try:
        os.makedirs(SAVE_F)
        os.makedirs(SAVE_M)
    except os.error:
        pass

    # files = os.listdir(PATH)
    # for file in files:
    input_x = SimpleITK.GetArrayFromImage(
        SimpleITK.ReadImage(PATH + "/" + file))
    input_x = np.asarray(input_x).reshape([512, 512, 1])
    fx_, mask_x_ = sess.run(
        [fx, mask_x], feed_dict={x: np.asarray([input_x]).astype('float32')})
    fx_ = signal.medfilt2d(np.asarray(fx_)[0, :, :, 0, ], kernel_size=k_size1)
    mask_x_ = signal.medfilt2d(np.asarray(mask_x_)[0, :, :, 0, ],
                               kernel_size=k_size2)
    new_file = file.replace(".tiff", ".tiff")
    SimpleITK.WriteImage(SimpleITK.GetImageFromArray((1.0 - mask_x_) * fx_),
                         SAVE_F + "/" + new_file)
    SimpleITK.WriteImage(SimpleITK.GetImageFromArray(mask_x_),
                         SAVE_M + "/" + new_file)
    print(file + "==>" + new_file)
Example #36
def noise_clipping_filter(lo_images_roi, hi_images_roi, apply_log=True):
    M = lo_images_roi.shape[0]
    de_images_clip = lo_images_roi.copy()

    #slopes from the paper
    up_slope = 0.44  #0.72
    down_slope = 0.72  #0.44

    # only return the high images, as only they have been modified
    high_images = []
    for i in range(M):

        if apply_log:
            lo_bkg = signal.medfilt2d(
                np.log(lo_images_roi[i, :, :]).astype(np.float32))
            hi_bkg = signal.medfilt2d(
                np.log(hi_images_roi[i, :, :]).astype(np.float32))

            #then subtract each of images from background
            lo_contrast = (np.log(lo_images_roi[i, :, :]) - lo_bkg)
            hi_contrast = (np.log(hi_images_roi[i, :, :]) - hi_bkg)

            #convert zeros to one
            lo_contrast[lo_contrast == 0] = 1
            #hi_contrast[hi_contrast==0] =1

            #now define ratio
            ratio = hi_contrast / lo_contrast

            clip_hi_contrast = hi_contrast.copy()
            #now clip slopes
            clip_hi_contrast[
                ratio > up_slope] = up_slope * lo_contrast[ratio > up_slope]
            clip_hi_contrast[ratio < down_slope] = down_slope * lo_contrast[
                ratio < down_slope]

            #now convert back images to normal
            clip_hi_tmp = (clip_hi_contrast.copy() + hi_bkg)
            clip_hi_norm = (2**16 - 1) * (clip_hi_tmp) / (clip_hi_tmp.max() -
                                                          clip_hi_tmp.min())
            # clip_hi_contrast = np.exp(clip_hi_contrast)

            high_images.append(clip_hi_norm)

        else:
            #first compute low and high background
            lo_bkg = signal.medfilt2d(lo_images_roi[i, :, :].astype(
                np.float32))
            hi_bkg = signal.medfilt2d(hi_images_roi[i, :, :].astype(
                np.float32))

            #then subtract each of images from background
            lo_contrast = (lo_images_roi[i, :, :] - lo_bkg)
            hi_contrast = (hi_images_roi[i, :, :] - hi_bkg)

            #convert zeros to one
            lo_contrast[lo_contrast == 0] = 1
            hi_contrast[hi_contrast == 0] = 1

            #clip the high-energy contrast against the low-energy contrast,
            #mirroring the log branch above (the original snippet left this
            #branch unfinished, so apply_log=False returned an empty array)
            ratio = hi_contrast / lo_contrast
            clip_hi_contrast = hi_contrast.copy()
            clip_hi_contrast[
                ratio > up_slope] = up_slope * lo_contrast[ratio > up_slope]
            clip_hi_contrast[ratio < down_slope] = down_slope * lo_contrast[
                ratio < down_slope]
            high_images.append(clip_hi_contrast + hi_bkg)

        #look at the ratio between hi_contrast and lo_contrast:
        #hi pixel values are clipped to min(h_pixel, 0.72) and max(h_pixel, 0.44)
        # plt.plot(lo_contrast.ravel(),clip_hi_contrast.ravel(),"b.")
        # plt.xlabel("Low contrast")
        # plt.ylabel("High contrast")
        # plt.xlim(-0.1,0.1)
        # plt.ylim(-0.1,0.1)
        # plt.show()
        # fig,axes = plt.subplots(ncols=2)
        # ax = axes.ravel()
        # ax[0].imshow(hi_images_roi[-1],label="original",cmap="gray")
        # ax[1].imshow(high_images[-1],label="clipped",cmap="gray")
        # plt.show()

    return np.array(high_images)
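# Hedged usage sketch (not part of the original example): synthetic
# dual-energy stacks; the stack shapes and strictly positive value range are
# assumptions so that np.log stays finite.
rng = np.random.default_rng(0)
lo_stack = rng.uniform(1.0, 100.0, size=(2, 64, 64))  # low-energy ROI stack
hi_stack = rng.uniform(1.0, 100.0, size=(2, 64, 64))  # high-energy ROI stack
clipped = noise_clipping_filter(lo_stack, hi_stack, apply_log=True)
print(clipped.shape)  # -> (2, 64, 64)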
Example #37
0
def test_whole_image(imgIn,
                     blkSize,
                     numSample,
                     filter=False,
                     solver="L2",
                     display=False,
                     lambda1=0):
    # Make Sure That We are Not Trying to Sample More Points than the Size of the Mask
    assert (numSample < blkSize * blkSize)
    # Set the Desired Dimensions
    dimension = (blkSize, blkSize)  #(8, 8)  # Dimension for Block
    # Read the Boat Images
    matrix = imgRead(imgIn)[:, :, 0]  # Read Image into Matrix
    P, Q = matrix.shape[0], matrix.shape[1]  # Width and Length of Matrix
    # Create Transformation matrix
    T_Matrix = DCT_Matrix(dimension[0], dimension[1])
    # Split the Image Into Patches
    patches = image.extract_patches_2d(
        matrix, dimension)  # Turn into Patches the main Matrix
    # Randomly initialize the mask
    mask = create_mask(numSample, dimension[0] *
                       dimension[1])  # New Mask is Made in Each Iteration
    # Transform the Patches
    new_image = []
    # MSE LIST
    MSE_List = []
    for patch in tqdm(patches):
        # Proceed To Do Function On Each Patch
        if solver == "L1":
            new_patch = transform_Patch(dimension, mask, patch, T_Matrix,
                                        solver)
        if solver == "L2":
            new_patch = transform_Patch(dimension, mask, patch, T_Matrix,
                                        solver)
        if solver == "Lasso":
            new_patch = transform_Patch(dimension,
                                        mask,
                                        patch,
                                        T_Matrix,
                                        solver,
                                        lambda1=lambda1)
        # Apply Median Filter
        if filter == True:
            new_patch = medfilt2d(new_patch, kernel_size=3)
        # Append Patch To Image to Recreate Image
        new_image.append(new_patch)
        # Calculate MSE Square
        MSE = mean_squared_error(patch, new_patch)
        MSE_List.append(MSE)
    # Convert List to Numpy Matrix in Desired Dimensions
    new_image = np.asarray(new_image)
    # print("")
    # print(new_image[0])
    # print(new_image.shape)
    # Calculate MSE In Each Image
    MSE_List = np.asarray(MSE_List)
    #print("MSE Average:", np.average(MSE_List))
    # Use SciKit To Reconstruct the Image
    reconstructed_image = image.reconstruct_from_patches_2d(new_image,
                                                            image_size=(P, Q))
    # Image MSE
    Image_MSE = mean_squared_error(matrix, reconstructed_image)
    print("MSE Total:", Image_MSE)
    if display == True:
        plt.imshow(reconstructed_image)
        fig, (ax_1, ax_2) = plt.subplots(nrows=1, ncols=2, sharex=True)
        ax_1.set_title("Original Image")
        ax_1.imshow(matrix)
        ax_2.set_title("Reconstructed Image")
        ax_2.imshow(reconstructed_image)
        plt.savefig("Block16x16.png")
        title = "Block Size = " + str(blkSize) + " x " + str(
            blkSize) + " & Mask=" + str(numSample)
        fig.suptitle(title)
        plt.show()
    return reconstructed_image, Image_MSE
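# Hedged usage sketch (not part of the original example): "boat.png" is a
# hypothetical path, and imgRead, DCT_Matrix, create_mask and transform_Patch
# are helpers assumed to live in the same module.
# recon, mse = test_whole_image("boat.png", blkSize=8, numSample=32,
#                               filter=True, solver="L2", display=False)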
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
from PIL import Image
from scipy import signal
from scipy import misc
from skimage import data
from skimage import transform

imsrc = Image.open('pepper_salt_graph.jpg').convert('L')
ax = plt.imshow(imsrc, cmap='gray')
plt.show()

# median filter (convert the PIL image to a NumPy array first; medfilt2d
# operates on 2-D arrays)
imarr = np.asarray(imsrc)
imout = signal.medfilt2d(imarr, 3)
plt.imshow(imout, cmap='gray')
plt.show()

# detailed image format conversion, see https://www.jianshu.com/p/bdd9bfcbedb7
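# Hedged, self-contained variant (an assumption, not from the original
# snippet): build a synthetic salt-and-pepper image so the median filter can
# be tried without 'pepper_salt_graph.jpg'.
demo = np.full((128, 128), 128, dtype=np.uint8)
noise = np.random.rand(128, 128)
demo[noise < 0.05] = 0      # pepper
demo[noise > 0.95] = 255    # salt
demo_filtered = signal.medfilt2d(demo, kernel_size=3)
fig, (ax1, ax2) = plt.subplots(ncols=2)
ax1.imshow(demo, cmap='gray')
ax1.set_title('noisy')
ax2.imshow(demo_filtered, cmap='gray')
ax2.set_title('median 3x3')
plt.show()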
#==============================================================================
# # now interpolate onto grid
#==============================================================================
data_points = np.array([oned_res_arr['grid_north'], oned_res_arr['grid_east']])

pad_n = mdm.pad_north
pad_e = mdm.pad_east

x = mdm.grid_north[pad_n:-pad_n - 1]
y = mdm.grid_east[pad_e:-pad_e - 1]

new_north, new_east = np.meshgrid(x, y)

# apply a median filter to get rid of outlying points
rs = signal.medfilt2d(oned_res_arr['res'], kernel_size=(5, 3))

new_res = np.zeros_like(mdm.res_model)
for z_index in range(mdm.nodes_z.size):
    new_res[pad_n:-pad_n, pad_e:-pad_e,
            z_index] = interpolate.griddata(data_points.T,
                                            rs[:, z_index],
                                            (new_north, new_east),
                                            method='cubic',
                                            fill_value=fill_res).T

new_res[np.where(new_res == 0.0)] = fill_res


#==============================================================================
# # need to fill the model so there are no hard boundaries
print 'Convolution'
print '~~~~~~~~~~~~~~'

#method = "_convolution"
#dem_convolution_filter = signal.convolve(image_array,filter1,mode='same')
#print dem_convolution_filter.shape

#filtered_image = dem_convolution_filter
#filtered_image_name = "dem_convolution_filter_kernel_%i" % kernel

print '~~~~~~~~~~~~~~'
print 'Median'
print '~~~~~~~~~~~~~~'

method = "_median"
dem_median_filter = signal.medfilt2d(image_array,kernel_size=kernel)
print dem_median_filter.shape

filtered_image = dem_median_filter
filtered_image_name = "dem_median_filter_kernel_%i" % kernel

print '~~~~~~~~~~~~~~'
print 'Resample the result and display'
print '~~~~~~~~~~~~~~'

#print "%s (%s):" % (filtered_image, method)
#print filtered_image

#print "%s (%s) resampled by a factor of 0.25" % (filtered_image_name, method)
#dem_filtered_025_refactor = ndimage.zoom(filtered_image, 0.25, order=0) ## essentially refactors the array by a factor the order value denotes the interpolation algorithm used
#print "%s (%s) resampled for viewing:" % (filtered_image_name, method)
Example #41
0
def filtering(x, y, z):
    m_z = np.reshape(
        z,
        (len(np.unique(y)), len(np.unique(x))))  # Transform array into matrix
    s = medfilt2d(m_z)
    return np.reshape(s, (int(len(x)), ))
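# Hedged usage sketch (not part of the original example): flattened grid
# data, here a 10x10 grid giving x, y, z of length 100 each.
gx, gy = np.meshgrid(np.arange(10.0), np.arange(10.0))
zz = (gx + gy).ravel()
smoothed = filtering(gx.ravel(), gy.ravel(), zz)
print(smoothed.shape)  # -> (100,)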
Example #42
0
def augmentDepth(depth, obj_mask, mask_ori, shadowClK, shadowMK, blurK, blurS, depthNoise, method):

    sensor = True
    simplex = True

    if method == 0:
        pass
    elif method == 1:
        sensor = True
        simplex = False
    elif method == 2:
        sensor = False
        simplex = True

    # erode and blur mask to get more realistic appearance
    partmask = mask_ori
    partmask = partmask.astype(np.float32)
    #mask = partmask > (np.median(partmask) * 0.4)
    partmask = np.where(partmask > 0.0, 255.0, 0.0)

    cv2.imwrite('/home/sthalham/partmask.png', partmask)

    # apply shadow
    kernel = np.ones((shadowClK, shadowClK))
    partmask = cv2.morphologyEx(partmask, cv2.MORPH_OPEN, kernel)
    partmask = signal.medfilt2d(partmask, kernel_size=shadowMK)
    partmask = partmask.astype(np.uint8)
    mask = partmask > 20
    depth = np.where(mask, depth, 0.0)

    if sensor is True:
        depthFinal = cv2.resize(depth, None, fx=1 / 2, fy=1 / 2)
        res = (((depthFinal / 1000.0) * 1.41421356) ** 2)
        depthFinal = cv2.GaussianBlur(depthFinal, (blurK, blurK), blurS, blurS)
        # quantize to the depth resolution and apply gaussian noise
        dNonVar = np.divide(depthFinal, res, out=np.zeros_like(depthFinal), where=res != 0)
        dNonVar = np.round(dNonVar)
        dNonVar = np.multiply(dNonVar, res)
        noise = np.multiply(dNonVar, depthNoise)
        depthFinal = np.random.normal(loc=dNonVar, scale=noise, size=dNonVar.shape)

        depth = cv2.resize(depthFinal, (resX, resY))

    if simplex is True:
        # fast perlin noise
        seed = np.random.randint(2 ** 31)
        N_threads = 4
        perlin = fns.Noise(seed=seed, numWorkers=N_threads)
        drawFreq = random.uniform(0.05, 0.2)  # 0.05 - 0.2
        # drawFreq = 0.5
        perlin.frequency = drawFreq
        perlin.noiseType = fns.NoiseType.SimplexFractal
        perlin.fractal.fractalType = fns.FractalType.FBM
        drawOct = [4, 8]
        freqOct = np.bincount(drawOct)
        rndOct = np.random.choice(np.arange(len(freqOct)), 1, p=freqOct / len(drawOct), replace=False)
        perlin.fractal.octaves = rndOct
        perlin.fractal.lacunarity = 2.1
        perlin.fractal.gain = 0.45
        perlin.perturb.perturbType = fns.PerturbType.NoPerturb

        # linemod
        if not sensor:
            # noise according to keep it unreal
            #noiseX = np.random.uniform(0.0001, 0.1, resX * resY) # 0.0001 - 0.1
            #noiseY = np.random.uniform(0.0001, 0.1, resX * resY) # 0.0001 - 0.1
            #noiseZ = np.random.uniform(0.01, 0.1, resX * resY) # 0.01 - 0.1
            #Wxy = np.random.randint(0, 10) # 0 - 10
            #Wz = np.random.uniform(0.0, 0.005) #0 - 0.005
            noiseX = np.random.uniform(0.001, 0.01, resX * resY)  # 0.0001 - 0.1
            noiseY = np.random.uniform(0.001, 0.01, resX * resY)  # 0.0001 - 0.1
            noiseZ = np.random.uniform(0.01, 0.1, resX * resY)  # 0.01 - 0.1
            Wxy = np.random.randint(2, 5)  # 0 - 10
            Wz = np.random.uniform(0.0001, 0.004)  # 0 - 0.005
        else:
            noiseX = np.random.uniform(0.001, 0.01, resX * resY) # 0.0001 - 0.1
            noiseY = np.random.uniform(0.001, 0.01, resX * resY) # 0.0001 - 0.1
            noiseZ = np.random.uniform(0.01, 0.1, resX * resY) # 0.01 - 0.1
            Wxy = np.random.randint(1, 5) # 1 - 5
            Wz = np.random.uniform(0.0001, 0.004) #0.0001 - 0.004
        # tless
        #noiseX = np.random.uniform(0.001, 0.1, resX * resY)  # 0.0001 - 0.1
        #noiseY = np.random.uniform(0.001, 0.1, resX * resY)  # 0.0001 - 0.1
        #noiseZ = np.random.uniform(0.01, 0.1, resX * resY)  # 0.01 - 0.1
        #Wxy = np.random.randint(2, 8)  # 0 - 10
        #Wz = np.random.uniform(0.0, 0.005)


        X, Y = np.meshgrid(np.arange(resX), np.arange(resY))
        coords0 = fns.empty_coords(resX * resY)
        coords1 = fns.empty_coords(resX * resY)
        coords2 = fns.empty_coords(resX * resY)

        coords0[0, :] = noiseX.ravel()
        coords0[1, :] = Y.ravel()
        coords0[2, :] = X.ravel()
        VecF0 = perlin.genFromCoords(coords0)
        VecF0 = VecF0.reshape((resY, resX))

        coords1[0, :] = noiseY.ravel()
        coords1[1, :] = Y.ravel()
        coords1[2, :] = X.ravel()
        VecF1 = perlin.genFromCoords(coords1)
        VecF1 = VecF1.reshape((resY, resX))

        coords2[0, :] = noiseZ.ravel()
        coords2[1, :] = Y.ravel()
        coords2[2, :] = X.ravel()
        VecF2 = perlin.genFromCoords(coords2)
        VecF2 = VecF2.reshape((resY, resX))

        x = np.arange(resX, dtype=np.uint16)
        x = x[np.newaxis, :].repeat(resY, axis=0)
        y = np.arange(resY, dtype=np.uint16)
        y = y[:, np.newaxis].repeat(resX, axis=1)

        # vanilla
        #fx = x + Wxy * VecF0
        #fy = y + Wxy * VecF1
        #fx = np.where(fx < 0, 0, fx)
        #fx = np.where(fx >= resX, resX - 1, fx)
        #fy = np.where(fy < 0, 0, fy)
        #fy = np.where(fy >= resY, resY - 1, fy)
        #fx = fx.astype(dtype=np.uint16)
        #fy = fy.astype(dtype=np.uint16)
        #Dis = depth[fy, fx] + Wz * VecF2
        #depth = np.where(Dis > 0, Dis, 0.0)

        #print(x.shape)
        #print(np.amax(depth))
        #print(np.amin(depth))
        Wxy_scaled = depth * 0.001 * Wxy
        Wz_scaled = depth * 0.001 * Wz
        # scale with depth
        fx = x + Wxy_scaled * VecF0
        fy = y + Wxy_scaled * VecF1
        fx = np.where(fx < 0, 0, fx)
        fx = np.where(fx >= resX, resX - 1, fx)
        fy = np.where(fy < 0, 0, fy)
        fy = np.where(fy >= resY, resY - 1, fy)
        fx = fx.astype(dtype=np.uint16)
        fy = fy.astype(dtype=np.uint16)
        Dis = depth[fy, fx] + Wz_scaled * VecF2
        depth = np.where(Dis > 0, Dis, 0.0)

    return depth
                )
            )

            # remove the static shift from the injection survey
            s, new_z = edi_inj.Z.no_ss(1 / sx, 1 / sy)
            edi_inj.Z.z = new_z

            # --> fill z arrays
            z_base_arr[ss, :, :, :] = edi_base.Z.z
            z_inj_arr[ss, :, :, :] = edi_inj.Z.z

    # --> apply a spatial median filter
    for ii in range(2):
        for jj in range(2):
            z_base_arr[:, :, ii, jj].real = sps.medfilt2d(
                z_base_arr[:, :, ii, jj].real, kernel_size=mks
            )
            z_base_arr[:, :, ii, jj].imag = sps.medfilt2d(
                z_base_arr[:, :, ii, jj].imag, kernel_size=mks
            )
            z_inj_arr[:, :, ii, jj].real = sps.medfilt2d(
                z_inj_arr[:, :, ii, jj].real, kernel_size=mks
            )
            z_inj_arr[:, :, ii, jj].imag = sps.medfilt2d(
                z_inj_arr[:, :, ii, jj].imag, kernel_size=mks
            )

    # --> rewrite files
    for ss, station in enumerate(station_list):
        fn_base = os.path.join(edipath_base, station + ".edi")
        fn_inj = os.path.join(edipath_inj, station + ".edi")
Example #44
0
def getLimitData(pipeline, pos):
    color_image, depth_image = getInputData(pipeline)
    color_cut_image = color_image[pos[0]:pos[1], pos[2]:pos[3]]
    depth_cut_image = depth_image[pos[0]:pos[1], pos[2]:pos[3]].astype(float)
    depth_cut_image = signal.medfilt2d(depth_cut_image, (3, 3))
    return color_cut_image, depth_cut_image
Example #45
0
def median_filtering(img):
    smooth_img = np.zeros(img.shape)
    smooth_img[:, :, 0] = signal.medfilt2d(img[:, :, 0])
    smooth_img[:, :, 1] = signal.medfilt2d(img[:, :, 1])
    smooth_img[:, :, 2] = signal.medfilt2d(img[:, :, 2])
    return smooth_img
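# Hedged usage sketch (not part of the original example): any HxWx3 array
# works; medfilt2d runs per channel with its default 3x3 kernel.
img_rgb = np.random.rand(32, 32, 3)
smooth = median_filtering(img_rgb)
print(smooth.shape)  # -> (32, 32, 3)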
Example #46
0
def find_slits_corners_aps_1id(
    img,
    method='quadrant+',
    medfilt2_kernel_size=3,
    medfilt_kernel_size=23,
):
    """
    Automatically locate the slit box location by its four corners.

    NOTE:
    The four slits that form a binding box is the current setup at aps_1id,
    which reduce the illuminated region on the detector. Since the slits are
    stationary, they can serve as a reference to check detector drifting
    during the scan. Technically, the four slits should be used to find
    the transformation matrix (not necessarily affine) to correct the image.
    However, since we are dealing with 2D images with very little distortion,
    affine transformation matrices were used for approximation. Therefore
    the "four corners" are used instead of all four slits.

    Parameters
    ----------
    img : np.ndarray
        2D images
    method : str,  ['simple', 'quadrant', 'quadrant+'], optional
        method for auto detecting slit corners
            - simple    :: assume a rectangle slit box, fast but less accurate
                           (1 pixel precision)
            - quadrant  :: subdivide the image into four quadrants, then use
                           an explicit method to find the corner
                           (1 pixel precision)
            - quadrant+ :: similar to quadrant, but use curve_fit (gauss1d) to
                           find the corner
                           (0.1 pixel precision)
    medfilt2_kernel_size : int, optional
        2D median filter kernel size for noise reduction
    medfilt_kernel_size : int, optional
        1D median filter kernel size for noise reduction

    Returns
    -------
    tuple
        autodetected slit corners (counter-clockwise order)
        (upperLeft, lowerLeft, lowerRight, upperRight)
    """
    img = medfilt2d(
        np.log(img.astype(np.float64)),
        kernel_size=medfilt2_kernel_size,
    )
    rows, cols = img.shape

    # simple method is simple, therefore it stands out
    if method.lower() == 'simple':
        # assuming a rectangle type slit box
        col_std = medfilt(np.std(img, axis=0), kernel_size=medfilt_kernel_size)
        row_std = medfilt(np.std(img, axis=1), kernel_size=medfilt_kernel_size)
        # NOTE: in the tiff img
        #  x is col index, y is the row index  ==> key point here !!!
        #  img slicing is done with img[row_idx, col_idx]
        #  ==> so the image idx and corner position are FLIPPED!
        _left = np.argmax(np.gradient(col_std))
        _right = np.argmin(np.gradient(col_std))
        _top = np.argmax(np.gradient(row_std))
        _bottom = np.argmin(np.gradient(row_std))

        cnrs = np.array([
            [_left, _top],
            [_left, _bottom],
            [_right, _bottom],
            [_right, _top],
        ])
    else:
        # predefine all quadrants
        # Here let's assume that the four corners of the slit box are in the
        # four quadrant defined by the center of the image
        # i.e.
        #  upper left  quadrant: img[0     :cnt[1], 0     :cnt[0]]  => quadrant origin = (0,      0)
        #  lower left  quadrant: img[cnt[1]:      , 0     :cnt[0]]  => quadrant origin = (0,      cnt[1])
        #  lower right quadrant: img[cnt[1]:      , cnt[0]:      ]  => quadrant origin = (cnt[0], cnt[1])
        #  upper right quadrant: img[0     :cnt[1], cnt[0]:      ]  => quadrant origin = (cnt[0], 0)
        # center of image that defines FOUR quadrants
        cnt = [int(cols / 2), int(rows / 2)]
        Quadrant = namedtuple('Quadrant', 'img col_func, row_func')
        quadrants = [
            Quadrant(img=img[0:cnt[1], 0:cnt[0]],
                     col_func=np.argmax,
                     row_func=np.argmax),  # upper left,  1st quadrant
            # lower left,  2nd quadrant
            Quadrant(img=img[cnt[1]:, 0:cnt[0]],
                     col_func=np.argmax,
                     row_func=np.argmin),
            # lower right, 3rd quadrant
            Quadrant(img=img[cnt[1]:, cnt[0]:],
                     col_func=np.argmin,
                     row_func=np.argmin),
            # upper right, 4th quadrant
            Quadrant(img=img[0:cnt[1], cnt[0]:],
                     col_func=np.argmin,
                     row_func=np.argmax),
        ]
        # the origin in each quadrants ==> easier to set it here
        quadrantorigins = np.array([
            [0, 0],  # upper left,  1st quadrant
            [0, cnt[1]],  # lower left,  2nd quadrant
            # lower right, 3rd quadrant
            [cnt[0], cnt[1]],
            [cnt[0], 0],  # upper right, 4th quadrant
        ])
        # init four corners
        cnrs = np.zeros((4, 2))
        if method.lower() == 'quadrant':
            # the standard quadrant method
            for i, q in enumerate(quadrants):
                cnrs[i, :] = np.array([
                    q.col_func(
                        np.gradient(
                            medfilt(np.std(q.img, axis=0),
                                    kernel_size=medfilt_kernel_size))
                    ),  # x is col_idx
                    q.row_func(
                        np.gradient(
                            medfilt(np.std(q.img, axis=1),
                                    kernel_size=medfilt_kernel_size))),
                    # y is row_idx
                ])
            # add the origin offset back
            cnrs = cnrs + quadrantorigins
        elif method.lower() == 'quadrant+':
            # use Gaussian curve fitting to achieve subpixel precision
            # TODO:
            # improve the curve fitting with Lorentz and Voigt fitting function
            for i, q in enumerate(quadrants):
                # -- find x subpixel position
                cnr_x_guess = q.col_func(
                    np.gradient(
                        medfilt(np.std(q.img, axis=0),
                                kernel_size=medfilt_kernel_size)))
                # isolate the strongest peak to fit
                tmpx = np.arange(cnr_x_guess - 10, cnr_x_guess + 11)
                tmpy = np.gradient(np.std(q.img, axis=0))[tmpx]
                # tmpy[0] is the value from the highest/lowest pixel
                # tmpx[0] is basically cnr_x_guess
                # 5.0 is the guessed std
                coeff, _ = curve_fit(
                    gauss1d,
                    tmpx,
                    tmpy,
                    p0=[tmpy[0], tmpx[0], 5.0],
                    maxfev=int(1e6),
                )
                cnrs[i, 0] = coeff[1]  # x position
                # -- find y subpixel position
                cnr_y_guess = q.row_func(
                    np.gradient(
                        medfilt(np.std(q.img, axis=1),
                                kernel_size=medfilt_kernel_size)))
                # isolate the peak (x, y here is only associated with the peak)
                tmpx = np.arange(cnr_y_guess - 10, cnr_y_guess + 11)
                tmpy = np.gradient(np.std(q.img, axis=1))[tmpx]
                coeff, _ = curve_fit(
                    gauss1d,
                    tmpx,
                    tmpy,
                    p0=[tmpy[0], tmpx[0], 5.0],
                    maxfev=int(1e6),
                )
                cnrs[i, 1] = coeff[1]  # y position
            # add the quadrant shift back
            cnrs = cnrs + quadrantorigins

        else:
            raise NotImplementedError(
                "Available methods are: simple, quadrant, quadrant+")

    # return the slit corner detected
    return cnrs
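# Hedged usage sketch (not part of the original example): a synthetic frame
# with one bright rectangular region standing in for the slit box. Only the
# 'simple' method is exercised here; 'quadrant+' additionally needs the
# module's gauss1d helper and scipy's curve_fit.
frame = np.full((200, 200), 2.0)
frame[50:150, 60:140] += 100.0  # illuminated region inside the slits
corners = find_slits_corners_aps_1id(frame, method='simple')
print(corners)  # approximate (col, row) corner coordinates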
Example #47
0
def test_whole_image_KFOLD(imgIn,
                           blkSize,
                           numSample,
                           filter=False,
                           solver="L2",
                           display=False,
                           lambda1=0,
                           K_FOLD=True,
                           Solve=True,
                           fileName="Results.txt",
                           sampleSize=100):
    # Make Sure That We are Not Trying to Sample More Points than the Size of the Mask
    assert (numSample < blkSize * blkSize)

    # Set the Desired Dimensions
    dimension = (blkSize, blkSize)  #(8, 8)  # Dimension for Block

    # Read the Boat Images
    matrix = imgRead(imgIn)[:, :, 0]  # Read Image into Matrix
    P, Q = matrix.shape[0], matrix.shape[1]  # Width and Length of Matrix

    # Create Transformation matrix
    T_Matrix = DCT_Matrix(dimension[0], dimension[1])

    # Split the Image Into Patches
    patches = image.extract_patches_2d(
        matrix, dimension)  # Turn into Patches the main Matrix

    # Randomly initialize the mask
    mask = create_mask(numSample, dimension[0] *
                       dimension[1])  # New Mask is Made in Each Iteration
    # Conduct K-Folds to find the best lambda value
    txtFile = fileName + ".txt"
    if K_FOLD == True and solver == "Lasso":
        ############################# Declare Training Patch ###########################################################
        training_patches = []
        for i in range(sampleSize):
            j = np.random.randint(len(patches))
            training_patches.append(patches[j])
        training_patches = np.asarray(training_patches)
        ############################ Lambda List #######################################################################
        # Declare Lambda Range
        lambdas = [10**(-i) for i in range(1, 6)]
        # Begin Optimal Testing
        #print("Conducting K-Fold Testing to Find Optimal Lambda")
        kf = KFold(n_splits=6, shuffle=True)  # Declare K-Fold
        lambda_list = []  # Lambda List
        lambdas_list = []  # Lambdas
        index_i = 1  # Index
        ################################## Conduct K-Folds #############################################################
        for i in tqdm(range(20), desc="Run", position=0):
            print("##################### Running Trial", index_i,
                  "######################")  # Print Fold Number
            index_i += 1
            for train_index, test_index in kf.split(training_patches):

                X_train, X_test = training_patches[
                    train_index], training_patches[test_index]
                ################################ Training Fold #############################################################
                # Lambda Error
                lambda_error_list = []
                # Now We Have The Index of the Models We Need To train
                for lambda2 in tqdm(lambdas, desc="Lambda", position=0):
                    # Randomly initialize the mask
                    mask = create_mask(
                        numSample, dimension[0] *
                        dimension[1])  # New Mask is Made in Each Iteration
                    MSE_LIST = []
                    for patch in tqdm(
                            X_train, desc="Training", position=1
                    ):  # Go Through the Training Portion for Lambda
                        local_mse_list = []  # local MSE list for this patch
                        for n in range(1):  # a single repetition per patch
                            new_patch = transform_Patch(
                                dimension,
                                mask,
                                patch,
                                T_Matrix,
                                solver,
                                lambda1=lambda2)  # Optimize Patch
                            MSE = mean_squared_error(
                                patch, new_patch)  # Calculate MSE
                            local_mse_list.append(
                                MSE)  # Append to Local MSE LIST
                        MSE_LIST.append(mean(local_mse_list))
                    # Now Convert the MSE_LIST TO NUMPY and calculate Mean
                    mean_mse_list = mean(MSE_LIST)
                    #print("Average of Lambda:", lambda2, "is:", mean_mse_list)
                    lambda_error_list.append(mean_mse_list)
                #print(lambdas)
                #print(lambda_error_list)
                ############################################################################################################
                best_lambda_index = lambda_error_list.index(
                    min(lambda_error_list))
                best_lambda = lambdas[best_lambda_index]
                #print("Best Lambda:",best_lambda)
                test_mse_list = []
                for patch in tqdm(X_test, desc="Testing", position=1):
                    new_patch = transform_Patch(
                        dimension,
                        mask,
                        patch,
                        T_Matrix,
                        solver,
                        lambda1=best_lambda)  # Optimize Patch
                    MSE = mean_squared_error(patch, new_patch)  # Calculate MSE
                    test_mse_list.append(MSE)
                mean_test_mse_list = mean(test_mse_list)
                #print("MSE of Test:", mean_test_mse_list)
                lambdas_list.append(best_lambda)
                lambda_list.append(mean_test_mse_list)
            # Find the Best Overall Lambda
        min_final_mse = min(lambda_list)
        print("Minimum MSE:", min_final_mse)
        final_lambda_index = lambda_list.index(min_final_mse)
        final_lambda = lambdas_list[final_lambda_index]
        print("Best Lambda:", final_lambda)
        print_line = str(final_lambda) + " " + str(min_final_mse) + "\n"
        # Append Final Results to File
        with open(fileName, 'a') as f:
            f.write(print_line)

    if Solve == True:
        print(
            '############################ Solving Final Image Using best Lambda########################################'
        )
        # NOTE: solver == "Lasso" requires the K_FOLD pass above to have set
        # final_lambda
        # Randomly initialize the mask
        mask = create_mask(numSample, dimension[0] *
                           dimension[1])  # New Mask is Made in Each Iteration
        patches = image.extract_patches_2d(
            matrix, dimension)  # Turn into Patches the main Matrix
        # Transform the Patches
        new_image = []
        # MSE LIST
        MSE_List = []
        for patch in tqdm(patches):
            # Proceed To Do Function On Each Patch
            if solver == "L1":
                new_patch = transform_Patch(dimension, mask, patch, T_Matrix,
                                            solver)
            if solver == "L2":
                new_patch = transform_Patch(dimension, mask, patch, T_Matrix,
                                            solver)
            if solver == "Lasso":
                new_patch = transform_Patch(dimension,
                                            mask,
                                            patch,
                                            T_Matrix,
                                            solver,
                                            lambda1=final_lambda)
            # Apply Median Filter
            if filter == True:
                new_patch = medfilt2d(new_patch, kernel_size=3)
            # Append Patch To Image to Recreate Image
            new_image.append(new_patch)
            # Calculate MSE Square
            MSE = mean_squared_error(patch, new_patch)
            MSE_List.append(MSE)
        # Convert List to Numpy Matrix in Desired Dimensions
        new_image = np.asarray(new_image)
        # print("")
        # print(new_image[0])
        # print(new_image.shape)

        # Calculate MSE In Each Image
        MSE_List = np.asarray(MSE_List)
        #print("MSE Average:", np.average(MSE_List))

        # Use SciKit To Reconstruct the Image
        reconstructed_image = image.reconstruct_from_patches_2d(new_image,
                                                                image_size=(P,
                                                                            Q))
        # Image MSE
        Image_MSE = mean_squared_error(matrix, reconstructed_image)

        if display == True:
            with open(fileName, 'a') as f:
                linePrint = "MSE Total With Original: " + str(Image_MSE) + "\n"
                f.write(linePrint)
            plt.imshow(reconstructed_image)
            fig, (ax_1, ax_2) = plt.subplots(nrows=1, ncols=2, sharex=True)
            ax_1.set_title("Original Image")
            ax_1.imshow(matrix)
            ax_2.set_title("Reconstructed Image")
            ax_2.imshow(reconstructed_image)
            plt.savefig("Block16x16.png")
            title = "Block Size = " + str(blkSize) + " x " + str(
                blkSize) + " & Mask=" + str(numSample) + " Lambda:" + str(
                    final_lambda)
            fig.suptitle(title)
            plotName = fileName + ".png"
            plt.savefig(plotName)
        return reconstructed_image, Image_MSE
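# Hedged usage sketch (not part of the original example): "lena.png" is a
# hypothetical path; the K-fold pass appends the best lambda/MSE pairs to
# fileName before the final solve runs with that lambda.
# recon, mse = test_whole_image_KFOLD("lena.png", blkSize=8, numSample=32,
#                                     solver="Lasso", K_FOLD=True, Solve=True,
#                                     fileName="Results.txt", sampleSize=100)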
def Conv_MRELBP(image, pars, savepath=None, sample=None, normalize=True):
    """Calculates MRELBP using convolutions. Alternate method for calculating LBP features."""
    # Unpack parameters
    n = pars['N']
    r_large = pars['R']
    r_small = pars['r']
    w_large = pars['wl']
    w_small = pars['ws']
    w_center = pars['wc']

    # Whiten the image
    imu = image.mean()
    istd = image.std()
    im = (image - imu) / istd
    # Get image dimensions
    h, w = im.shape[:2]
    # Make kernels
    kR = []
    kr = []
    dtheta = np.pi * 2 / n
    for k in range(0, n):
        _kernel = weight_matrix_bilin(r_large, -k * dtheta, val=0)
        kR.append(_kernel)

        _kernel = weight_matrix_bilin(r_small, -k * dtheta, val=0)
        kr.append(_kernel)

    # Make median filtered images
    imc = medfilt2d(im.copy(), w_center)
    imR = medfilt2d(im.copy(), w_large)
    imr = medfilt2d(im.copy(), w_small)

    # Get LBP images
    neighbR = np.zeros((h, w, n))
    neighbr = np.zeros((h, w, n))
    for k in range(n):
        _neighb = correlate(imR, kR[k])
        neighbR[:, :, k] = _neighb
        _neighb = correlate(imr, kr[k])
        neighbr[:, :, k] = _neighb

    # Crop valid convolution region
    d = r_large + w_large // 2
    h -= 2 * d
    w -= 2 * d

    neighbR = neighbR[d:-d, d:-d, :]
    neighbr = neighbr[d:-d, d:-d, :]
    imc = imc[d:-d, d:-d]

    # Subtraction
    _muR = neighbR.mean(2).reshape(h, w, 1)
    for k in range(n):
        try:
            muR = np.concatenate((muR, _muR), 2)
        except NameError:
            muR = _muR

    _mur = neighbr.mean(2).reshape(h, w, 1)
    for k in range(n):
        try:
            mur = np.concatenate((mur, _mur), 2)
        except NameError:
            mur = _mur

    diffc = (imc - imc.mean()) >= 0
    diffR = (neighbR - muR) >= 0
    diffr = (neighbr - mur) >= 0
    diffR_r = (neighbR - neighbr) >= 0

    # Compute lbp images
    lbpc = diffc
    lbpR = np.zeros((h, w))
    lbpr = np.zeros((h, w))
    lbpR_r = np.zeros((h, w))
    for k in range(n):
        lbpR += diffR[:, :, k] * (2**k)
        lbpr += diffr[:, :, k] * (2**k)
        lbpR_r += diffR_r[:, :, k] * (2**k)
    # Get LBP histograms
    histc = np.zeros((1, 2))
    histR = np.zeros((1, 2**n))
    histr = np.zeros((1, 2**n))
    histR_r = np.zeros((1, 2**n))

    histc[0, 0] = (lbpc == 1).astype(np.float32).sum()
    histc[0, 1] = (lbpc == 0).astype(np.float32).sum()

    for k in range(2**n):
        histR[0, k] = (lbpR == k).astype(np.float32).sum()
        histr[0, k] = (lbpr == k).astype(np.float32).sum()
        histR_r[0, k] = (lbpR_r == k).astype(np.float32).sum()

    # Mapping
    mapping = get_mapping(n)
    histR = map_lbp(histR, mapping)
    histr = map_lbp(histr, mapping)
    histR_r = map_lbp(histR_r, mapping)

    # Histogram normalization
    if normalize:
        histc /= np.sum(histc)
        histR /= np.sum(histR)
        histr /= np.sum(histr)
        histR_r /= np.sum(histR_r)

    # Append histograms
    hist = np.concatenate((histc, histR, histr, histR_r), 1)

    if savepath is not None and sample is not None:
        print_images([lbpR, lbpr, lbpR_r],
                     subtitles=['Large', 'Small', 'Radial'],
                     title=sample,
                     save_path=savepath,
                     sample=sample + '.png')

    return hist
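# Hedged usage sketch (not part of the original example): the parameter-dict
# layout is inferred from the unpacking at the top of Conv_MRELBP, and the
# helpers (weight_matrix_bilin, correlate, get_mapping, map_lbp) are assumed
# to come from the surrounding module.
# pars = {'N': 8, 'R': 9, 'r': 3, 'wl': 5, 'ws': 3, 'wc': 5}
# hist = Conv_MRELBP(np.random.rand(100, 100), pars)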
def MRELBP(image,
           parameters,
           eps=1e-06,
           normalize=False,
           args=None,
           sample=None):
    """ Takes Median Robust Extended Local Binary Pattern from image im
    Uses n neighbours from radii r_large and r_small, r_large must be larger than r_small
    Median filter uses kernel sizes weight_center for center pixels, w_r[0] for larger radius and w_r[1]
    #or smaller radius
    Grayscale values are centered at their mean and scales with global standad deviation

    Parameters
    ----------
    image : ndarray
        Input image. Standardized to local contrast in the pipelines.
    parameters : dict
        Dictionary containing LBP parameters:
        N = Number of neighbours used in MRELBP (4 orthogonal and 4 diagonal neighbours).
        R = Distance of center pixel from neighbours used in obtaining large image.
        r = Distance of center pixel from neighbours used in obtaining small image.
        wc = Kernel size used in median filtering center image.
        wl = Kernel size used in median filtering large LBP image.
        ws = Kernel size used in median filtering small LBP image.
    eps : float
        Error residual. Defaults to 1e-6
    normalize : bool
        Choice whether to normalize LBP histograms by sum.
    args : str
        Path for saving LBP images.
    sample : str
        Name of the sample used in saving images.
    Returns
    -------
    MRELBP histograms calculated with rotation invariant uniform mapping.
    Length of 32 (2 center + 10 large + 10 small + 10 radial).
    """

    n = parameters['N']
    r_large = parameters['R']
    r_small = parameters['r']
    weight_center = parameters['wc']
    weight_large = parameters['wl']
    weight_small = parameters['ws']

    # Mean grayscale value and std
    mean_image = image.mean()
    std_image = image.std()

    # Centering and scaling with std
    image_scaled = (image - mean_image) / std_image

    # Median filtering
    image_center = medfilt2d(image_scaled.copy(), weight_center)
    # Center pixels
    dist = round(r_large + (weight_large - 1) / 2)
    image_center = image_center[dist:-dist, dist:-dist]
    # Subtracting the mean pixel value from center pixels
    image_center -= image_center.mean()
    # Binning center pixels
    center_hist = np.zeros((1, 2))
    center_hist[0, 0] = np.sum(image_center >= 0)
    center_hist[0, 1] = np.sum(image_center < 0)

    # --------------- #
    # center_hist[0,0] = np.sum(image_center>=-1e-06)
    # center_hist[0,1] = np.sum(image_center<-1e-06)
    # --------------- #

    # Median filtered images for large and small radius
    image_large = medfilt2d(image_scaled.copy(), weight_large)
    image_small = medfilt2d(image_scaled.copy(), weight_small)

    # Neighbours
    pi = np.pi
    # Empty arrays for the neighbours
    row, col = np.shape(image_center)
    n_large = np.zeros((row, col, n))
    n_small = np.zeros((row, col, n))

    for k in range(n):
        # Angle to the neighbour
        theta = k * (-1 * 2 * pi / n)
        # Large neighbourhood
        x = dist + r_large * np.cos(theta)
        y = dist + r_large * np.sin(theta)
        if abs(x - round(x)) < eps and abs(y - round(y)) < eps:
            x = int(round(x))
            y = int(round(y))
            p = image_large[y:y + row, x:x + col]
        else:
            p = image_bilinear(image_large, col, x, row, y)
        n_large[:, :, k] = p
        # Small neighbourhood
        x = dist + r_small * np.cos(theta)
        y = dist + r_small * np.sin(theta)
        if abs(x - round(x)) < eps and abs(y - round(y)) < eps:
            x = int(round(x))
            y = int(round(y))
            p = image_small[y:y + row, x:x + col]
        else:
            p = image_bilinear(image_small, col, x, row, y)
        n_small[:, :, k] = p

    # Thresholding radial neighbourhood
    n_radial = n_large - n_small

    # Subtraction of means
    mean_large = n_large.mean(axis=2)
    mean_small = n_small.mean(axis=2)
    for k in range(n):
        n_large[:, :, k] -= mean_large
        n_small[:, :, k] -= mean_small

    # Converting to binary images and taking the lbp values

    # Initialization of arrays
    lbp_large = np.zeros((row, col))
    lbp_small = np.zeros((row, col))
    lbp_radial = np.zeros((row, col))

    for k in range(n):
        lbp_large += (n_large[:, :, k] >=
                      0) * 2**k  # NOTE ACCURACY FOR THRESHOLDING!!!
        lbp_small += (n_small[:, :, k] >= 0) * 2**k
        lbp_radial += (n_radial[:, :, k] >= 0) * 2**k
        # --------------- #
        # lbp_large += (n_large[:,:,k] >= -(eps ** 2)) * 2 ** k  # NOTE ACCURACY FOR THRESHOLDING!!!
        # lbp_small += (n_small[:,:,k] >= -(eps ** 2)) * 2 ** k
        # lbp_radial += (n_radial[:,:,k] >= -(eps ** 2)) * 2 ** k
        # --------------- #

    # Calculating histograms with 2 ^ N bins
    large_hist = np.zeros((1, 2**n))
    small_hist = np.zeros((1, 2**n))
    radial_hist = np.zeros((1, 2**n))
    for k in range(2**n):
        large_hist[0, k] = np.sum(lbp_large == k)
        small_hist[0, k] = np.sum(lbp_small == k)
        radial_hist[0, k] = np.sum(lbp_radial == k)

    # Rotation invariant uniform mapping
    mapping = get_mapping(n)
    large_hist = map_lbp(large_hist, mapping)
    small_hist = map_lbp(small_hist, mapping)
    radial_hist = map_lbp(radial_hist, mapping)

    # # Individual histogram normalization
    # if  normalize:
    #     center_hist /= np.sum(center_hist)
    #     large_hist /= np.sum(large_hist)
    #     small_hist /= np.sum(small_hist)
    #     radial_hist /= np.sum(radial_hist)

    # Concatenate histograms
    hist = np.concatenate((center_hist, large_hist, small_hist, radial_hist),
                          1)

    if normalize:
        hist /= np.sum(hist)

    if args is not None and args.save_images and (('21_L3L' in sample) or
                                                  ('20_R2M' in sample)):

        # Map LBP images
        lbp_large_mapped = map_lbp(lbp_large, mapping)
        lbp_small_mapped = map_lbp(lbp_small, mapping)
        lbp_radial_mapped = map_lbp(lbp_radial, mapping)
        lbp_list = [lbp_large_mapped, lbp_small_mapped, lbp_radial_mapped]

        # Load coefficients
        coefs, _ = load_excel(args.save_path + '/' + 'weights_surf_sub.xlsx',
                              titles=['Weights_lin', 'Weights_log'])
        thresh = 0.1
        lin = coefs[0]
        log = coefs[1]
        lin = np.abs(np.insert(lin, [2, 9, 10, 17], 0)) > thresh
        log = np.abs(np.insert(log, [2, 9, 10, 17], 0)) > thresh

        masks = [
            np.zeros(lbp_large.shape),
            np.zeros(lbp_large.shape),
            np.zeros(lbp_large.shape)
        ]

        for mask in range(len(masks)):
            for ind in range(int(np.max(lbp_large_mapped)) + 1):
                masks[mask] += (ind + 1) * (lbp_list[mask]
                                            == ind) * log[2 + mask * 10:2 +
                                                          (mask + 1) * 10][ind]

        # No instances in LBP_large (0,8) and LBP_small (0,8)
        print_images(lbp_list,
                     subtitles=['Large', 'Small', 'Radial'],
                     title=sample,
                     sample=sample + '.png')

    # Print center image (guarded so the function also works when args is None)
    if args is not None:
        fig = plt.figure(dpi=300)
        ax = fig.add_subplot(111)
        ax.imshow(image_center >= 0)
        plt.title('Center')
        plt.savefig(args.save_path + '/Images/LBP/' + sample + '_center.png',
                    transparent=True)
        plt.close()

    # Print unmapped LBP
    #print_images([lbp_large, lbp_small, lbp_radial], subtitles=['Large', 'Small', 'Radial'], title=sample,
    #             save_path=args.save_path + '/Images/LBP/', sample=sample + '.png')

    return hist
Example #50
0
def manipulate_depth(fn_gt, fn_depth, fn_part, fn_mask):

    with open(fn_gt, 'r') as stream:
        query = yaml.safe_load(stream)
        bboxes = np.zeros((len(query), 5), int)
        poses = np.zeros((len(query), 7), np.float32)
        mask_ids = np.zeros((len(query)), int)
        for j in range(len(query) - 1):
            qr = query[j]
            class_id = qr['class_id']
            bbox = qr['bbox']
            mask_ids[j] = int(qr['mask_id'])
            pose = np.array(qr['pose']).reshape(4, 4)
            bboxes[j, 0] = class_id
            bboxes[j, 1:5] = np.array(bbox)
            q_pose = tf3d.quaternions.mat2quat(pose[:3, :3])
            poses[j, :4] = np.array(q_pose)
            poses[j, 4:7] = np.array([pose[0, 3], pose[1, 3], pose[2, 3]])

    pt = Imath.PixelType(Imath.PixelType.FLOAT)
    golden = OpenEXR.InputFile(fn_depth)
    dw = golden.header()['dataWindow']
    size = (dw.max.x - dw.min.x + 1, dw.max.y - dw.min.y + 1)
    redstr = golden.channel('R', pt)
    depth = np.frombuffer(redstr, dtype=np.float32).copy()
    depth.shape = (size[1], size[0])

    centerX = kin_res_x / 2.0
    centerY = kin_res_y / 2.0

    uv_table = np.zeros((kin_res_y, kin_res_x, 2), dtype=np.int16)
    column = np.arange(0, kin_res_y)
    uv_table[:, :, 1] = np.arange(0, kin_res_x) - centerX
    uv_table[:, :, 0] = column[:, np.newaxis] - centerY
    uv_table = np.abs(uv_table)

    depth = depth * np.cos(
        np.radians(fov / kin_res_x * np.abs(uv_table[:, :, 1]))) * np.cos(
            np.radians(fov / kin_res_x * uv_table[:, :, 0]))

    depth = cv2.resize(depth, (resX, resY))

    # erode and blur mask to get more realistic appearance
    partmask = cv2.imread(fn_part, 0)
    partmask = cv2.resize(partmask, (resX, resY))
    partmask = partmask.astype(np.float32)
    mask = partmask > (np.median(partmask) * 0.4)
    partmask = np.where(mask, 255.0, 0.0)
    ###################################################
    # DON'T REMOVE, best normal map up to now !!! tested without lateral noise !!!
    kernel = np.ones((7, 7))
    #partmask = signal.medfilt2d(partmask, kernel_size=7)
    partmask = cv2.morphologyEx(partmask, cv2.MORPH_OPEN, kernel)
    partmask = signal.medfilt2d(partmask, kernel_size=3)
    ###################################################
    partmask = partmask.astype(np.uint8)

    scaDep = 255.0 / np.nanmax(partmask)
    depImg = np.multiply(partmask, scaDep)
    depI = depImg.astype(np.uint8)
    cv2.imwrite("/home/sthalham/visTests/mask.jpg", depI)

    mask = partmask > 20
    depth = np.where(mask, depth, 0.0)

    scaDep = 255.0 / np.nanmax(depth)
    depImg = np.multiply(depth, scaDep)
    depI = depImg.astype(np.uint8)
    cv2.imwrite("/home/sthalham/visTests/depMasked.jpg", depI)

    onethird = cv2.resize(depth,
                          None,
                          fx=1 / 3,
                          fy=1 / 3,
                          interpolation=cv2.INTER_AREA)
    res = (((onethird / 1000) * 1.41421356)**2) * 1000
    depth = onethird

    # discretize to resolution and apply gaussian
    dNonVar = np.divide(depth, res, out=np.zeros_like(depth), where=res != 0)
    dNonVar = np.round(dNonVar)
    dNonVar = np.multiply(dNonVar, res)
    noise = np.multiply(dNonVar, 0.0025)
    depthFinal = np.random.normal(loc=dNonVar, scale=noise, size=dNonVar.shape)

    depthFinal = cv2.GaussianBlur(depthFinal, (7, 7), 0.75, 0.75)

    # apply retardo-noise to non-object parts
    objmask = np.load(fn_mask)
    objmask = cv2.resize(objmask, (720, 540))
    objmask = np.where(objmask > 0, 255, 0).astype(np.uint8)
    print(objmask.shape)
    print(np.nanmax(objmask))
    print(np.nanmin(objmask))
    edges = cv2.Canny(objmask, 100, 200)

    cv2.imwrite('/home/sthalham/visTests/edges.jpg', edges)

    # INTER_NEAREST - a nearest-neighbor interpolation
    # INTER_LINEAR - a bilinear interpolation (used by default)
    # INTER_AREA - resampling using pixel area relation. It may be a preferred method for image decimation, as it gives moire’-free results. But when the image is zoomed, it is similar to the INTER_NEAREST method.
    # INTER_CUBIC - a bicubic interpolation over 4x4 pixel neighborhood
    # INTER_LANCZOS4 - a Lanczos interpolation over 8x8 pixel neighborhood
    depthFinal = cv2.resize(depthFinal,
                            None,
                            fx=3,
                            fy=3,
                            interpolation=cv2.INTER_NEAREST)

    return depthFinal, bboxes, poses, mask_ids
Example #51
0
def apbackground(img,
                 ap_uorder_interp,
                 offsetlim=(-5, 5),
                 q=50,
                 npix_inter=3,
                 kernel_size=(11, 11),
                 polydeg=5):
    """ determine background/scattered light using inter-aperture pixels """

    img = np.array(img)

    # determine inter-aperture pixels
    ap_lo = np.sum(np.array([3, -3, 1]).reshape(-1, 1) * ap_uorder_interp[0:3],
                   axis=0)
    ap_hi = np.sum(np.array([1, -3, 3]).reshape(-1, 1) * ap_uorder_interp[-3:],
                   axis=0)

    # risky to extend inter-order region!
    # ap_extended = np.vstack((ap_lo, ap_uorder_interp, ap_hi))
    # ap_inter = (ap_extended[1:] + ap_extended[:-1]) * .5
    ap_extended = np.vstack((ap_lo, ap_uorder_interp))
    ap_inter = (ap_extended[1:] + ap_extended[:-1]) * .5

    # determine the valley of inter-aperture pixels
    try:
        ap_width, sys_offset = apwidth(img,
                                       ap_inter,
                                       offsetlim=(-3, 3),
                                       ap_npix=npix_inter,
                                       method='min')
    except Exception:
        ap_width = (-2, +2)

    # get slice along dispersion axis
    order, ofst, xcoord, ycoord = get_aperture_index(ap_inter,
                                                     ap_width=ap_width)

    # extract median inter-aperture pixels
    def f(*args, **kwargs):
        return np.percentile(*args, **kwargs, q=q)

    spec1d_inter = sextract_all_aperture(img,
                                         ap_inter,
                                         ap_width=ap_width,
                                         func=f)
    spec_slc_tiled = np.tile(spec1d_inter,
                             (1, ap_width[1] - ap_width[0] + 1)).reshape(
                                 -1, ap_inter.shape[1])

    # interpolate median background
    sl = np.zeros_like(img) * np.nan
    for i_col in range(sl.shape[1]):
        # xcoord_ = xcoord[:, i_col]
        ycoord_ = ycoord[:, i_col]
        sl_ = spec_slc_tiled[:, i_col]
        sl_interp = interp1d(ycoord_[np.isfinite(sl_)],
                             sl_[np.isfinite(sl_)],
                             bounds_error=False)(np.arange(sl.shape[0]))

        # fill the ends
        ind_end = np.where(np.isfinite(sl_interp))[0][[0, -1]]
        sl_interp[:ind_end[0]] = sl_interp[ind_end[0]]
        sl_interp[ind_end[-1] + 1:] = sl_interp[ind_end[-1]]
        sl[:, i_col] = sl_interp

    # median filter
    sls = medfilt2d(sl, kernel_size=kernel_size)

    # 1D polyfit smoothing
    print("@SONG: polyfit filtering ...")
    bg_polymoothed0 = polyfitfilt(sl, deg=polydeg, axis=0)
    print("@SONG: polyfit filtering2 ...")
    bg_polymoothed01 = polyfitfilt(bg_polymoothed0, deg=polydeg, axis=1)

    res = dict(bg=sl, bg_smoothed=sls, bg_poly=bg_polymoothed01)

    return res
Example #52
0
def MRELBP(im, N, R, r, w_c, w_r):
    #Takes the Median Robust Extended Local Binary Pattern from image im
    #Uses N neighbours from radii R and r, R must be larger than r
    #Median filter uses kernel sizes w_c for center pixels, w_r[0] for the larger radius and w_r[1]
    #for the smaller radius
    #Grayscale values are centered at their mean and scaled with the global standard deviation

    #Mean grayscale value and std
    muI = im.mean()
    stdI = im.std()

    #Centering and scaling with std
    I = (im - muI) / stdI

    #Median filtering
    Ic = medfilt(I, w_c)
    #Center pixels
    d = round(R + (w_r[0] - 1) / 2)
    Ic = Ic[d:-d, d:-d]
    #Subtracting the mean pixel value from center pixels
    Ic = Ic - Ic.mean()
    #Binning center pixels
    Chist = np.zeros((1, 2))
    Chist[0, 0] = np.sum(Ic >= 0)
    Chist[0, 1] = np.sum(Ic < 0)
    # --------------- #
    #Chist[0,0] = np.sum(Ic>=-1e-06)
    #Chist[0,1] = np.sum(Ic<-1e-06)
    # --------------- #

    #Median filtered images for large and small radius
    IL = medfilt(I, w_r[0])
    #d1 = round((w_r[0]-1)/2)
    #IL = IL[d1:-d1,d1:-d1]
    IS = medfilt2d(I, w_r[1])
    #d2 = round((w_r[1]-1)/2)
    #IS = IS[d2:-d2,d2:-d2]

    #Neighbours
    pi = np.pi
    #Empty arrays for the neighbours
    row, col = np.shape(Ic)
    NL = np.zeros((row, col, N))
    NS = np.zeros((row, col, N))
    #print("Size (Ic): " + str(row) + ", " + str(col))
    for k in range(N):
        #Angle to the neighbour
        theta = 0 + k * (-1 * 2 * pi / N)
        #Large neighbourhood
        x = d + R * np.cos(theta)
        y = d + R * np.sin(theta)
        if abs(x - round(x)) < 1e-06 and abs(y - round(y)) < 1e-06:
            x = int(round(x))
            y = int(round(y))
            P = IL[y:y + row, x:x + col]
        else:
            P = imbilinear(IL, col, x, row, y)
        NL[:, :, k] = P
        #Small neighbourhood
        #x = r+r*np.cos(theta)
        #y = r+r*np.sin(theta)
        # --------------- #
        x = d + r * np.cos(theta)
        y = d + r * np.sin(theta)
        # --------------- #
        if abs(x - round(x)) < 1e-06 and abs(y - round(y)) < 1e-06:
            x = int(round(x))
            y = int(round(y))
            P = IS[y:y + row, x:x + col]
        else:
            P = imbilinear(IS, col, x, row, y)
        NS[:, :, k] = P
    #Thresholding

    #Thresholding radial neighbourhood
    NR = NL - NS

    #Subtraction of means
    #Large neighbourhood
    NLmu = NL.mean(axis=2)
    #Small neighbouhood
    NSmu = NS.mean(axis=2)

    for k in range(N):
        NL[:, :, k] = NL[:, :, k] - NLmu
        NS[:, :, k] = NS[:, :, k] - NSmu

    #Converting to binary images and taking the lbp values

    #Initialization of arrays
    lbpIL = np.zeros((row, col))
    lbpIS = np.zeros((row, col))
    lbpIR = np.zeros((row, col))

    for k in range(N):
        lbpIL = lbpIL + (NL[:, :, k] >=
                         0) * 2**k  # NOTE ACCURACY FOR THRESHOLDING!!!
        lbpIS = lbpIS + (NS[:, :, k] >= 0) * 2**k
        lbpIR = lbpIR + (NR[:, :, k] >= 0) * 2**k
        # --------------- #
        #lbpIL = lbpIL+(NL[:,:,k]>=-1e-06)*2**k # NOTE ACCURACY FOR THRESHOLDING!!!
        #lbpIS = lbpIS+(NS[:,:,k]>=-1e-06)*2**k
        #lbpIR = lbpIR+(NR[:,:,k]>=-1e-06)*2**k
        # --------------- #

    #Binning
    Lhist = np.zeros((1, 2**N))
    Shist = np.zeros((1, 2**N))
    Rhist = np.zeros((1, 2**N))
    for k in range(2**N):
        Lhist[0, k] = np.sum(lbpIL == k)
        Shist[0, k] = np.sum(lbpIS == k)
        Rhist[0, k] = np.sum(lbpIR == k)

    #Chist = 1/np.linalg.norm(Chist)*Chist
    #Lhist = 1/np.linalg.norm(Lhist)*Lhist
    #Shist = 1/np.linalg.norm(Shist)*Shist
    #Rhist = 1/np.linalg.norm(Rhist)*Rhist
    #Mapping
    mapping = getmapping(N)
    Lhist = maplbp(Lhist, mapping)
    Shist = maplbp(Shist, mapping)
    Rhist = maplbp(Rhist, mapping)
    hist = np.concatenate((Chist, Lhist, Shist, Rhist), 1)

    return hist, lbpIL, lbpIS, lbpIR
Example #53
0
def median_filter(picture: Picture, size=5):
    matrix = signal.medfilt2d(picture.matrix, kernel_size=size)  # a lot faster
    # matrix = __base_filter(picture.matrix, window_size, lambda window: np.median(window))
    return Picture(picture.title, matrix)
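# Hedged usage sketch (not part of the original example): Picture is assumed
# to be a simple (title, matrix) wrapper; kernel_size must be an odd integer
# for medfilt2d.
# noisy = Picture("noisy", np.random.rand(64, 64))
# cleaned = median_filter(noisy, size=5)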
Example #54
0
capture = cv.VideoCapture(0)
capture.set(cv.CAP_PROP_FPS, 1)

# Set up Background Subtraction Model (bgsub)
backSub = cv.createBackgroundSubtractorMOG2()

# Bgsub and median filtering parameters
mask_thresh = 255
kernel_size = 25
lr = 0.05
burn_in = 30
i = 0

# Loop through the frames from the webcam
while True:
    ret, frame = capture.read()
    fgMask = backSub.apply(frame, learningRate=lr)
    # Avoid early false positives
    if i < burn_in:
        i += 1
        continue
    # Threshold mask - plot when change detected
    fgMaskMedian = medfilt2d(fgMask, kernel_size)
    if (fgMaskMedian >= mask_thresh).any():
        fig, axs = plt.subplots(1, 3)
        frame = cv.cvtColor(frame, cv.COLOR_BGR2RGB)
        axs[0].imshow(frame)
        axs[1].imshow(fgMask)
        axs[2].imshow(fgMaskMedian)
        plt.show()
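# Hedged addition (not in the original example): the loop above never exits
# on its own; the usual OpenCV pattern is a key check plus explicit cleanup.
#     if cv.waitKey(30) & 0xFF == ord('q'):
#         break
# capture.release()
# cv.destroyAllWindows()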
Example #55
0
def get_beam_origin(
    img:np.ndarray, 
    slit_cnrs:np.ndarray=None, 
    size:Tuple=(500, 500),
    ) -> Tuple:
    """
    Description
    -----------
    Return the image coordinate (row, col) of the supposed beamcenter that provides the most
    homogeneous beam profile
    
    Parameters
    ----------
    img: np.ndarray
        Input image with only slits
    slit_cnrs: np.ndarray
        Image coordinates of the four corners defined by the slits
    size: (int, int)
        (row, col) size of the desired FOV.  A 500x500 FOV is commonly used to locate the beam center.
        NOTE: a smaller size often helps, but it should depend on the actual FOV intended for the experiment
        
    Returns
    -------
    Tuple
    The image coordinates (row, col) of the beam center.  In ImageJ, this coordinate is displayed as (col, row).
    """
    img = _safe_read_img(img)

    # sanity check to make sure the FOV is not too large
    if size[0] > img.shape[0]:
        raise ValueError("FOV is way too large in vertical direction")
    if size[1] > img.shape[1]:
        raise ValueError("FOV is way too large in horizontal direction")
    # get the domain size
    _srow, _scol = size
    
    # detect slit corners if not provided
    slit_cnrs = np.array(detect_slit_corners(img)) if slit_cnrs is None else np.array(slit_cnrs)
    slit_top, slit_bot = int(min(slit_cnrs[:,0])), int(max(slit_cnrs[:,0]))
    slit_lft, slit_rgt = int(min(slit_cnrs[:,1])), int(max(slit_cnrs[:,1]))
        
    # avoid impact of noisy pixels (defects in detector)
    img = medfilt2d(img.astype(float))
    
    # find the brightest spot in the image, use it as a starting point
    _beamcenter = np.unravel_index(np.argmax(img, axis=None), img.shape)  # print(x_b, y_b, img[y_b, x_b])
    
    # form bounds as constraints
    def _row_in_range(beamcenter):
        return -1*(beamcenter[0]-slit_top+_srow/2)*(beamcenter[0]-slit_bot+_srow/2)
    def _col_in_range(beamcenter):
        return -1*(beamcenter[1]-slit_lft+_scol/2)*(beamcenter[1]-slit_rgt+_scol/2)
    
    # define objective function
    def _obj(beamcenter):
        _r, _c = beamcenter.astype(int)
        # crop the requested FOV centred on the candidate beam center
        # (half the size on each side; the original sliced a 2x-size window)
        _data = img[_r - _srow // 2:_r + _srow // 2, _c - _scol // 2:_c + _scol // 2]
        
        _hp = np.average(_data, axis=0)
        _hmod = GaussianModel(prefix='hp_')
        _hfit = _hmod.fit(_hp, x=np.arange(_hp.shape[0]), hp_center=len(_hp)/2)
        
        _vp = np.average(_data, axis=1)
        _vmod = GaussianModel(prefix='vp_')
        _vfit = _vmod.fit(_vp, x=np.arange(_vp.shape[0]), vp_center=len(_vp)/2)
        
        # rms: minimize the asymmetry of the beam profile in both
        # directions as far as possible.
        # NOTE:
        #   The beam is not always symmetric, and a (roughly) symmetric
        #   beam is needed for ff-HEDM and nf-HEDM scans.
        return np.sqrt(
            0.5 * (_hfit.best_values['hp_center'] - len(_hp) / 2)**2
            + 0.5 * (_vfit.best_values['vp_center'] - len(_vp) / 2)**2
        )
    
    _rst = sp.optimize.minimize(_obj, _beamcenter,
                               constraints=({'type': 'ineq', 'fun':  _row_in_range },
                                            {'type': 'ineq', 'fun':  _col_in_range },
                                           ),
                               method='COBYLA',
                              )
    return _rst.x
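
# Hypothetical usage sketch; `img` is assumed to be a 2-D slit image
# loaded as a numpy array.
beam_row, beam_col = get_beam_origin(img, size=(500, 500))
print(f"beam center at row={beam_row:.1f}, col={beam_col:.1f}")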
Example #56
import concurrent.futures as cf
import itertools
import multiprocessing

import numpy as np
from scipy.signal import medfilt2d
from skimage import exposure
from skimage.feature import canny
from skimage.transform import probabilistic_hough_line

# detect_slit_corners is assumed to be defined elsewhere in the module.


def get_pin_outline(
    img_pin: np.ndarray, 
    incrop:  int=61, 
    adapthist_clip: float=0.01,
    upsampling: int=12,
    ) -> list:
    """
    Description
    -----------
        Using canny edge detection and Hough transformation to detect the
        outline of a pin, which is commonly used for alignment of rotation
        stages at MPE@APS.
    
    Parameters
    ----------
    img_pin: np.ndarray
        input image with pin in the FOV
    incrop: int
        number of pixels to crop into the FOV to avoid interference from the slit blades
    adapthist_clip: float
        decrease it to suppress artifacts from scintillators and camera
    upsampling: int
        repeat hough transform to get more line segments of the same feature
        
    Returns
    -------
    list
        line segments in image coordinates for the pin outline
    """
    # use log to suppress scintillator artifacts
    img_pin = np.log(img_pin)
    
    # get the slit corner
    cnrs = np.array(detect_slit_corners(img_pin))
    
    # get the cropping location
    _minrow, _maxrow = int(min(cnrs[:,0])), int(max(cnrs[:,0]))
    _mincol, _maxcol = int(min(cnrs[:,1])), int(max(cnrs[:,1]))
    
    # crop the img
    # NOTE: aggressive in-cropping to avoid edge-detection interference from the slits
    _img = exposure.rescale_intensity(
        img_pin[_minrow+incrop : _maxrow-incrop, 
                _mincol+incrop : _maxcol-incrop]
    )
    
    # use canny + hough_line to get outline segment
    _img = medfilt2d(_img)
    _img = exposure.equalize_adapthist(_img, clip_limit=adapthist_clip)
    _edges = canny(_img, sigma=3)

    # use multiprocessing for upsampling
    _cpus = max(multiprocessing.cpu_count() - 2, 2)
    with cf.ProcessPoolExecutor(max_workers=_cpus) as e:
        # schedule
        _jobs = [e.submit(
            probabilistic_hough_line,
            _edges,
            threshold=10,  
            line_length=7,  # Increase the parameter to extract longer lines.
            line_gap=2,     # Decrease the number to allow more short segments
            ) for _ in range(upsampling)]
        # execute
        _lines = list(itertools.chain(*[me.result() for me in _jobs]))
    
    return [[(pt[0] + _mincol + incrop, pt[1] + _minrow + incrop) for pt in line] for line in _lines]
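
# Hypothetical usage sketch for visual inspection, assuming matplotlib
# is available and `img_pin` is the pin image used above.
import matplotlib.pyplot as plt
lines = get_pin_outline(img_pin)
plt.imshow(img_pin, cmap="gray")
for p0, p1 in lines:
    plt.plot([p0[0], p1[0]], [p0[1], p1[1]], "r-")  # (col, row) coordinates
plt.show()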
Example #57
            if current_evt == stop_evt:
                print("reached the end")
                continue
        else:
            stop_evt = current_evt + read_per_cycle
        print("Reading evt {} to {}...".format(current_evt, stop_evt - 1))
        #timestamps, ampl, blocks, phases = read_raw_signal(reader, range(current_evt, stop_evt), get_timestamp=True, calibrated = calibrated)
        #timestamps, ampl, blocks, phases = read_raw_signal_array(reader, range(current_evt, stop_evt), get_timestamp=True, calibrated = calibrated)
        ampl, timestamps, first_cell_ids, stale_bit = read_calibrated_data(
            args.infile,
            DATADIR=DATADIR,
            event_list=range(current_evt, stop_evt))

        for i in range(current_evt, stop_evt):
            im = show_image(ampl[i - current_evt], maxZ=4000, show=False)
            im_smooth = medfilt2d(im, 3)

            #if np.percentile(im_smooth[im_smooth != 0], 90) > 500:
            if np.percentile(im_smooth[im_smooth != 0], 20) > 200:
                print("This is probably a flasher event")
                isf = 'f'
                if args.flasher:
                    with open(flasher_file, 'a') as ffio:
                        ffio.write("{}\n".format(i))
            else:
                isf = ''
            if args.flasher and isf == '':
                continue
            elif not args.flasher and isf == 'f':
                # let's skip flashers
                continue
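
# Standalone sketch of the flasher test above, on a hypothetical image:
# a frame whose median-smoothed nonzero pixels have a 20th percentile
# above the 200 threshold is flagged as a flasher.
import numpy as np
from scipy.signal import medfilt2d
im = np.random.poisson(300, (32, 32)).astype(float)  # bright frame
im_smooth = medfilt2d(im, 3)
if np.percentile(im_smooth[im_smooth != 0], 20) > 200:
    print("This is probably a flasher event")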
Example #58
    def detect_debris(self, ov_number, method):
        debris_detected = False
        msg = 'CTRL: No debris detection method selected.'
        ov_roi = [None, None]
        # Crop to current debris detection area:
        top_left_px, top_left_py, bottom_right_px, bottom_right_py = (
            self.ovm.get_ov_debris_detection_area(ov_number))
        for i in range(2):
            ov_img = self.ov_images[ov_number][i][1]
            ov_roi[i] = ov_img[top_left_py:bottom_right_py,
                               top_left_px:bottom_right_px]
        height, width = ov_roi[0].shape

        if method == 0:
            # Calculate the maximum difference in mean and stddev across
            # four quadrants and full ROI:
            means = {}
            stddevs = {}
            max_diff_mean = 0
            max_diff_stddev = 0
            # Compute mean and stddev for four quadrants
            # and for full ROI
            area_height = bottom_right_py - top_left_py
            area_width = bottom_right_px - top_left_px
            quadrant_area = (area_height * area_width) / 4

            for i in range(2):
                quadrant1 = ov_roi[i][0:int(area_height / 2),
                                      0:int(area_width / 2)]
                quadrant2 = ov_roi[i][0:int(area_height / 2),
                                      int(area_width / 2):area_width]
                quadrant3 = ov_roi[i][int(area_height / 2):area_height,
                                      0:int(area_width / 2)]
                quadrant4 = ov_roi[i][int(area_height / 2):area_height,
                                      int(area_width / 2):int(area_width)]
                means[i] = [
                    np.mean(quadrant1),
                    np.mean(quadrant2),
                    np.mean(quadrant3),
                    np.mean(quadrant4),
                    np.mean(ov_roi[i])
                ]
                stddevs[i] = [
                    np.std(quadrant1),
                    np.std(quadrant2),
                    np.std(quadrant3),
                    np.std(quadrant4),
                    np.std(ov_roi[i])
                ]

            if quadrant_area < self.debris_roi_min_quadrant_area:
                # Use only full ROI if ROI too small for quadrants:
                start_i = 4
                var_str = 'OV ROI (no quadrants)'
            else:
                # Use four quadrants and ROI for comparisons:
                start_i = 0
                var_str = 'OV quadrants'
            for i in range(start_i, 5):
                diff_mean_i = abs(means[1][i] - means[0][i])
                if diff_mean_i > max_diff_mean:
                    max_diff_mean = diff_mean_i
                diff_stddev_i = abs(stddevs[1][i] - stddevs[0][i])
                if diff_stddev_i > max_diff_stddev:
                    max_diff_stddev = diff_stddev_i

            msg = ('CTRL: ' + var_str +
                   ': max. diff_M: {0:.2f}'.format(max_diff_mean) +
                   '; max. diff_SD: {0:.2f}'.format(max_diff_stddev))

            debris_detected = ((max_diff_mean > self.mean_diff_threshold) or
                               (max_diff_stddev > self.stddev_diff_threshold))

        if method == 1:
            # Compare the histogram count from the difference image to user-
            # specified threshold.

            # Apply median filter to denoise images:
            ov_curr = medfilt2d(ov_roi[1], self.median_filter_kernel_size)
            ov_prev = medfilt2d(ov_roi[0], self.median_filter_kernel_size)

            # Pixel difference
            # Recast as int16 before subtraction:
            ov_curr = ov_curr.astype(np.int16)
            ov_prev = ov_prev.astype(np.int16)
            ov_diff_img = np.absolute(np.subtract(ov_curr, ov_prev))
            # Histogram of difference image:
            diff_histogram, bin_edges = np.histogram(ov_diff_img, 256,
                                                     [0, 256])
            # Compute sum for counts above lower limit:
            diff_sum = 0
            for i in range(self.image_diff_hist_lower_limit, 256):
                diff_sum += diff_histogram[i]
            # Threshold scales with ROI area (counts per megapixel):
            threshold = self.image_diff_threshold * height * width / 1e6
            msg = ('CTRL: OV: image_diff_hist_sum: ' + str(diff_sum) +
                   ' (curr. threshold: ' + str(int(threshold)) + ')')
            debris_detected = (diff_sum > threshold)

        if method == 2:
            # Compare histograms directly (not very effective; kept for
            # testing purposes only).
            hist_diff_sum = 0
            # Histogram from previous OV:
            hist1, bin_edges = np.histogram(ov_roi[0], 256, [0, 256])
            # Histogram from current OV
            hist2, bin_edges = np.histogram(ov_roi[1], 256, [0, 256])
            for i in range(256):
                hist_diff_sum += abs(hist1[i] - hist2[i])
            threshold = self.histogram_diff_threshold * height * width / 1e6

            msg = ('CTRL: OV: hist_diff_sum: ' + str(hist_diff_sum) +
                   ' (curr. threshold: ' + str(int(threshold)) + ')')
            debris_detected = (hist_diff_sum > threshold)

        return debris_detected, msg
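
# Standalone sketch of the method-1 idea above, on hypothetical arrays:
# median-filter two registered overview images, difference them, and
# count pixels whose change exceeds a histogram lower limit.
import numpy as np
from scipy.signal import medfilt2d

prev = np.random.randint(0, 200, (512, 512)).astype(np.float32)
curr = prev.copy()
curr[100:140, 200:260] += 50.0  # simulated debris patch
diff = np.abs(medfilt2d(curr, 3) - medfilt2d(prev, 3))
counts, _ = np.histogram(diff, 256, [0, 256])
diff_sum = counts[20:].sum()        # counts above an assumed lower limit of 20
threshold = 50 * diff.size / 1e6    # assumed: 50 counts per megapixel
print("debris detected" if diff_sum > threshold else "clean")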
Example #59
import numpy as np
from scipy.ndimage import correlate  # shape-preserving correlation (assumed; matches the usage below)
from scipy.signal import medfilt2d

# getmapping, maplbp and weight_matrix_bilin are assumed to be defined
# elsewhere in the same module.


def Conv_MRELBP(image, N, R, r, wR, wr, wc):
    #Whiten the image
    imu = image.mean()
    istd = image.std()
    im = (image - imu) / istd
    #Get image dimensions
    h, w = im.shape[:2]
    #Make kernels
    kR = []
    kr = []
    dtheta = np.pi * 2 / N
    for k in range(0, N):
        _kernel = weight_matrix_bilin(R, -k * dtheta, val=0)
        kR.append(_kernel)

        _kernel = weight_matrix_bilin(r, -k * dtheta, val=0)
        kr.append(_kernel)

    #Make median filtered images
    imc = medfilt2d(im.copy(), wc)
    imR = medfilt2d(im.copy(), wR)
    imr = medfilt2d(im.copy(), wr)

    #Get LBP images
    neighbR = np.zeros((h, w, N))
    neighbr = np.zeros((h, w, N))
    for k in range(N):
        _neighb = correlate(imR, kR[k])
        neighbR[:, :, k] = _neighb
        _neighb = correlate(imr, kr[k])
        neighbr[:, :, k] = _neighb

    #Crop valid convolution region
    d = R + wR // 2
    h -= 2 * d
    w -= 2 * d

    neighbR = neighbR[d:-d, d:-d, :]
    neighbr = neighbr[d:-d, d:-d, :]
    imc = imc[d:-d, d:-d]

    #Subtraction: broadcast each neighbourhood mean across the N channels
    muR = np.repeat(neighbR.mean(2).reshape(h, w, 1), N, axis=2)
    mur = np.repeat(neighbr.mean(2).reshape(h, w, 1), N, axis=2)

    diffc = (imc - imc.mean()) >= 0
    diffR = (neighbR - muR) >= 0
    diffr = (neighbr - mur) >= 0
    diffR_r = (neighbR - neighbr) >= 0

    #Compute lbp images
    lbpc = diffc
    lbpR = np.zeros((h, w))
    lbpr = np.zeros((h, w))
    lbpR_r = np.zeros((h, w))
    for k in range(N):
        lbpR += diffR[:, :, k] * (2**k)
        lbpr += diffr[:, :, k] * (2**k)
        lbpR_r += diffR_r[:, :, k] * (2**k)
    #Get LBP histograms
    histc = np.zeros((1, 2))
    histR = np.zeros((1, 2**N))
    histr = np.zeros((1, 2**N))
    histR_r = np.zeros((1, 2**N))

    histc[0, 0] = (lbpc == 1).astype(np.float32).sum()
    histc[0, 1] = (lbpc == 0).astype(np.float32).sum()

    for k in range(2**N):
        histR[0, k] = (lbpR == k).astype(np.float32).sum()
        histr[0, k] = (lbpr == k).astype(np.float32).sum()
        histR_r[0, k] = (lbpR_r == k).astype(np.float32).sum()

    #Mapping
    mapping = getmapping(N)
    histR = maplbp(histR, mapping)
    histr = maplbp(histr, mapping)
    histR_r = maplbp(histR_r, mapping)

    #Append histograms
    hist = np.concatenate((histc, histR, histr, histR_r), 1)

    return hist
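
# Minimal usage sketch, assuming getmapping/maplbp/weight_matrix_bilin
# are available in the module; parameter values here are illustrative only.
import numpy as np
img = np.random.rand(128, 128)
hist = Conv_MRELBP(img, N=8, R=4, r=2, wR=5, wr=3, wc=3)
print(hist.shape)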
Example #60
 def get_wave_arrays(self, pgr=False):
     '''Calculates wave direction (with 180 degrees of ambiguity),
     significant wave height and wave period.

         pgr: Boolean, default False. If False, each imagette is
             independent of the others and every pixel belongs to exactly
             one imagette. If True, imagettes overlap: an imagette is
             created for each pixel regardless of the multilook value
             (N of pixels = N of imagettes), so every pixel belongs to
             several imagettes but is the centre of exactly one.'''
     
     
     sigma = self.get_var_array(self.ds, Wave.sigma)
     inci_matrix = self.get_var_array(self.ds, Wave.incidence)
     beta_matrix = self.get_var_array(self.ds, Wave.slant)/Wave.S1_V
     direc_matrix = np.zeros(shape=sigma.shape)
     phi_matrix = np.zeros(shape=sigma.shape)
     cutoff_matrix = np.zeros(shape=sigma.shape)
     height_matrix = np.zeros(shape=sigma.shape)
     period_matrix = np.zeros(shape=sigma.shape)
     
     subimages = list(self.gen_imagettes(sigma, multilook=Wave.direc_attr['scale_factor'], progressive_multilook=pgr))
     for roi in subimages:
         direc = np.angle(np.max(np.fft.fft2(roi[0])), deg=True)
         
         phi = direc - float(self.ds.attrs['azimuth_direction'])
         
          # signal.csd returns (frequencies, CSD); keep only the CSD
          _, psd = signal.csd(roi[0], np.transpose(roi[0]), scaling='density')
         phases = (np.random.randn(psd.shape[0]*psd.shape[1]).reshape(psd.shape))*2*np.pi
         acf = np.fft.ifft2(np.sqrt(psd*2)*np.exp(1j*phases))
         std = np.std(signal.medfilt2d(np.abs(acf), kernel_size=11))
         cutoff = np.sqrt(2*np.pi*std)
         
         if len(roi[1]) == 2:
             sub_inci = np.deg2rad(np.mean(inci_matrix[roi[1][0],roi[1][1]]))
             sub_beta = np.mean(beta_matrix[roi[1][0],roi[1][1]])
             
             hei = ((cutoff/sub_beta)*(0.48 + 0.26*np.sin(sub_inci) + 0.27*np.cos(2*np.deg2rad(phi)))) + 0.22
             period = (hei*(sub_beta/cutoff)*1.65) + 5.60
             
             direc_matrix[roi[1][0],roi[1][1]] = direc
             phi_matrix[roi[1][0],roi[1][1]] = phi
             cutoff_matrix[roi[1][0], roi[1][1]] = cutoff
             height_matrix[roi[1][0], roi[1][1]] = hei
             period_matrix[roi[1][0], roi[1][1]] = period
             
         elif len(roi[1]) == 4:
             sub_inci = np.deg2rad(np.mean(inci_matrix[roi[1][0]:roi[1][1], roi[1][2]:roi[1][3]]))
             sub_beta = np.mean(beta_matrix[roi[1][0]:roi[1][1], roi[1][2]:roi[1][3]])
             
             hei = ((cutoff/sub_beta)*(0.48 + 0.26*np.sin(sub_inci) + 0.27*np.cos(2*np.deg2rad(phi)))) + 0.22
             period = (hei*(sub_beta/cutoff)*1.65) + 5.60
             
             direc_matrix[roi[1][0]:roi[1][1], roi[1][2]:roi[1][3]] = direc
             phi_matrix[roi[1][0]:roi[1][1], roi[1][2]:roi[1][3]] = phi
             cutoff_matrix[roi[1][0]:roi[1][1], roi[1][2]:roi[1][3]] = cutoff
             height_matrix[roi[1][0]:roi[1][1], roi[1][2]:roi[1][3]] = hei
             period_matrix[roi[1][0]:roi[1][1], roi[1][2]:roi[1][3]] = period
     
     self.add_var(self.out, Wave.direc_name, self.downsampling_2D(direc_matrix, multilook=Wave.direc_attr['scale_factor'], mode='angle'), Wave.direc_attr)
     self.add_var(self.out, Wave.height_name, self.downsampling_2D(height_matrix, multilook=Wave.height_attr['scale_factor']), Wave.height_attr)
     self.add_var(self.out, Wave.period_name, self.downsampling_2D(period_matrix, multilook=Wave.period_attr['scale_factor']), Wave.period_attr)
      if self.inter:
         self.add_var(self.out, Wave.phi_name, self.downsampling_2D(phi_matrix, multilook=Wave.phi_attr['scale_factor'], mode='angle'), Wave.phi_attr)
         self.add_var(self.out, Wave.cutoff_name, self.downsampling_2D(cutoff_matrix, multilook=Wave.cutoff_attr['scale_factor']), Wave.cutoff_attr)