Example #1
import numpy as _np
from scipy.ndimage import gaussian_filter as gf

def return_grad(data, filter_grad=False, grad_filter_max=0.5, gfsize=0):
    """Return the 2D gradient of data as an array [d/dx, d/dy],
    optionally zeroing out gradients above grad_filter_max and
    Gaussian-smoothing the result."""
    # Backward differences along each axis; the first row/column stay zero.
    data_dx = _np.zeros_like(data)
    data_dx[1:, :] = -1 * (data[1:, :] - data[:-1, :])
    data_dy = _np.zeros_like(data)
    data_dy[:, 1:] = (data[:, 1:] - data[:, :-1])
    data_dxdy = -1 * _np.array([data_dx, data_dy])
    if filter_grad:
        # Zero out gradient values whose magnitude exceeds the threshold.
        wx = _np.where(abs(data_dxdy[0]) > grad_filter_max)
        wy = _np.where(abs(data_dxdy[1]) > grad_filter_max)
        data_dxdy[0][wx[0], wx[1]] = 0
        data_dxdy[1][wy[0], wy[1]] = 0
    if gfsize > 0:
        # Smooth each gradient component with a Gaussian kernel.
        data_dxdy[0] = gf(data_dxdy[0], gfsize)
        data_dxdy[1] = gf(data_dxdy[1], gfsize)
    return data_dxdy
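A minimal usage sketch on a synthetic image (hypothetical data; relies on the imports added above):

# Usage sketch for return_grad (hypothetical data).
y, x = _np.mgrid[-2:2:64j, -2:2:64j]
bump = _np.exp(-(x**2 + y**2))  # smooth 2D test image
gx, gy = return_grad(bump, filter_grad=True, grad_filter_max=0.5, gfsize=1)
print(gx.shape, gy.shape)       # (64, 64) (64, 64)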
Example #2
def return_grad(data, filter_grad=False, grad_filter_max=0.5, gfsize=0):
    data_dx = _np.zeros_like(data)
    data_dx[1:,:] = -1*(data[1:,:]-data[:-1,:])
    data_dy = _np.zeros_like(data)
    data_dy[:,1:] = (data[:,1:]-data[:,:-1])
    data_dxdy = -1*_np.array([data_dx,data_dy])
    if filter_grad:
        # Zero out gradients whose magnitude exceeds the threshold.
        wx = _np.where(abs(data_dxdy[0])>grad_filter_max)
        wy = _np.where(abs(data_dxdy[1])>grad_filter_max)
        data_dxdy[0][wx[0],wx[1]] = 0
        data_dxdy[1][wy[0],wy[1]] = 0
    if gfsize>0:
        data_dxdy[0] = gf(data_dxdy[0],gfsize)
        data_dxdy[1] = gf(data_dxdy[1],gfsize)
    return data_dxdy
Example #3
    def findcenter(self, smooth_sigma=1.8, smoothbg_sigma=40):
        """
        Finds the pixel with the highest intensity in each frame.

        Parameters
        ----------
        smooth_sigma : number
            Sigma of the 2D Gaussian used to smooth each frame before
            locating the center.
        smoothbg_sigma : number
            Sigma of the 2D Gaussian used to remove low-frequency
            variations across the detector.
        """

        try:
            arr = self.diff
        except AttributeError:
            arr = self.diff_savesets()

        nframes = len(arr)
        locs = zeros((nframes, 2), dtype='int')

        for i in range(nframes):
            a = arr[i]
            # Difference of Gaussians: smooth the frame, subtract the
            # large-scale background, then take the brightest pixel.
            b = gf(a, smooth_sigma) - gf(a, smoothbg_sigma)
            locs[i] = [j[0] for j in where(b == b.max())]

        self.centerlocs = locs

        return locs
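The heart of findcenter is a difference-of-Gaussians peak search. A self-contained sketch of just that step (synthetic data; assumes only numpy and scipy):

import numpy as np
from scipy.ndimage import gaussian_filter as gf

frame = np.random.rand(128, 128)
frame[40, 70] += 5.0                  # bright spot to recover
b = gf(frame, 1.8) - gf(frame, 40)    # subtract smooth background
row, col = np.unravel_index(b.argmax(), b.shape)
print(row, col)                       # ~ (40, 70)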
Example #4
    def color_diffuse(self, amount):
        # Smooth only the chroma channels (a, b) of the LAB image,
        # leaving lightness (L) untouched.
        sigma = 1.5 * amount + 2
        labout = np.copy(self.labin)
        labout[:, :, 1] = gf(self.labin[:, :, 1], sigma)
        labout[:, :, 2] = gf(self.labin[:, :, 2], sigma)
        diffuse = cv2.cvtColor(labout, cv2.COLOR_LAB2RGB)
        return diffuse
Example #5
def random_gal(galreal,Nrand,Nmin=10):
    """ Preferred random galaxy generator. For a given galaxy with a
    given magnitude (and other properties), it calculates the redshift
    sensitivity function from galaxies in a magnitude band around the
    selected one (i.e., including slightly brighter and fainter
    galaxies), and places Nrand new galaxies at a random redshift drawn
    from a smoothed version of the observed sensitivity function. For
    extremely bright or faint galaxies (rare) the sensitivity function
    is calculated from at least Nmin galaxies (i.e., the magnitude
    band is increased)."""
    
    from astro.sampledist import RanDist
    from astro.fit import InterpCubicSpline
    from scipy.ndimage import gaussian_filter as gf
    
    Ckms  = 299792.458
    Nmin  = int(Nmin)  #minimum number of galaxies for the fit
    zmin  = np.min(galreal.ZGAL)
    zmax  = np.max(galreal.ZGAL) + 0.1
    DZ    = 0.01       #delta z for the histogram for getting the spline in z
    smooth_scale = 10. #smoothing scale for the histogram (in number
                       #of bins, so depends on DZ)
    galreal.sort(order='MAG') #np.recarray.sort()
    galrand = galreal.repeat(Nrand)
    delta_mag = 0.5 # half of the magnitude bandwidth to generate the z histogram
    
    bins = np.append(np.linspace(0,zmin,20),np.arange(zmin+DZ, zmax, DZ))
    
    for i in range(len(galreal)):
        if i < Nmin: 
            vals,bins = np.histogram(galreal.ZGAL[:Nmin], bins)
            vals      = gf(vals.astype(float),smooth_scale) # smooth the histogram
            spl       = InterpCubicSpline(0.5*(bins[:-1] + bins[1:]), vals.astype(float))
            
        else:
            delta_mag2=delta_mag
            while True:
                cond = (galreal.MAG > 0) & (galreal.MAG < 90) 
                cond = cond & (galreal.MAG<=galreal.MAG[i]+delta_mag2)&(galreal.MAG>galreal.MAG[i]-delta_mag2)
                if np.sum(cond)>=Nmin:
                    break
                else:
                    delta_mag2+=0.1
            vals,bins = np.histogram(galreal.ZGAL[cond], bins)
            vals      = gf(vals.astype(float),smooth_scale) # smooth the histogram
            spl       = InterpCubicSpline(0.5*(bins[:-1] + bins[1:]), vals.astype(float))
        
        rvals     = np.linspace(0, zmax, 10000)
        rand_z    = RanDist(rvals, spl(rvals))
        zrand     = rand_z.random(Nrand)
        
        integer_random2 = np.random.randint(0,len(galreal),Nrand) #for RA,DEC  
        RArand  = galreal.RA[integer_random2]
        DECrand = galreal.DEC[integer_random2]
        galrand.ZGAL[i*Nrand:(i+1)*Nrand] = zrand
        galrand.RA[i*Nrand:(i+1)*Nrand]   = RArand
        galrand.DEC[i*Nrand:(i+1)*Nrand]  = DECrand
    return galrand
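The z-sampling step above depends on the external astro helpers (RanDist, InterpCubicSpline). A numpy-only sketch of the same idea, smoothing a redshift histogram with gaussian_filter and drawing from it by inverse-transform sampling (all data and names hypothetical):

import numpy as np
from scipy.ndimage import gaussian_filter as gf

zgal = np.random.exponential(0.2, 500)               # fake redshift catalog
vals, bins = np.histogram(zgal, bins=np.arange(0, 1.0, 0.01))
vals = gf(vals.astype(float), 10)                    # smoothed sensitivity function
cdf = np.cumsum(vals) / np.sum(vals)                 # normalized CDF
centers = 0.5 * (bins[:-1] + bins[1:])
zrand = np.interp(np.random.rand(1000), cdf, centers)  # inverse-transform draw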
Example #6
def random_gal(galreal,Nrand,Nmin=20):
    """ Preferred random galaxy generator. For a given galaxy with a
    given magnitude (and other properties), it calculates the redshift
    sensitivity function from galaxies in a magnitude band around the
    selected one (i.e., including slightly brighter and fainter
    galaxies), and places Nrand new galaxies at a random redshift drawn
    from a smoothed version of the observed sensitivity function. For
    extremely bright or faint galaxies (rare) the sensitivity function
    is calculated from at least Nmin galaxies (i.e., the magnitude
    band is increased)."""
    
    from astro.sampledist import RanDist
    from astro.fit import InterpCubicSpline
    from scipy.ndimage import gaussian_filter as gf
    
    Ckms  = 299792.458
    Nmin  = int(Nmin)  #minimum number of galaxies for the fit
    zmin  = np.min(galreal.ZGAL)
    zmax  = np.max(galreal.ZGAL) + 0.1
    DZ    = 0.01     #delta z for the histogram for getting the spline in z
    smooth_scale = 10. #smoothing scale for the histogram (in number
                       #of bins, so depends on DZ)
    galreal.sort(order='MAG') #np.recarray.sort()
    galrand = galreal.repeat(Nrand)
    delta_mag = 0.5 # half of the magnitude bandwidth to generate the z histogram
    
    bins = np.append(np.linspace(0,zmin,20),np.arange(zmin+DZ, zmax, DZ))
    
    for i in range(len(galreal)):
        if i < Nmin: 
            vals,bins = np.histogram(galreal.ZGAL[:Nmin], bins)
            vals      = gf(vals.astype(float),smooth_scale) # smooth the histogram
            spl       = InterpCubicSpline(0.5*(bins[:-1] + bins[1:]), vals.astype(float))
            
        else:
            delta_mag2=delta_mag
            while True:
                cond = (galreal.MAG > 0) & (galreal.MAG < 90) 
                cond = cond & (galreal.MAG<=galreal.MAG[i]+delta_mag2)&(galreal.MAG>galreal.MAG[i]-delta_mag2)
                if np.sum(cond)>=Nmin:
                    break
                else:
                    delta_mag2+=0.1
            vals,bins = np.histogram(galreal.ZGAL[cond], bins)
            vals      = gf(vals.astype(float),smooth_scale) # smooth the histogram
            spl       = InterpCubicSpline(0.5*(bins[:-1] + bins[1:]), vals.astype(float))
        
        rvals     = np.linspace(0, zmax, 10000)
        rand_z    = RanDist(rvals, spl(rvals))
        zrand     = rand_z.random(Nrand)
        galrand.ZGAL[i*Nrand:(i+1)*Nrand] = zrand
    return galrand
Example #7
def highlight_feature(cal_stack):
    diff = []
    grad = []
    for i in range(len(cal_stack) - 1):
        # Frame-to-frame difference and per-frame gradient map.
        dd = cal_stack[i] - cal_stack[i + 1]
        diff.append(dd)
        dd = get_grad(cal_stack[i])
        grad.append(dd)

    diff = np.array(diff).mean(axis=0)
    grad = np.array(grad).mean(axis=0)
    # Smooth both maps and combine them into a single feature map.
    featuremap = gf(diff, 5) * gf(grad, 5)
    return featuremap
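get_grad is not defined in this snippet; a plausible stand-in (an assumption, not the original helper) is a gradient-magnitude map:

import numpy as np

def get_grad(img):
    # Gradient magnitude via central differences (assumed behavior).
    gy, gx = np.gradient(img.astype(float))
    return np.sqrt(gx**2 + gy**2)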
Example #8
    def xi_ag_LS(self, sigma=0, jacknife=True, f1=None, f2=None, f3=None):
        """Returns the absorber-galaxy 2D2PCF using the Landy & Szalay
        estimator. It smooths the pair-pair counts with a Gaussian
        kernel of width sigma = [rs, ts] (one per direction, see
        scipy.ndimage.gaussian_filter) before computing the
        cross-correlation. It also gives the measurement projected
        along the LOS from DxDy_1D values (not implemented yet)."""

        from pyntejos.xcorr.xcorr import W3
        from scipy.ndimage import gaussian_filter as gf
        s = sigma
        Wag, _ = W3(gf(self.DaDg, s), gf(self.RaRg, s), gf(self.DaRg, s),
                    gf(self.RaDg, s), f1=f1, f2=f2, f3=f3)

        # Jackknife error estimate: leave one field out at a time.
        err_Wjk = np.zeros((len(self.rbinedges) - 1, len(self.tbinedges) - 1), float)
        if jacknife:
            for field in self.fields:
                DaDg_aux = self.DaDg - field.DaDg(self.rbinedges, self.tbinedges)
                RaRg_aux = self.RaRg - field.RaRg(self.rbinedges, self.tbinedges)
                DaRg_aux = self.DaRg - field.DaRg(self.rbinedges, self.tbinedges)
                RaDg_aux = self.RaDg - field.RaDg(self.rbinedges, self.tbinedges)
                Wag_aux, _ = W3(gf(DaDg_aux, s), gf(RaRg_aux, s), gf(DaRg_aux, s),
                                gf(RaDg_aux, s), f1=f1, f2=f2, f3=f3)
                err_Wjk += (Wag - Wag_aux)**2
            N = len(self.fields)
            err_Wjk = (N - 1.) / N * err_Wjk
            err_Wjk = np.sqrt(err_Wjk)
        return Wag, err_Wjk
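For reference, W3 presumably implements the Landy & Szalay (1993) cross-correlation estimator; a minimal numpy sketch of that formula on (normalized) pair counts, as an illustration rather than the pyntejos implementation:

import numpy as np

def landy_szalay(DD, RR, DR, RD):
    # W = (DD - DR - RD + RR) / RR, with empty RR bins masked out.
    RR = np.where(RR == 0, np.nan, RR.astype(float))
    return (DD - DR - RD + RR) / RR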
Example #9
def resistive(run, time, comp, smooth='no', sigma=1):
    """
    Returns the resistive term of Ohm's law at `time` if time is a
    float, or averaged over the list of times if `time` is a list
    of floats.

    @return : np.ndarray of shape run.sfield_shape
    """

    if not isinstance(time, collections.abc.Iterable):
        time = [time]
    ntot = len(time)

    term = np.zeros(shape=run.sfield_shape, dtype=run.dtype)

    for t in time:
        term += run.getJ(t, comp)

    term *= run.getResistivity() / ntot

    if smooth.lower() == 'yes':
        term = gf(term, sigma=sigma, order=0)

    return term
Example #10
def pdderiv(ar, dx=1., ax=0, order=4, smth=None):
    """
    pdderiv gives the second partial derivative
    of a periodic array along a given axis.

    Inputs:
        ar - The input array
        dx - Grid spacing, defaults to 1.
        ax - Axis along which to take the derivative
        order - Order of accuracy, (2,4), defaults to 4
        smth - If set, Gaussian-smooth the array with this sigma first

    Output:
        dar - The derivative array
    """
    if smth is not None:
        ar = gf(ar, sigma=smth)
    if order == 2:
        dar = (np.roll(ar, -1, axis=ax) - 2 * ar +
               np.roll(ar, 1, axis=ax)) / dx**2
    elif order == 4:
        dar = (-np.roll(ar, -2, axis=ax) + 16 * np.roll(ar, -1, axis=ax) -
               30 * ar + 16 * np.roll(ar, 1, axis=ax) -
               np.roll(ar, 2, axis=ax)) / (12 * dx**2)
    else:
        raise ValueError("order must be 2 or 4")

    return dar
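A quick sanity check of pdderiv on a periodic signal whose second derivative is known analytically (assumes numpy; smth is left unset so gf is not needed):

import numpy as np

x = np.linspace(0, 2 * np.pi, 128, endpoint=False)
d2 = pdderiv(np.sin(x), dx=x[1] - x[0], order=4)
print(np.max(np.abs(d2 + np.sin(x))))   # ~1e-7: matches -sin(x)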
Example #11
def Velpad(vel, nPMLs, factor):
    from scipy.ndimage import gaussian_filter as gf
    # Pad the model by replicating edge values into the PML strips.
    velPML = np.pad(vel, [nPMLs[:2], nPMLs[2:]], 'edge')
    Nz, Nx = np.shape(velPML)
    # Smooth each padded row/column, more strongly toward the outer edge.
    for iz in range(nPMLs[0]):
        velPML[iz, :] = gf(velPML[iz, :], (nPMLs[0] - iz) * factor)
    for iz in range(nPMLs[1]):
        velPML[Nz - iz - 1, :] = gf(velPML[Nz - iz - 1, :],
                                    (nPMLs[1] - iz) * factor)
    for ix in range(nPMLs[2]):
        velPML[:, ix] = gf(velPML[:, ix], (nPMLs[2] - ix) * factor)
    for ix in range(nPMLs[3]):
        velPML[:, Nx - ix - 1] = gf(velPML[:, Nx - ix - 1],
                                    (nPMLs[3] - ix) * factor)
    return velPML
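A usage sketch: pad a toy velocity model with 20-point PML strips on each side, smoothing the borders progressively (values hypothetical):

import numpy as np

vel = 1500.0 + np.random.rand(100, 200) * 500.0   # toy velocity model
velPML = Velpad(vel, nPMLs=[20, 20, 20, 20], factor=0.1)
print(velPML.shape)                                # (140, 240)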
Example #12
    def _load_movie(self, time, slc):
        d = self._M.get_fields('all', time, slc=slc)

        for q, s in zip([1., -1.], 'ie'):
            # Temperature tensor components from pressure and density.
            for k in 'xx xy xz yy yz zz'.split():
                d['t' + s + k] = d['p' + s + k] / d['n' + s]

            rotate_ten(d, 't' + s, av='')

            # Species velocity from current density and number density.
            for k in 'xyz':
                d['v' + s + k] = q * d['j' + s + k] / d['n' + s]

            d['|b|'] = np.sqrt(d['bx']**2 + d['by']**2 + d['bz']**2)

            d['psi'] = gf(calc_psi(d), sigma=self.sig)

        return d
Example #13
    def __fig2pixel(p, plotPxl=False, smoothing=0., LIMITS=XYLIM):
        # smoothing is the std of a 2D Gaussian filter; set to 0 to skip smoothing.
        # https://stackoverflow.com/questions/43363388/how-to-save-a-greyscale-matplotlib-plot-to-numpy-array
        from skimage import color
        ax = plot(p, LIMITS=LIMITS)
        fig = ax.get_figure()
        fig.canvas.draw()
        ax.axis("off")

        width, height = fig.get_size_inches() * fig.get_dpi()
        img = np.frombuffer(fig.canvas.tostring_rgb(),
                            dtype='uint8').reshape(int(height), int(width), 3)
        img = color.rgb2gray(img)

        if smoothing > 0:
            img = gf(img, smoothing, truncate=5)

        if plotPxl:
            # - show the figure
            plt.figure()
            plt.imshow(img, vmin=0, vmax=1, cmap="gray")

        return img
Example #14
def collapse_along_LOS(DD, nbins=None, s=0):
    """Sums pair counts over the first nbins along the sightline
    dimension. Returns an array with the values for transverse bins. If
    nbins is None then collapses the whole array.

    Parameters
    ----------
    DD : ndarray
      Pair counts to sum
    nbins :  int, optional
      Number of bins in the radial dimension to collapse along
      If None, take them all
    s : float, optional
      For Gaussian filtering

    Returns
    -------
    DD_1D : ndarray
      Collapsed pair counts
    """
    # Gaussian filter
    if s > 0:
        sDD = gf(DD, s)
    else:
        sDD = DD
    if nbins is None:
        nbins = sDD.shape[0]
    DD_1D = np.sum(sDD[:nbins, :], axis=0)
    return DD_1D
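A usage sketch with toy pair counts (assumes a module-level `from scipy.ndimage import gaussian_filter as gf`, as in the other snippets here):

import numpy as np

DD = np.random.poisson(100, size=(30, 12)).astype(float)
DD_1D = collapse_along_LOS(DD, nbins=10, s=1.0)
print(DD_1D.shape)   # (12,) -- one value per transverse bin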
Example #15
    def __init__(self, cell, ch):
        self.name = cell
        movie = Image.open(self.name + '.tif')
        self.ch = ch[0]
        self.sample = ch[1]
        self.n_ch = len(ch[0])
        self.n_frame = int((movie.n_frames) / self.n_ch)
        self.width = movie.width
        self.height = movie.height

        # I[channel,frame,row,column]
        self.I = np.zeros((self.n_ch, self.n_frame, self.height, self.width),
                          dtype=float)
        self.Is = np.zeros((self.n_ch, self.n_frame, self.height, self.width),
                           dtype=float)
        self.mask1 = np.zeros(
            (self.n_ch, self.n_frame, self.height, self.width), dtype=bool)
        self.mask2 = np.zeros(
            (self.n_ch, self.n_frame, self.height, self.width), dtype=bool)
        self.Im1 = np.zeros((self.n_ch, self.n_frame, self.height, self.width),
                            dtype=float)
        self.Im2 = np.zeros((self.n_ch, self.n_frame, self.height, self.width),
                            dtype=float)
        self.pcc1 = np.zeros((self.n_ch, self.n_frame), dtype=float)
        self.pcc2 = np.zeros((self.n_ch, self.n_frame), dtype=float)

        # Read data from each channel
        for i in range(self.n_ch):
            movie_i = Image.open(cell + '-' + ch[0][i] + '.tif')
            # Read data from each frame
            for j in range(self.n_frame):
                movie_i.seek(j)  # Move to frame j
                I0 = np.array(movie_i, dtype=float)
                self.I[i, j] = I0 - I0.min()
                self.Is[i, j] = gf(self.I[i, j], sigma)

        # Normalize signal from each channel
        for i in range(self.n_ch):
            Imax = self.I[i].max()
            self.I[i] = self.I[i] / Imax
            self.Is[i] = self.Is[i] / Imax

        # Masking to find cell boundary and clusters
        for j in range(self.n_frame):  # Each frame
            for i in range(self.n_ch):  # Each channel
                # Mask1 to find a cell from ch2 (because LAT/LCK are more uniformly spread)
                m1 = self.Is[1, j] > np.percentile(self.Is[1, j], p1)
                self.mask1[i, j] = m1
                self.Im1[i, j, m1] = self.I[i, j, m1]
                # Mask2 to find clusters within the cell boundary
                self.mask2[i, j, m1] = self.Is[i, j, m1] > np.percentile(
                    self.Is[i, j, m1], p2)
                self.Im2[i, j, self.mask2[i, j]] = self.I[i, j, self.mask2[i, j]]
                # Pearson correlation between this channel and the next,
                # restricted to the cell mask.
                i1 = i
                i2 = (i1 + 1) % self.n_ch
                self.pcc1[i, j] = st.pearsonr(self.I[i1, j, m1].flatten(),
                                              self.I[i2, j, m1].flatten())[0]
Example #16
    def _plot1D(self,
                ip,
                var_labels=None,
                ptargs=None,
                ldgargs=None,
                xlim=None,
                ylim=None):
        """ Function to make a plot of a 1D cut

            Parameters
            =========
            ax : (matplotlib.pyplot.axis)
            xy : (numpy.array)
            vrs : [numpy.array]

            Todo: Fix the stupid kwargs
        """

        if var_labels is None: var_labels = self.page_vars_1D

        _xy = self.d[2 * self._cut_dir]
        _lim = _xy[[0, -1]]
        _cut_index = np.s_[ip, :]
        if self._cut_dir == 'x': _cut_index = _cut_index[::-1]

        lines = []
        for a, vrs, labs in zip(self.ax, self.page_vars_1D, var_labels):

            _sl = []
            for v, l in zip(vrs, labs):
                gfv = gf(self.d[v][_cut_index], sigma=self.sig)
                _sl += a.plot(_xy, gfv, label=l)

            lines += [_sl]

            a.set_xlim(_lim)

            _tl = 'cut @ {} = {:1.2f}'.format(
                self._not_cut_dir, self.d[2 * self._not_cut_dir][ip])

            a.set_title(_tl, size=6, loc='right')

            if ldgargs:
                if type(ldgargs) is not dict: ldgargs = {}
                a.legend(**ldgargs)

            a.minorticks_on()

        return lines
Example #17
    def VelPadding(self):
        from scipy.ndimage import gaussian_filter as gf
        nPMLs = self.nPMLs
        factor = self.factor
        vel = self.vel
        velpad = np.pad(vel, [nPMLs[:2], nPMLs[2:]], 'edge')
        Nz, Nx = np.shape(velpad)
        for iz in range(nPMLs[0]):
            velpad[iz, :] = gf(velpad[iz, :], (nPMLs[0] - iz) * factor)
        for iz in range(nPMLs[1]):
            velpad[Nz - iz - 1, :] = gf(velpad[Nz - iz - 1, :],
                                        (nPMLs[1] - iz) * factor)
        for ix in range(nPMLs[2]):
            velpad[:, ix] = gf(velpad[:, ix], (nPMLs[2] - ix) * factor)
        for ix in range(nPMLs[3]):
            velpad[:, Nx - ix - 1] = gf(velpad[:, Nx - ix - 1],
                                        (nPMLs[3] - ix) * factor)
        velpad = np.floor(velpad)
        return velpad
Example #18
def phase_corr(fixed, moving, sigma):
    # Crop the larger image so both share a shape.
    if fixed.shape > moving.shape:
        print('fixed image is larger than moving', fixed.shape, moving.shape)
        fixed = fixed[tuple(map(slice, moving.shape))]
        print('fixed image resized to', fixed.shape)
    elif fixed.shape < moving.shape:
        print('fixed image is smaller than moving', fixed.shape, moving.shape)
        moving = moving[tuple(map(slice, fixed.shape))]
        print('moving image resized to', moving.shape)
    fixed = gf(fixed, sigma=sigma)
    moving = gf(moving, sigma=sigma)
    print('applying phase correlation')
    try:
        shift, error, diffphase = corr(fixed, moving)
    except Exception:
        shift, error, diffphase = np.zeros(len(moving)), 0, 0
        print("couldn't perform PhaseCorr, so shift was cast as zeros")
    return shift
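Here `corr` is presumably something like skimage.registration.phase_cross_correlation, which returns the same (shift, error, diffphase) triple. A usage sketch under that assumption:

import numpy as np
from scipy.ndimage import gaussian_filter as gf
from skimage.registration import phase_cross_correlation as corr

fixed = np.random.rand(128, 128)
moving = np.roll(fixed, (3, -5), axis=(0, 1))  # known integer shift
print(phase_corr(fixed, moving, sigma=1))      # recovers the (3, -5) offset, up to corr's sign convention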
Example #19
def get_dens_from_hab(f: pd.DataFrame):
    # Bounding box of the data, padded and rounded to the grid step R.
    lam = f['LAT'].min()
    laM = f['LAT'].max()
    lom = f['LON'].min()
    loM = f['LON'].max()

    R = .05
    lam = np.floor(lam / R) * R - R
    laM = np.ceil(laM / R) * R + R
    lom = np.floor(lom / R) * R - R
    loM = np.ceil(loM / R) * R + R

    lo_range = np.arange(lom, loM + R / 2, R)
    la_range = np.arange(lam, laM + R / 2, R)
    lo_lab = lo_range[:-1] + R / 2
    la_lab = la_range[:-1] + R / 2

    # 2D histogram of HAB weights on the lon/lat grid.
    d, _, _ = np.histogram2d(f['LON'],
                             f['LAT'],
                             bins=(lo_range, la_range),
                             weights=f['HAB'])

    from scipy.ndimage import gaussian_filter as gf
    d1 = gf(d, sigma=.5)

    ar = xr.DataArray(d1.T,
                      dims=['LAT', 'LON'],
                      coords={
                          'LAT': la_lab,
                          'LON': lo_lab
                      })

    # Convert the per-cell totals to a density (cells are R x R degrees).
    de = ar / (R * R * 100 * 100)

    LA = xr.DataArray(f['LAT'], dims=f.index.name)
    LO = xr.DataArray(f['LON'], dims=f.index.name)

    # Interpolate the density back onto the input points.
    de.name = 'DEN'
    res = de.interp({'LAT': LA, 'LON': LO}).to_dataframe()
    f_out = f.copy()
    f_out['DEN'] = res['DEN']
    return f_out
Example #20
def random_abs(absreal,Nrand,wa,er,sl=3,R=20000,ion='HI'):
    """From a real absorber catalog it creates a random catalog.  For
    a given real absorber with (z_obs,logN_obs,b_obs) it places it at
    a new z_rand, defined by where the line could have been
    observed. 
    
    Input parameters:
    ---
    absreal: numpy rec array with the absorber catalog.
    Nrand:   number of random lines per real one generated (integer).
    wa:      numpy array of wavelenght covered by the spectrum.  
    er:      numpy array of error in the normalized flux of the spectrum for 
             a given wavelenght.
    sl:      significance level for the detection of the absorption line.
    
    From the error we calculate the Wmin = sl * wa * er / (1+z) / R,
    where z = wa/w0 - 1 (w0 is the rest frame wavelenght of the
    transition) and R is the resolution of the spectrograp. We then
    smooth Wmin with a boxcar (sharp edges). 
    
    For the given absorber we transform (logN_obs,b_obs) to a W_obs assuming 
    linear part of the curve-of-growth. 
    
    We then compute the redshifts where W_obs could have been observed
    according to the given Wmin, and place Nrand new absorbers with
    the same properties as the given one accordingly.
    """
    from astro.sampledist import RanDist
    from scipy.ndimage import gaussian_filter as gf
    
    absreal.sort(order='LOGN') #np.recarray.sort() sorted by column density
    Nrand   = int(Nrand)
    absrand = absreal.repeat(Nrand)
    Ckms  = 299792.458
    if ion=='HI':
        w0    = 1215.67  # HI w0 in angstroms
    z     = wa/w0 - 1.   # spectrum in z coordinates
    
    er   = np.where(er==0,1e10,er)
    er   = np.where(np.isnan(er),1e10,er)
    Wmin = 3*sl*w0*er/R  #3*sl*wa*er / (1. + z) / R
    Wmin = gf(Wmin.astype(float),10) # smoothed version 
    
    for i in range(len(absreal)):
        Wr     = logN_b_to_Wr(absreal.LOGN[i],absreal.B[i],ion='HI')
        zgood  = (Wr > Wmin) & (z>0)
        rand_z = RanDist(z, zgood*1.)
        zrand  = rand_z.random(Nrand)
        absrand.ZABS[i*Nrand:(i+1)*Nrand] = zrand

    return absrand 
Example #21
def random_abs(absreal, Nrand, wa, er, sl=3, R=20000, ion='HI'):
    """From a real absorber catalog it creates a random catalog.  For
    a given real absorber with (z_obs,logN_obs,b_obs) it places it at
    a new z_rand, defined by where the line could have been
    observed. 
    
    Input parameters:
    ---
    absreal: numpy rec array with the absorber catalog.
    Nrand:   number of random lines per real one generated (integer).
    wa:      numpy array of wavelenght covered by the spectrum.  
    er:      numpy array of error in the normalized flux of the spectrum for 
             a given wavelenght.
    sl:      significance level for the detection of the absorption line.
    
    From the error we calculate the Wmin = sl * wa * er / (1+z) / R,
    where z = wa/w0 - 1 (w0 is the rest frame wavelenght of the
    transition) and R is the resolution of the spectrograp. We then
    smooth Wmin with a boxcar (sharp edges). 
    
    For the given absorber we transform (logN_obs,b_obs) to a W_obs assuming 
    linear part of the curve-of-growth. 
    
    We then compute the redshifts where W_obs could have been observed
    according to the given Wmin, and place Nrand new absorbers with
    the same properties as the given one accordingly.
    """
    from astro.sampledist import RanDist
    from scipy.ndimage import gaussian_filter as gf

    absreal.sort(order='LOGN')  #np.recarray.sort() sorted by column density
    Nrand = int(Nrand)
    absrand = absreal.repeat(Nrand)
    Ckms = 299792.458
    if ion == 'HI':
        w0 = 1215.67  # HI w0 in angstroms
    z = wa / w0 - 1.  # spectrum in z coordinates

    er = np.where(er == 0, 1e10, er)
    er = np.where(np.isnan(er), 1e10, er)
    Wmin = 3 * sl * w0 * er / R  #3*sl*wa*er / (1. + z) / R
    Wmin = gf(Wmin.astype(float), 10)  # smoothed version

    for i in range(len(absreal)):
        Wr = logN_b_to_Wr(absreal.LOGN[i], absreal.B[i], ion='HI')
        zgood = (Wr > Wmin) & (z > 0)
        rand_z = RanDist(z, zgood * 1.)
        zrand = rand_z.random(Nrand)
        absrand.ZABS[i * Nrand:(i + 1) * Nrand] = zrand

    return absrand
Example #22
def create_slice():
    import numpy as np
    from TurbAn.Utilities.subs import create_object
    from scipy.ndimage import gaussian_filter as gf
    # Create the P3D-Old Object
    rc = create_object()
    # Ask for variables to extract
    vars2ext = input(
        "Which variables to extract? e.g. all or bx by bz etc. ").split()
    # Set those variables to be loaded in P3D object
    rc.vars2load(vars2ext)
    # Ask for time slice to read
    slice2ext = int(input("Which slice out of " + str(rc.numslices) + "? "))
    # Ask if want to smooth data
    smooth_in = input("How much smoothing (integer, 0 for none)? ")
    smooth = int(smooth_in) if smooth_in.strip() else 0
    # Load the time slice
    rc.loadslice(slice2ext)

    # Write the variables to a file
    for i in rc.vars2l:
        filename = rc.dirname + "." + i + "." + str(slice2ext) + ".dat"
        print(filename)
        gf(rc.__dict__[i], sigma=smooth).tofile(filename)
Example #23
    def _smooth(self, xx, yy, H, smooth):
        if smooth:
            from scipy.ndimage import gaussian_filter as gf

            # smooth is assumed to be in velocity units, so we need to
            # convert it to grid units. Also note that we are assuming
            # dvx = dvy, which does not have to be true!
            sig = smooth/(xx[1] - xx[0] + yy[1] - yy[0])*2.

            return gf(H, sigma=sig)

        else:
            return H
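A worked instance of the unit conversion above: with grid spacings xx[1]-xx[0] = yy[1]-yy[0] = 0.5 (velocity units) and smooth = 2.0, sig = 2.0 / (0.5 + 0.5) * 2 = 4.0 grid cells.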
Example #24
    def __preprocess(self):
        logger.info('Pre-processing...')
        try:
            sss = 0.7
            ct = sitk.GetArrayViewFromImage(self.image3D).copy()
            ct[ct < -1024] = -1024  # clip values below air HU
            sig = min((ct.shape[2] / 150.) * sss, 1)
            ct = gf(ct, sig)
            ct = self.__calculate_window(ct)
            ct = np.array([ct, ct, ct]).transpose(1, 2, 3, 0)
            logger.info('Pre-process complete!')
            return ct
        except Exception as e:
            message = f'Failed to pre-process. Reason: {str(e)}.'
            logger.error(message)
Example #25
def pgmultiplt(rc, variables, bs, fs, step, pgcmp, smooth, numsmooth):
    import numpy as np
    import pyqtgraph as pg
    from pyqtgraph.Qt import QtGui, QtCore

    rcd = rc.__dict__
    if smooth == 'y':
        from scipy.ndimage import gaussian_filter as gf
    # Create the window
    app = QtGui.QApplication([])
    win = pg.GraphicsWindow(title="Multiplot-Test")
    win.resize(1000, 600)
    pg.setConfigOptions(antialias=True)

    # Create the canvas: one plot per variable, four plots per row
    pltdict = {}
    imgdict = {}
    for idx, j in enumerate(rc.vars2l):
        if idx >= 4 and np.mod(idx, 4) == 0:
            win.nextRow()
        pltdict[j] = win.addPlot()
        if rc.ny > 1:
            imgdict[j] = pg.ImageItem()
            pltdict[j].addItem(imgdict[j])
    win.show()

    # Loop for plotting multiple time slices
    for it in range(bs, fs, step):
        print('Reading time slice ', it)
        rc.loadslice(it)
        # Make plots
        for j in rc.vars2l:
            if rc.ny == 1 and rc.nz == 1:
                pltdict[j].plot(rc.xx, rcd[j][:, 0, 0], clear=True)
            else:
                if smooth == 'y':
                    imgdict[j].setImage(gf(rcd[j][:, ::-1, 0].T, sigma=numsmooth),
                                        clear=True, lut=pgcmp)
                else:
                    imgdict[j].setImage(rcd[j][:, ::-1, 0].T,
                                        clear=True, lut=pgcmp)
            pltdict[j].setTitle(j + ' ' + sminmax(rcd[j]))
        pg.QtGui.QApplication.processEvents()
        input('Press Enter for the next slice...')
    print('All done!')
Example #26
def psf_recenter(stack, r_mask=40, cy_ext=1.5):
    '''
    Find the lateral center of the PSF and re-center the stack.
    stack: the raw PSF
    r_mask: the radius of the mask size (unused here)
    cy_ext: how far the background should extend to the outside (unused here)
    '''
    nz, ny, nx = stack.shape
    # Brightest pixel of the smoothed stack gives the (y, x) center.
    cy, cx = np.unravel_index(np.argmax(gf(stack, 2)), (nz, ny, nx))[1:]

    ny_shift = int(ny / 2 - cy)
    nx_shift = int(nx / 2 - cx)
    PSF = np.roll(stack, ny_shift, axis=1)
    PSF = np.roll(PSF, nx_shift, axis=2)

    return PSF
Example #27
    def loadslice(self, it, smth=None):
        """
        Load the variables initialized by self.vars2load()
        """
        if self.data_type in ('b', 'bb'):
            for i in self.vars2l:
                self.__dict__[i] = self.readslice(self.__dict__[i + 'f'], it, i)
        else:
            for i in self.vars2l:
                self.__dict__[i] = self.readslice(self.__dict__[i + 'f'], it)
        self.mmd = {}
        for i in self.vars2l:
            if smth is not None:
                self.__dict__[i] = gf(self.__dict__[i], sigma=smth)
            self.mmd[i] = [self.__dict__[i].min(), self.__dict__[i].max()]
        self.time = it * self.dtmovie
Example #28
    def loadslice(self, it, smth=None):
        """
        Load the variables initialized by self.vars2load()
        """
        for i in self.vars2l:
            if i in self.primitives:
                self.__dict__[i] = self.readslice(self.__dict__[i + 'f'], it, i)
        for i in self.vars2l:
            if i in self.derived:
                self._derivedv(i)

        self.mmd = {}
        for i in self.vars2l:
            if smth is not None:
                self.__dict__[i] = gf(self.__dict__[i], sigma=smth)
            self.mmd[i] = [self.__dict__[i].min(), self.__dict__[i].max()]
        self.time = it * self.dtmovie
Example #29
    def xi_gg_LS(self, sigma=0, jacknife=True, f1=None, f2=None, f3=None):
        from pyntejos.xcorr.xcorr import W3
        from scipy.ndimage import gaussian_filter as gf

        s = sigma
        # NB: DgRg is passed for both cross terms; for the symmetric
        # galaxy-galaxy case RgDg == DgRg (presumably intended).
        W3gg, _ = W3(gf(self.DgDg, s), gf(self.RgRg, s), gf(self.DgRg, s),
                     gf(self.DgRg, s), f1=f1, f2=f2, f3=f3)

        # Jackknife error estimate: leave one field out at a time.
        err_W3jk = np.zeros((len(self.rbinedges) - 1, len(self.tbinedges) - 1), float)
        if jacknife:
            for field in self.fields:
                DgDg_aux = self.DgDg - field.DgDg(self.rbinedges, self.tbinedges)
                RgRg_aux = self.RgRg - field.RgRg(self.rbinedges, self.tbinedges)
                DgRg_aux = self.DgRg - field.DgRg(self.rbinedges, self.tbinedges)
                W3gg_aux, _ = W3(gf(DgDg_aux, s), gf(RgRg_aux, s), gf(DgRg_aux, s),
                                 gf(DgRg_aux, s), f1=f1, f2=f2, f3=f3)
                err_W3jk += (W3gg - W3gg_aux)**2
            N = len(self.fields)
            err_W3jk = (N - 1.) / N * err_W3jk
            err_W3jk = np.sqrt(err_W3jk)
        return W3gg, err_W3jk
Example #30
    def __init__(self, cell, ch):
        name = cell + '.tif'
        movie = Image.open(name)
        self.ch = ch[0]
        self.sample = ch[1]
        self.n_ch = len(ch[0])
        self.n_frame = int((movie.n_frames) / self.n_ch)
        self.width = movie.width
        self.height = movie.height

        # I[channel,frame,row,column]
        self.I = np.zeros((self.n_ch, self.n_frame, self.height, self.width),
                          dtype=int)
        self.Is = np.zeros((self.n_ch, self.n_frame, self.height, self.width),
                           dtype=int)
        self.mask1 = np.zeros(
            (self.n_ch, self.n_frame, self.height, self.width), dtype=bool)
        self.mask2 = np.zeros(
            (self.n_ch, self.n_frame, self.height, self.width), dtype=bool)
        self.Im1 = np.zeros((self.n_ch, self.n_frame, self.height, self.width),
                            dtype=int)
        self.Im2 = np.zeros((self.n_ch, self.n_frame, self.height, self.width),
                            dtype=int)

        for i in range(self.n_ch):  # ith channel
            movie_i = Image.open(cell + '-' + ch[0][i] + '.tif')
            for j in range(self.n_frame):  # jth frame
                movie_i.seek(j)
                I0 = np.array(movie_i, dtype=int)
                self.I[i, j] = I0 - I0.min()
                self.Is[i, j] = gf(self.I[i, j], sigma)

        for j in range(self.n_frame):
            for i in range(self.n_ch):
                # Mask1 to find a cell from ch2
                m1 = self.Is[1, j] > np.percentile(self.Is[1, j], p1)
                self.mask1[i, j] = m1
                self.Im1[i, j, m1] = self.I[i, j, m1]
                self.mask2[i, j, m1] = self.Is[i, j, m1] > np.percentile(
                    self.Is[i, j, m1], p2)
                self.Im2[i, j, self.mask2[i, j]] = self.I[i, j, self.mask2[i, j]]
Example #31
def psf_zplane(stack, dz, w0, de=1):
    '''
    Determine the position of the real focal plane.
    Don't mistake it for psf_slice!
    '''
    nz, ny, nx = stack.shape
    cy, cx = np.unravel_index(np.argmax(gf(stack, 2)), (nz, ny, nx))[1:]

    zrange = (nz - 1) * dz * 0.5
    zz = np.linspace(-zrange, zrange, nz)
    # Average the axial profile over a small (2*de+1)^2 patch around the center.
    center_z = stack[:, cy - de:cy + de + 1, cx - de:cx + de + 1]
    im_z = center_z.mean(axis=2).mean(axis=1)

    # Initial guess: amplitude a, center 0, width w0, baseline b.
    b = np.mean((im_z[0], im_z[-1]))
    a = im_z.max() - b

    p0 = (a, 0, w0, b)
    popt = optimize.curve_fit(gaussian, zz, im_z, p0)[0]
    z_offset = popt[1]  # fitted center; the original version was wrong
    return z_offset, zz
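The `gaussian` model being fitted is not shown in this snippet; a plausible 4-parameter form matching the initial guess (a, 0, w0, b) would be (an assumption, not the original definition):

import numpy as np

def gaussian(z, a, z0, w, b):
    # Amplitude a, center z0, width w, baseline b (assumed form).
    return a * np.exp(-2 * (z - z0)**2 / w**2) + b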
Example #32
    def binarize(self, sigma=None, smooth=None):
        """
        Convert spectra to boolean values at each wavelength.

        The procedure estimates the noise by taking the standard deviation
        of the derivative spectrum and dividing by sqrt(2). The
        zero-point offset for each spectrum is estimated as the mean of
        the first 10 wavelengths (empirically seen to be "flat" for most
        spectra) and is removed. Resultant points above sigma times the
        noise estimate are given a value of True.

        Parameters
        ----------
        sigma : float, optional
            Sets the threshold, above which the wavelength is considered
            to have flux.
        smooth : float, optional
            If set, smooth the spectra along the wavelength axis with a
            Gaussian of this width before binarizing.
        """

        # -- smooth if desired
        dat = self.data if not smooth else gf(self.data,[smooth,0,0])

        if sigma:
            # -- estimate the noise and zero point for each spectrum
            print("BINARIZE: estimating noise level and zero-point...")
            sig = (dat[1:]-dat[:-1])[-100:].std(0)/np.sqrt(2.0)
            zer = dat[:10].mean(0)

            # -- converting to binary
            print("BINARIZE: converting spectra to boolean...")
            self.bdata = (dat-zer)>(sigma*sig)
        else:
            # -- careful about diffraction spikes, which look like absorption
            mn_tot = dat.mean(0)
            mn_end = dat[-100:].mean(0)
            index  = mn_tot > mn_end
            mn     = mn_tot*index + mn_end*~index

            # -- binarize by comparison with mean
            self.bdata = dat>mn

        return
Example #33
def pderiv(ar, dx=1., ax=0, order=2, smth=None):
    """
    pderiv gives the first partial derivative
    of a periodic array along a given axis.

    Inputs:
        ar - The input array
        dx - Grid spacing, defaults to 1.
        ax - Axis along which to take the derivative
        order - Order of accuracy, (1,2), defaults to 2

    Output:
        dar - The derivative array
    """
    if smth is not None:
        ar = gf(ar, sigma=smth)
    if order == 1:
        dar = (np.roll(ar, -1, axis=ax) - ar) / dx
    elif order == 2:
        dar = (np.roll(ar, -1, axis=ax) - np.roll(ar, 1, axis=ax)) / (2 * dx)

    return dar
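As with pdderiv above, a quick sanity check on a periodic signal with a known derivative (assumes numpy; smth left unset):

import numpy as np

x = np.linspace(0, 2 * np.pi, 128, endpoint=False)
d1 = pderiv(np.sin(x), dx=x[1] - x[0], order=2)
print(np.max(np.abs(d1 - np.cos(x))))   # ~4e-4: matches cos(x)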
Example #34
def sdss2sl(infile, mask=None, dopcor=False, writetxt=False,
            outfile='lixo.txt', gauss_convolve=0, normspec=False,
            wlnorm=[6000, 6100], dwl=1, integerwl=True):
    """
    Creates an ASCII file from the SDSS spectrum file.

    Parameters
    ----------
    infile : string
        Name of the original SDSS file.
    mask : string
        Name of the ASCII mask definition file.
    dopcor : bool
        Apply doppler correction based on the redshift from the
        infile header.
    writetxt : bool
        Write the output to an ASCII file.
    """

    hdul = pf.open(infile)

    wl0 = hdul[0].header['crval1']
    npoints = np.shape(hdul[0].data)[1]
    dwl = hdul[0].header['cd1_1']  # overrides the dwl keyword argument

    wl = 10 ** (wl0 + np.linspace(0, npoints * dwl, npoints))

    if dopcor:
        wl = wl / (1. + hdul[0].header['z'])

    spectrum = hdul[0].data[0, :] * 1e-17  # in ergs/s/cm^2/A
    error = hdul[0].data[2, :] * 1e-17  # in ergs/s/cm^2/A
    origmask = hdul[0].data[3, :]

    print('Average dispersion: ', np.average(np.diff(wl)))

    # Linear interpolation of the spectrum and resampling of
    # the spectrum.

    f = interp1d(wl, gf(spectrum, gauss_convolve), kind='linear')
    err = interp1d(wl, gf(error, gauss_convolve), kind='linear')
    om = interp1d(wl, gf(origmask, gauss_convolve), kind='linear')

    if integerwl:
        wlrebin = np.arange(int(wl[0]) + 1, int(wl[-1]) - 1)
        frebin = f(wlrebin)
        erebin = err(wlrebin)
        mrebin = om(wlrebin)

    mcol = np.ones(len(wlrebin))

    if mask is not None:
        masktab = np.loadtxt(mask)
        for i in range(len(masktab)):
            mcol[(wlrebin >= masktab[i, 0]) & (wlrebin <= masktab[i, 1])] = 99
    else:
        mcol = mrebin
        mcol[mcol > 3] = 99

    vectors = [wlrebin, frebin, erebin, mcol]
    txt_format = ['%d', '%.6e', '%.6e', '%d']

    slspec = np.column_stack(vectors)

    if writetxt:
        np.savetxt(outfile, slspec, fmt=txt_format)

    return slspec
Example #35
def fits2sl(spec, mask=None, dwl=1, integerwl=True, writetxt=False,
            errfraction=None, normspec=False, normwl=[6100, 6200],
            gauss_convolve=0):
    """
    Converts a 1D FITS spectrum to the format accepted by Starlight.

    Parameters
    ----------
    spec : string
        Name of the FITS spectrum.
    mask : None or string
        Name of the ASCII file containing the regions to be masked,
        as a sequence of initial and final wavelength coordinates, one
        pair per line.
    dwl : number
        The step in wavelength of the resampled spectrum. We recommend
        using the standard 1 angstrom.
    integerwl : boolean
        True if the wavelength coordinates can be written as integers.
    writetxt : boolean
        True if the function should write an output ASCII file.
    errfraction : number
        Fraction of the signal to be used in case the uncertainties
        are unknown.
    gauss_convolve : number
        Sigma of the gaussian kernel to convolve with the spectrum.

    Returns
    -------
    slspec : numpy.ndarray
        2D array with 4 columns, containing wavelength, flux density,
        uncertainty and flags respectively.
    """

    # Loading spectrum from FITS file.
    a = pf.getdata(spec)
    wl = get_wl(spec)

    print('Average dispersion: ', np.average(np.diff(wl)))

    # Linear interpolation of the spectrum and resampling of
    # the spectrum.

    f = interp1d(wl, gf(a, gauss_convolve), kind='linear')
    if integerwl:
        wlrebin = np.arange(int(wl[0]) + 1, int(wl[-1]) - 1)
        frebin = f(wlrebin)

    mcol = np.ones(len(wlrebin))

    if mask is not None:
        masktab = np.loadtxt(mask)
        for i in range(len(masktab)):
            mcol[(wlrebin >= masktab[i, 0]) & (wlrebin <= masktab[i, 1])] = 99

    if normspec:
        normfactor = 1. / np.median(frebin[(wlrebin > normwl[0]) &
                                           (wlrebin < normwl[1])])
    else:
        normfactor = 1.0

    frebin *= normfactor

    if (errfraction is not None) and (mask is not None):
        vectors = [wlrebin, frebin, frebin * errfraction, mcol]
        txt_format = ['%d', '%.6e', '%.6e', '%d']
    elif (errfraction is not None) and (mask is None):
        vectors = [wlrebin, frebin, frebin * errfraction]
        txt_format = ['%d', '%.6e', '%.6e']
    elif (errfraction is None) and (mask is not None):
        vectors = [wlrebin, frebin, mcol]
        txt_format = ['%d', '%.6e', '%d']
    elif (errfraction is None) and (mask is None):
        vectors = [wlrebin, frebin]
        txt_format = ['%d', '%.6e']

    slspec = np.column_stack(vectors)

    if writetxt:
        np.savetxt(spec.strip('fits') + 'txt', slspec, fmt=txt_format)

    return slspec
Example #36
def xradia_star(sh, spokes=48, std=0.5, minfeature=5, ringfact=2, rings=4,
                contrast=1., Fast=False):
    """\
    Creates an Xradia-like star pattern on an array of shape sh.
    std: "resolution" of the Xradia star, i.e. standard deviation of the
         error function used for smoothing the step (in pixels)
    spokes : number of spokes
    minfeature : smallest spoke width (in pixels)
    ringfact : factor by which the feature size grows from ring to ring
    rings : number of rings
    contrast : minimum contrast; set to 0 for a gradual color change from
               zero to 1, set to 1 for no gradient in the spokes

    Fast : if set to False, the error function is evaluated at the edges
            -> preferred when using FFTs, as the features are less prone
               to antialiasing
           if set to True, a simple boolean comparison is used instead and
           the result is later blurred with a Gaussian filter
            -> roughly a factor of 2 faster
    """
    from scipy.ndimage import gaussian_filter as gf
    from scipy.special import erf

    def step(x, a, std=0.5):
        if not Fast:
            return 0.5 * erf((x - a) / (std * 2)) + 0.5
        else:
            return (x > a).astype(float)

    def rect(x, a):
        return step(x, -a / 2., std) * step(-x, -a / 2., std)

    def rectint(x, a, b):
        return step(x, a, std) * step(-x, -b, std)

    ind = np.indices(sh)
    cen = (np.array(sh) - 1) / 2.0
    ind = ind - cen.reshape(cen.shape + len(cen) * (1,))
    z = ind[1] + 1j * ind[0]
    # spokes // 2 must be an integer for np.linspace
    spokeint, spokestep = np.linspace(0.0 * np.pi, 1.0 * np.pi, spokes // 2,
                                      False, True)
    spokeint += spokestep / 2

    r = np.abs(z)
    r0 = (minfeature / 2.0) / np.sin(spokestep / 4.)
    rlist = []
    rin = r0
    for ii in range(rings):
        if rin > max(sh) / np.sqrt(2.):
            break
        rin *= ringfact
        rlist.append((rin * (1 - 2 * np.sin(spokestep / 4.)), rin))

    spokes = np.zeros(sh)
    contrast = np.min((np.abs(contrast), 1))

    mn = min(spokeint)
    mx = max(spokeint)
    for a in spokeint:
        color = 0.5 - np.abs((a - mn) / (mx - mn) - 0.5)
        spoke = (step(np.real(z * np.exp(-1j * (a + spokestep / 4))), 0)
                 - step(np.real(z * np.exp(-1j * (a - spokestep / 4))), 0))
        spokes += ((spoke * color + 0.5 * np.abs(spoke)) * (1 - contrast)
                   + contrast * np.abs(spoke))

    spokes *= step(r, r0)
    spokes *= step(rlist[-1][0], r)
    for ii in range(len(rlist) - 1):
        a, b = rlist[ii]
        spokes *= (1.0 - rectint(r, a, b))

    if Fast:
        return gf(spokes, std)
    else:
        return spokes
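A usage sketch: generate a 512x512 star test pattern with both edge models (assumes a module-level numpy import, as in the original):

import numpy as np

star = xradia_star((512, 512))                  # error-function edges
star_fast = xradia_star((512, 512), Fast=True)  # boolean edges + Gaussian blur
print(star.shape, star.min(), star.max())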
Example #37
    def acquirePSF(self, range_, nSlices, nFrames, center_xy=True, filename=None,
                   mask_size=100, mask_center=(-1, -1)):
        '''
        Acquires a PSF stack. The PSF is returned but also stored internally.

        :Parameters:
            *range_*: float
                The range of the scan around the current axial position in micrometers.
            *nSlices*: int
                The number of PSF slices to acquire.
            *nFrames*: int
                The number of frames to be averaged for each PSF slice.
            *filename*: str
                The file name into which the PSF will be saved.

        :Returns:
            *PSF*: numpy.array
                An array of shape (k,l,m), where k are the number of PSF slices and
                (l,m) the lateral slice dimensions.
        '''

        # Logging
        self._settings['range'] = range_
        self._settings['nSlices'] = nSlices
        self._settings['nFrames'] = nFrames
        self._settings['filename'] = filename

        # Some parameters
        start = range_/2.0
        end = -range_/2.0
        self._dz = abs(range_/(nSlices-1.0))

        # Scan the PSF:
        scan = self._control.piezoscan.scan(start, end, nSlices, nFrames, filename)

        nz, nx, ny = scan.shape
        # An empty PSF
        PSF = np.zeros_like(scan)
        # Geometry info
        g = pupil.Geometry((nx,ny), nx/2.-0.5, ny/2.-0.5, 16)
        # Filled cylinder:
        cyl = np.array(nz*[g.r_pxl<16])
        new_cyl = np.array(nz*[g.r_pxl<mask_size])
        # Fill the empty PSF with every voxel in scan where cyl=True
        if not center_xy:
            PSF[cyl] = scan[cyl]
        # Hollow cylinder
        hcyl = np.array(nz*[np.logical_and(g.r_pxl>=50, g.r_pxl<61)])
        if mask_center[0] > -1:
            g2 = pupil.Geometry((nx,ny), mask_center[0], mask_center[1], 16)
            mask = g2.r_pxl<mask_size
        else:
            mask = g.r_pxl<mask_size
        if filename:
            np.save(filename+"pre-cut", scan)
        if center_xy:
            # The coordinates of the brightest pixel
            cz, cx, cy = np.unravel_index(np.argmax(gf(scan * mask, 2)), scan.shape)
            print("Center found at: ", (cz, cx, cy))
            # We laterally center the scan at the brightest pixel
            cut = scan[:, cx - mask_size:cx + mask_size, cy - mask_size:cy + mask_size]
            PSF = np.zeros((nz, nx, ny))
            PSF[:, nx // 2 - mask_size:nx // 2 + mask_size,
                ny // 2 - mask_size:ny // 2 + mask_size] = cut
            # Background estimation
            self._background = np.mean(scan[hcyl])
            print("Background guess: ", self._background)
        else:
            self._background = np.mean(scan[hcyl])
        PSF[np.logical_not(new_cyl)] = self._background

        self._PSF = PSF
        if filename:
            np.save(filename, PSF)
        return PSF