Example #1
    def __init__(self,
                 file_path,
                 *,
                 num_conv_points=138,
                 dt=0.1,
                 center=0,
                 initial_state=0,
                 total_num_time_points=2000):
        self.slicing_time = 0
        self.interpolation_time = 0
        self.expectation_time = 0
        self.next_order_expectation_time = 0
        self.convolution_time = 0
        self.extend_time = 0
        self.mask_time = 0
        self.dipole_time = 0
        self.base_path = file_path

        self.undersample_factor = 1

        self.set_homogeneous_linewidth(0.05)
        self.set_inhomogeneous_linewidth(0)

        self.load_eigenvalues()

        self.load_mu()

        self.efield_t = np.arange(-(num_conv_points // 2), num_conv_points // 2
                                  + num_conv_points % 2) * dt
        self.efield_w = 2 * np.pi * fftshift(fftfreq(self.efield_t.size, d=dt))

        # Code will not actually function until the following three empty lists are set by the user
        self.efields = []  #initialize empty list of electric field shapes
        self.polarization_sequence = [
        ]  #initialize empty polarization sequence
        self.pulse_times = []  #initialize empty list of pulse arrival times

        HeavisideConvolve.__init__(self, num_conv_points)

        # Initialize time array to be used for all desired delay times
        self.t = np.arange(
            -(total_num_time_points // 2),
            total_num_time_points // 2 + total_num_time_points % 2) * dt

        # The first pulse is assumed to arrive at t = 0, so shift the array to include
        # only points where the signal will be nonzero (the number of negative time points
        # is essentially set by the width of the electric field, via the proxy of the size parameter).
        self.t += self.t[-(self.size // 2 + 1)]
        self.dt = dt

        # f = fftshift(fftfreq(self.t.size-self.t.size%2,d=self.dt))
        f = fftshift(fftfreq(self.t.size, d=self.dt))
        self.w = 2 * np.pi * f

        self.initial_ground_state_index = initial_state

        # Define the unitary operator for each manifold in the RWA given the rotating frequency center
        self.recenter(new_center=center)

        self.gamma_res = 6.91
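
The grid construction above is worth spelling out. A minimal NumPy-only sketch (illustrative values only): np.arange(-(N // 2), N // 2 + N % 2) * dt always yields exactly N points roughly centred on t = 0, and 2 * np.pi * fftshift(fftfreq(N, d=dt)) is the matching, monotonically increasing angular-frequency axis.

import numpy as np
from numpy.fft import fftfreq, fftshift

num_conv_points = 138   # same default as in the example above
dt = 0.1

# Symmetric time grid: exactly N points, centred (or nearly centred) on t = 0
efield_t = np.arange(-(num_conv_points // 2),
                     num_conv_points // 2 + num_conv_points % 2) * dt
assert efield_t.size == num_conv_points

# Matching angular-frequency axis, shifted so it increases monotonically
efield_w = 2 * np.pi * fftshift(fftfreq(num_conv_points, d=dt))
assert np.all(np.diff(efield_w) > 0)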
Example #2
    def fillWithGaussianRandomField(self, ell, Cell, bufferFactor=1, threads=1):
        """
        Generate a Gaussian random field from an input power spectrum specified 
        as ell, Cell.
        
        Notes
        -----
        BufferFactor = 1 means the map will have periodic boundary conditions, while
        BufferFactor > 1 means the map will be generated on a patch bufferFactor 
        times larger in each dimension and then cut out so as to have 
        non-periodic boundary conditions.
        
        Fills the data field of the map with the GRF realization.
        """
        ft = fftTools.fftFromLiteMap(self, threads=threads)
        Ny = self.Ny * bufferFactor
        Nx = self.Nx * bufferFactor

        bufferFactor = int(bufferFactor)

        realPart = numpy.zeros([Ny, Nx])
        imgPart = numpy.zeros([Ny, Nx])

        ly = fftfreq(Ny, d=self.pixScaleY) * (2 * numpy.pi)
        lx = fftfreq(Nx, d=self.pixScaleX) * (2 * numpy.pi)
        # print ly
        modLMap = numpy.zeros([Ny, Nx])
        iy, ix = numpy.mgrid[0:Ny, 0:Nx]
        modLMap[iy, ix] = numpy.sqrt(ly[iy] ** 2 + lx[ix] ** 2)

        s = splrep(ell, Cell, k=3)

        ll = numpy.ravel(modLMap)
        kk = splev(ll, s)
        id = numpy.where(ll > ell.max())
        kk[id] = 0.0
        # add a cosine ^2 falloff at the very end
        # id2 = numpy.where( (ll> (ell.max()-500)) & (ll<ell.max()))
        # lEnd = ll[id2]
        # kk[id2] *= numpy.cos((lEnd-lEnd.min())/(lEnd.max() -lEnd.min())*numpy.pi/2)

        # pylab.loglog(ll,kk)

        area = Nx * Ny * self.pixScaleX * self.pixScaleY
        p = numpy.reshape(kk, [Ny, Nx]) / area * (Nx * Ny) ** 2

        realPart = numpy.sqrt(p) * numpy.random.randn(Ny, Nx)
        imgPart = numpy.sqrt(p) * numpy.random.randn(Ny, Nx)

        kMap = realPart + 1j * imgPart

        if have_pyFFTW:
            data = numpy.real(ifft2(kMap, threads=threads))
        else:
            data = numpy.real(ifft2(kMap))
        b = bufferFactor
        self.data = data[(b - 1) // 2 * self.Ny:(b + 1) // 2 * self.Ny,
                         (b - 1) // 2 * self.Nx:(b + 1) // 2 * self.Nx]
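
For orientation, a stripped-down, NumPy-only analogue of the same recipe, with an assumed toy power-law spectrum standing in for the splined ell/Cell input: build |k| from fftfreq, scale complex Gaussian noise by sqrt(P(k)), and inverse-FFT to obtain a real-space realization.

import numpy as np

N, pix = 128, 0.5                        # assumed grid size and pixel scale
ky = 2 * np.pi * np.fft.fftfreq(N, d=pix)
kx = 2 * np.pi * np.fft.fftfreq(N, d=pix)
kmod = np.sqrt(ky[:, None] ** 2 + kx[None, :] ** 2)

power = np.zeros_like(kmod)
nonzero = kmod > 0
power[nonzero] = kmod[nonzero] ** -2.0   # assumed toy power-law spectrum

amplitude = np.sqrt(power)
noise = np.random.randn(N, N) + 1j * np.random.randn(N, N)
field = np.real(np.fft.ifft2(amplitude * noise))
print(field.shape, field.std())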
Example #3
    def __init__(self,
                 file_path,
                 *,
                 num_conv_points=138,
                 dt=0.1,
                 center=0,
                 initial_state=0,
                 total_num_time_points=2000):
        self.size = num_conv_points
        # Initialize time array to be used for all desired delay times
        self.t = np.arange(
            -(total_num_time_points // 2),
            total_num_time_points // 2 + total_num_time_points % 2) * dt
        self.t += self.t[-(self.size // 2 + 1)]
        self.dt = dt
        parameter_file = os.path.join(file_path, 'params.yaml')
        super().__init__(parameter_file, mask_by_occupation_num=True)
        self.base_path = file_path

        self.load_params()

        self.set_diagrams_and_manifolds()

        self.set_molecular_dipoles()

        self.set_bottom_eigensystem()

        self.set_H()

        self.zero_hamiltonians()

        self.rtol = 1E-6
        self.atol = 1E-6

        self.time_to_extend = 0
        self.time_for_next_order = 0

        ############### Optical part

        self.set_homogeneous_linewidth(0.05)

        self.efield_t = np.arange(-(num_conv_points // 2), num_conv_points // 2
                                  + num_conv_points % 2) * dt
        self.efield_w = 2 * np.pi * fftshift(fftfreq(self.efield_t.size, d=dt))

        # Code will not actually function until the following three empty lists are set by the user
        self.efields = []  #initialize empty list of electric field shapes
        self.polarization_sequence = [
        ]  #initialize empty polarization sequence
        self.pulse_times = []  #initialize empty list of pulse arrival times

        f = fftshift(fftfreq(self.t.size - self.t.size % 2, d=self.dt))
        self.w = 2 * np.pi * f

        # Define the unitary operator for each manifold in the RWA given the rotating frequency center
        self.recenter(new_center=center)
Example #4
def generate_signals(
    shape: tuple,
    specs: list[CompanionSpec],
    template: np.ndarray,
    derotation_angles: np.ndarray = None,
    template_scale_factors: Union[np.ndarray, float, None] = None,
):
    '''Inject signals for the specified companions, optionally using
    derotation angles to *counter*-rotate the coordinates before injection,
    such that a subsequent CCW rotation (e.g. by `derotate_cube`)
    aligns 0 deg PA with +Y

    Parameters
    ----------
    shape : tuple[int,int,int]
    specs : list[CompanionSpec]
    template : np.ndarray
    derotation_angles : Optional[np.ndarray]
    template_scale_factors : Union[np.ndarray,float,None]
        Scale factor relative to 1.0 being the average brightness
        of the primary over the observation, used to scale the
        template image to reflect particularly sharp or poor
        AO correction

    Returns
    -------
    outcube : np.ndarray
    '''

    outcube = np.zeros(shape, dtype=template.dtype)
    n_obs = shape[0]
    template = improc.shift2(template, 0, 0, output_shape=shape[1:])
    ft_template = fft.fft2(template)
    xfreqs = fft.fftfreq(shape[2])
    yfreqs = fft.fftfreq(shape[1])
    if template_scale_factors is None:
        template_scale_factors = np.ones(n_obs)
    if np.isscalar(template_scale_factors):
        template_scale_factors = np.repeat(np.array([template_scale_factors]),
                                           n_obs)
    if derotation_angles is None:
        derotation_angles = np.zeros(n_obs)
    for spec in specs:
        theta = np.deg2rad(90 + spec.pa_deg - derotation_angles)
        for i in range(n_obs):
            dx = spec.r_px * np.cos(theta[i])
            dy = spec.r_px * np.sin(theta[i])
            shifter = np.exp(2j * np.pi * ((-dx * xfreqs[np.newaxis, :]) +
                                           (-dy * yfreqs[:, np.newaxis])))
            cube_contribution = fft.ifft2(ft_template * shifter).real
            cube_contribution *= template_scale_factors[i] * spec.scale
            outcube[i] += cube_contribution
    return outcube
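
The shifter array above is just the Fourier shift theorem: multiplying the template's FFT by exp(-2*pi*i*(dx*fx + dy*fy)) translates it by (dx, dy). A self-contained NumPy check using a single-pixel image and whole-pixel offsets, so the result is exact:

import numpy as np
from numpy import fft

image = np.zeros((64, 64))
image[10, 20] = 1.0                      # single bright pixel at (y=10, x=20)

dy, dx = 5.0, -3.0                       # desired translation
xfreqs = fft.fftfreq(image.shape[1])
yfreqs = fft.fftfreq(image.shape[0])
shifter = np.exp(2j * np.pi * (-dx * xfreqs[np.newaxis, :]
                               - dy * yfreqs[:, np.newaxis]))
shifted = fft.ifft2(fft.fft2(image) * shifter).real

# the peak moved from (10, 20) to (15, 17)
assert np.unravel_index(np.argmax(shifted), shifted.shape) == (15, 17)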
Example #5
    def ft1D(x,y,*,axis=0,zero_DC=False):
        """Takes in x and y = y(x), and returns k and the Fourier transform 
        of y(x) -> f(k) along a single (1D) axis
        Handles all of the annoyances of fftshift and ifftshift, and gets the 
        normalization right
        Args:
            x (np.ndarray) : independent variable, must be 1D
            y (np.ndarray) : dependent variable, can be nD
        Kwargs:
            axis (int) : which axis to perform FFT
            zero_DC (bool) : if true, sets f(0) = 0
    """
        dx = x[1]-x[0]
        k = fftshift(fftfreq(x.size,d=dx))*2*np.pi
        fft_norm = dx

        shifted_x = ifftshift(x)
        if np.isclose(shifted_x[0],0):
            f = fft(ifftshift(y,axes=(axis)),axis=axis)*fft_norm
        else:
            f = fft(y,axis=axis)*fft_norm

        if zero_DC:
            nd_slice = [slice(None) for i in range(len(f.shape))]
            nd_slice[axis] = slice(0,1,1)
            nd_slice = tuple(nd_slice)
            f[nd_slice] = 0

        f = fftshift(f,axes=(axis))

        return k, f
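
A NumPy-only sketch of the convention ft1D implements: with the dx prefactor and the ifftshift of a zero-centred grid, the discrete result approximates the continuous transform of y(x) with kernel exp(-i*k*x), so a unit Gaussian should map to sqrt(2*pi)*exp(-k**2/2).

import numpy as np
from numpy.fft import fft, fftfreq, fftshift, ifftshift

dx = 0.05
x = np.arange(-512, 512) * dx            # zero-centred grid
y = np.exp(-x ** 2 / 2)

k = 2 * np.pi * fftshift(fftfreq(x.size, d=dx))
f = fftshift(fft(ifftshift(y))) * dx     # same steps as ft1D above

analytic = np.sqrt(2 * np.pi) * np.exp(-k ** 2 / 2)
assert np.allclose(f.real, analytic, atol=1e-10)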
Example #6
    def ift1D(k,f,*,axis=0,zero_DC=False):
        """Takes in k and f = f(k), and returns x and the discrete Fourier 
        transform of f(k) -> y(x).
        Handles all of the annoyances of fftshift and ifftshift, and gets the 
        normalization right
        Args:
            x (np.ndarray): independent variable
            y (np.ndarray): dependent variable
        Kwargs:
            axis (int) : which axis to perform FFT
    """
        dk = k[1]-k[0]
        x = fftshift(fftfreq(k.size,d=dk))*2*np.pi
        ifft_norm = dk*k.size/(2*np.pi)

        shifted_k = ifftshift(k)
        if np.isclose(shifted_k[0],0):
            y = ifft(ifftshift(f,axes=(axis)),axis=axis)*ifft_norm
        else:
            y = ifft(f,axis=axis)*ifft_norm

        if zero_DC:
            nd_slice = [slice(None) for i in range(len(y.shape))]
            nd_slice[axis] = slice(0,1,1)
            nd_slice = tuple(nd_slice)
            y[nd_slice] = 0

        y = fftshift(y,axes=(axis))

        return x, y
Example #7
    def set_efields(self,times_list,efields_list,centers_list,phase_discrimination,*,reset_rhos = True,
                    plot_fields = False):
        self.efield_times = times_list
        self.efields = efields_list
        self.centers = centers_list
        self.set_phase_discrimination(phase_discrimination)
        self.dts = []
        self.efield_frequencies = []
        if reset_rhos:
            self.rhos = dict()
        for t in times_list:
            if t.size == 1:
                dt = 1
                w = np.array([0])
            else:
                dt = t[1] - t[0]
                w = fftshift(fftfreq(t.size,d=dt))*2*np.pi
            self.dts.append(dt)
            self.efield_frequencies.append(w)

        self.dt = self.dts[0]

        if self.detection_type == 'polarization':
            try:
                self.local_oscillator = self.efields[-1].copy()
            except AttributeError:
                # fall back to a deep copy for field objects without .copy()
                self.local_oscillator = copy.deepcopy(self.efields[-1])

        for field in self.efields:
            if len(field) == 1:
                # M = 1 is the impulsive limit
                pass
            else:
                self.check_efield_resolution(field,plot_fields = plot_fields)
Example #8
def logscale_normalization(spectra, srate=1., factor=20.):
    t_bins, f_bins = spectra.shape

    scale = np.linspace(0, 1, f_bins)**factor
    scale *= (f_bins - 1) / scale.max()
    scale = np.asarray(np.unique(np.round(scale)), dtype=np.int64)

    # create spectrogram with new freq bins
    spectra_ = np.zeros((t_bins, scale.size), dtype=np.complex128)
    for i in range(0, scale.size):
        if i < scale.size - 1:
            spectra_[:, i] = np.sum(spectra[:, scale[i]:scale[i + 1]], axis=1)
        else:
            spectra_[:, i] = np.sum(spectra[:, scale[i]:], axis=1)

    # list center freq of bins
    allfreqs = np.abs(fft.fftfreq(f_bins * 2, 1. / srate)[:f_bins + 1])

    freqs = []
    for i in range(0, len(scale)):
        if i == len(scale) - 1:
            freqs += [np.mean(allfreqs[scale[i]:])]
        else:
            freqs += [np.mean(allfreqs[scale[i]:scale[i + 1]])]

    return spectra_, np.asarray(freqs)
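
A hypothetical call, assuming logscale_normalization is defined as above and that the fft name refers to NumPy's fft module (the original may import a pyFFTW drop-in instead); the spectrogram values here are random placeholders.

import numpy as np
import numpy.fft as fft   # stand-in for whatever `fft` module the original imports

# hypothetical STFT output: 200 time frames x 257 linear frequency bins
spectra = np.random.randn(200, 257) + 1j * np.random.randn(200, 257)

log_spectra, centre_freqs = logscale_normalization(spectra, srate=16000., factor=20.)
print(log_spectra.shape, centre_freqs.shape)   # fewer, logarithmically spaced bins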
Example #9
    def create_filter(order,
                      cutoff,
                      nyquist,
                      N,
                      ftype='fir',
                      output='freq',
                      shift=True):
        """
        Create a prototype filter.
        """
        h = firwin(order, cutoff, nyq=nyquist)

        if output == 'freq':
            w = fft.fftfreq(N)
            w *= (nyquist * 2)

            H = fft.fft(h, n=N, axis=-1, planner_effort='FFTW_ESTIMATE')

            if shift:
                return fft.fftshift(w), fft.fftshift(H)
            else:
                return w, H

        else:
            return h
Example #10
def ft_shift2(image: np.ndarray,
              dy: float,
              dx: float,
              flux_tol: Union[None, float] = 1e-15,
              output_shape=None):
    """
    Fast Fourier subpixel shifting

    Parameters
    ----------
    image : np.ndarray
        2D input image to shift
    dy : float
        Translation in +Y direction (i.e. a feature at (x, y) moves to (x, y + dy))
    dx : float
        Translation in +X direction (i.e. a feature at (x, y) moves to (x + dx, y))
    flux_tol : float
        Fractional flux change permissible
        ``(sum(output) - sum(image)) / sum(image) < flux_tol``
        (default: 1e-15)
    output_shape : tuple
        shape of output array (default: same as input)
    """
    if output_shape is None:
        output_shape = image.shape
    xfreqs = fft.fftfreq(output_shape[1])
    yfreqs = fft.fftfreq(output_shape[0])
    xform = fft.fft2(image, s=output_shape)
    if output_shape is not None:
        # compute center-to-center displacement such that
        # supplying dx == dy == 0.0 will be a no-op (aside
        # from changing shape)
        orig_ctr_x, orig_ctr_y = (image.shape[1] - 1) / 2, (image.shape[0] -
                                                            1) / 2
        new_ctr_x, new_ctr_y = (output_shape[1] - 1) / 2, (output_shape[0] -
                                                           1) / 2
        base_dx, base_dy = new_ctr_x - orig_ctr_x, new_ctr_y - orig_ctr_y
    else:
        base_dx = base_dy = 0
    modified_xform = xform * np.exp(2j * np.pi * (
        (-(dx + base_dx) * xfreqs)[np.newaxis, :] +
        (-(dy + base_dy) * yfreqs)[:, np.newaxis]))
    new_image = fft.ifft2(modified_xform).real
    frac_diff_flux = (np.sum(image) - np.sum(new_image)) / np.sum(image)
    if flux_tol is not None and frac_diff_flux > flux_tol:
        raise RuntimeError(
            f"Flux conservation violated by {frac_diff_flux} fractional difference (more than {flux_tol})"
        )
    return new_image
Example #11
    def plot2d_fft(self,
                   *,
                   delay_time_start=1,
                   create_figure=True,
                   color_range='auto',
                   subtract_DC=True,
                   draw_colorbar=True,
                   frequency_range=[-1000, 1000],
                   normalize=False,
                   phase=False,
                   save_fig=True,
                   wT_frequency_range='auto'):
        w_ind = np.where((self.w > frequency_range[0])
                         & (self.w < frequency_range[1]))[0]
        w = self.w[w_ind]
        sig = self.signal_vs_delay_times[w_ind, :]

        delay_time_indices = np.where(self.delay_times > delay_time_start)[0]
        delay_times = self.delay_times[delay_time_indices]
        sig = sig[:, delay_time_indices]
        if normalize:
            sig /= np.dot(self.dipoles, self.dipoles)**2
        wT = fftshift(
            fftfreq(delay_times.size,
                    d=(delay_times[1] - delay_times[0]))) * 2 * np.pi
        sig_fft = fft(sig, axis=1)
        if subtract_DC:
            sig_fft[:, 0] = 0
        sig_fft = fftshift(sig_fft, axes=(1))

        ww, wTwT = np.meshgrid(wT, w)

        if create_figure:
            plt.figure()

        if phase:
            plt.title('Phase')
            plot_sig = np.arctan2(np.imag(sig_fft), np.real(sig_fft))
        else:
            plt.title('Magnitude')
            plot_sig = np.abs(sig_fft)
        if color_range == 'auto':
            plt.pcolormesh(ww, wTwT, plot_sig)
        else:
            plt.pcolormesh(ww,
                           wTwT,
                           plot_sig,
                           vmin=color_range[0],
                           vmax=color_range[1])
        if draw_colorbar:
            plt.colorbar()
        plt.xlabel(r'$\omega_T$ ($\omega_0$)', fontsize=16)
        plt.ylabel(r'Detection Frequency ($\omega_0$)', fontsize=16)
        if wT_frequency_range == 'auto':
            plt.xlim([0, np.max(wT)])
        else:
            plt.xlim(wT_frequency_range)
        if save_fig:
            plt.savefig(self.base_path + 'TA_spectra_fft')
Example #12
 def integrated_ft(self,delay_time_start = 1,delay_time_stop = 300):
     delay_time_indices = np.where((self.delay_times > delay_time_start) & (self.delay_times < delay_time_stop))[0]
     delay_times = self.delay_times[delay_time_indices]
     sig = self.signal_vs_delay_times[:,delay_time_indices]
     integrated = np.trapz(sig,x=self.TA.w,axis=0)
     w_T = fftshift(fftfreq(delay_times.size,d=(delay_times[1] - delay_times[0])))*2*np.pi
     integrated_fft = fft(integrated)
     integrated_fft[0] = 0
     integrated_fft = fftshift(integrated_fft)
      return w_T, integrated_fft
Example #13
 def _gen_kr(self):
     """Internal utiltiy to generate coordinate system and other internal
     parameters"""
     k = fftfreq(self.size, self.res)
     kxx, kyy = np.meshgrid(k, k)
     self._kr, self._phi = cart2pol(kyy, kxx)
     # kmag is the radius of the spherical shell of the OTF
     self._kmag = self.ni / self.wl
     # because the OTF only exists on a spherical shell we can calculate
     # a kz value for any pair of kx and ky values
     self._kz = psqrt(self._kmag**2 - self._kr**2)
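
The same polar k-space coordinates can be reproduced with NumPy alone; np.hypot and np.arctan2 stand in for the cart2pol helper, a clipped sqrt stands in for psqrt, and size, res, ni and wl are assumed example values.

import numpy as np
from numpy.fft import fftfreq

size, res = 256, 0.1      # assumed grid size and real-space pixel size
ni, wl = 1.5, 0.52        # assumed refractive index and wavelength

k = fftfreq(size, res)                 # 1D spatial-frequency axis
kxx, kyy = np.meshgrid(k, k)
kr = np.hypot(kyy, kxx)                # radial spatial frequency
phi = np.arctan2(kyy, kxx)             # azimuthal angle
kmag = ni / wl                         # radius of the OTF's spherical shell
# kz exists only inside the shell; outside it the sqrt argument goes negative
kz = np.sqrt(np.clip(kmag ** 2 - kr ** 2, 0, None))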
Example #14
    def set_t(self,optical_dephasing_rate,*,dt='auto'):
        """Sets the time grid upon which all frequency-detected signals will
be calculated on
"""
        max_pos_t = int(self.gamma_res/optical_dephasing_rate)
        if dt == 'auto':
            dt = self.dts[-1] # signal detection bandwidth determined by local oscillator
        self.t = np.arange(-max_pos_t,max_pos_t+dt/2,dt)
        # if self.t.size % 2:
        #     self.t = self.t[:-1]
        self.w = fftshift(fftfreq(self.t.size,d=dt)*2*np.pi)
Example #15
def _update_fft_axes(axes, idx, shape, omitlast, ffunc):
    for i in idx[:-1]:
        __update_axes_label(axes, i)
        axes[i].attrs['shift'] = axes[i].min  # save the lower-bound value of the axis
        axes[i].ax = fftmod.fftshift(fftmod.fftfreq(shape[i], d=axes[i].increment)) * 2 * np.pi
    # the last dimension needs special care for rfft
    i = idx[-1]
    __update_axes_label(axes, i)
    axes[i].attrs['shift'] = axes[i].min  # save the lower-bound value of the axis
    if omitlast:
        axes[i].ax = ffunc(shape[i], d=axes[i].increment) * 2 * np.pi
    else:
        axes[i].ax = fftmod.fftshift(ffunc(shape[i], d=axes[i].increment)) * 2 * np.pi
Example #16
    def set_t(self, optical_dephasing_rate, *, dt='auto'):
        """Sets the time grid upon which all frequency-detected signals will
be calculated on
"""
        max_pos_t = int(self.gamma_res / optical_dephasing_rate)
        max_efield_t = max([np.max(u) for u in self.efield_times]) * 1.05
        max_pos_t = max(max_pos_t, max_efield_t)
        if dt == 'auto':
            dt = self.dts[
                -1]  # signal detection bandwidth determined by local oscillator
        n = int(max_pos_t / dt)
        self.t = np.arange(-n, n + 1, 1) * dt
        self.w = fftshift(fftfreq(self.t.size, d=dt) * 2 * np.pi)
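
A quick numeric check of the grid built here (illustrative values; gamma_res and the pulse-time arrays come from the class in the original, and the max_efield_t comparison is omitted): np.arange(-n, n + 1) * dt always has an odd number of points with t = 0 exactly on the grid, and the frequency axis has the same length.

import numpy as np
from numpy.fft import fftfreq, fftshift

optical_dephasing_rate = 0.05
gamma_res = 6.91            # same constant set in Example #1
dt = 0.25                   # assumed detection time step

max_pos_t = int(gamma_res / optical_dephasing_rate)
n = int(max_pos_t / dt)
t = np.arange(-n, n + 1, 1) * dt
w = fftshift(fftfreq(t.size, d=dt) * 2 * np.pi)

assert t.size % 2 == 1 and t[n] == 0.0   # odd length, t = 0 on the grid
assert w.size == t.size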
Example #17
 def _gen_kr(self):
     """Internal utility function to generate internal state"""
     # generate internal kspace coordinates
     k = fftfreq(self.size, self.res)
     kz = fftfreq(self.zsize, self.zres)
     k_tot = np.meshgrid(kz, k, indexing="ij")
     # calculate r
     kr = norm(k_tot, axis=0)
     # calculate the radius of the spherical shell in k-space
     self.kmag = kmag = self.ni / self.wl
     # determine k-space pixel size
     dk, dkz = k[1] - k[0], kz[1] - kz[0]
     # save output for user
     self.dk, self.dkz = dk, dkz
     # determine the min value for kz given the NA and wavelength
     kz_min = np.sqrt(kmag**2 - (self.na / self.wl)**2)
     # make sure we're not crazy
     assert kz_min >= 0, "Something went horribly wrong"
     # if the user gave us different z and x/y res we need to calculate
     # the positional "error" in k-space to draw the spherical shell
     if dk != dkz:
         with np.errstate(invalid='ignore'):
             dd = np.array((dkz, dk)).reshape(2, 1, 1)
             dkr = norm(np.array(k_tot) * dd, axis=0) / kr
         # we know the origin is zero so replace it
         dkr[0, 0] = 0.0
     else:
         dkr = dk
     if self.dual:
         # if we want dual objectives we need two spherical shells
         kzz = abs(k_tot[0])
     else:
         kzz = k_tot[0]
     # calculate the points on the spherical shell, save them and the
     # corresponding kz, ky and kx coordinates
     self.valid_points = np.logical_and(
         abs(kr - kmag) < dkr, kzz > kz_min + dkr)
     self.kzz, self.krr = [k[self.valid_points] for k in k_tot]
Example #18
    def generate_white_noise(self):
        """Generate a white noise with the relevant power spectrum.

        Returns
        -------
        white_noise : np.ndarray
        """
        # Compute the k grid
        d = self.Lbox / self.dimensions / (2 * np.pi)
        all_k = [fft.fftfreq(self.dimensions, d=d)] * (self.Ndim - 1) + [
            fft.rfftfreq(self.dimensions, d=d)
        ]

        self.kgrid = kgrid = np.array(np.meshgrid(*all_k, indexing="ij"))
        self.knorm = knorm = np.sqrt(np.sum(kgrid**2, axis=0))

        # Compute Pk
        Pk = np.zeros_like(knorm)
        mask = knorm > 0
        Pk[mask] = self.Pk(knorm[mask] * 2)

        # Compute white noise (in Fourier space)
        mu = np.random.standard_normal([self.dimensions] * self.Ndim)
        muk = fft.rfftn(mu)
        deltak = muk * np.sqrt(Pk)

        # Compute field in real space
        white_noise = fft.irfftn(deltak)

        # Normalize variance
        deltak_smoothed = deltak * self.filter.W(knorm)
        field = fft.irfftn(deltak_smoothed)
        std = field.std()

        self.white_noise_fft = deltak * self.sigma8 / std
        self.white_noise = white_noise * self.sigma8 / std

        return self.white_noise
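
The mixed fftfreq/rfftfreq grid above mirrors the half-spectrum layout produced by rfftn: full frequency axes on every dimension except the last, which keeps only non-negative frequencies. A small shape check with assumed toy values for Ndim, dimensions and Lbox:

import numpy as np
from numpy import fft

Ndim, dimensions, Lbox = 3, 32, 100.0      # assumed toy values
d = Lbox / dimensions / (2 * np.pi)

all_k = [fft.fftfreq(dimensions, d=d)] * (Ndim - 1) + [fft.rfftfreq(dimensions, d=d)]
kgrid = np.array(np.meshgrid(*all_k, indexing="ij"))
knorm = np.sqrt(np.sum(kgrid ** 2, axis=0))

field = np.random.standard_normal([dimensions] * Ndim)
assert fft.rfftn(field).shape == knorm.shape   # (32, 32, 17)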
Example #19
    def _precompute(self):
        # Computations that need only be done once per dataset.
        self._nevts = self._v0.shape[0]  # Number of events

        # *NORMALIZE v0*
        # Reshape to facilitate max_vz normalization using numpy broadcast rules.
        v0 = np.moveaxis(self._v0, 0, -1)
        # Normalize each event signal by the maximum z-component amplitude.
        # We perform this succinctly using numpy multidimensional broadcasting rules.
        self._max_vz = np.abs(v0[1, :, :]).max(axis=0)
        self._max_vz.flags.writeable = False
        v0 = v0 / self._max_vz
        # Reshape back to original shape.
        self._v0 = np.moveaxis(v0, -1, 0)
        self._v0.flags.writeable = False

        # Transform v0 to the spectral domain along the last (time) axis
        self._fv0 = fft(self._v0, axis=-1)
        self._fv0.flags.writeable = False

        # Compute discrete frequencies
        self._w = 2 * np.pi * fftfreq(self._npts, self._dt)
        self._w.flags.writeable = False
Example #20
    def set_efields(self,
                    times_list,
                    efields_list,
                    centers_list,
                    phase_discrimination,
                    *,
                    reset_psis=True,
                    plot_fields=False):
        self.efield_times = times_list
        self.efields = efields_list
        self.centers = centers_list
        self.set_phase_discrimination(phase_discrimination)
        self.dts = []
        self.efield_frequencies = []
        if reset_psis:
            self.psis = dict()
        for t in times_list:
            if t.size == 1:
                dt = 1
                w = np.array([0])
            else:
                dt = t[1] - t[0]
                w = fftshift(fftfreq(t.size, d=dt)) * 2 * np.pi
            self.dts.append(dt)
            self.efield_frequencies.append(w)
            self.heaviside_convolve_list.append(HeavisideConvolve(t.size))

        self.dt = self.dts[0]

        # Initialize unperturbed wavefunction
        self.set_psi0(self.initial_state)

        if self.detection_type in ('polarization', 'integrated_polarization'):
            try:
                self.local_oscillator = self.efields[-1].copy()
            except:
                self.local_oscillator = copy.deepcopy(self.efields[-1])
Example #21
def fftFromLiteMap(liteMap, applySlepianTaper=False, nresForSlepian=3.0, threads=1):
    """
    Create an fft2D object from a liteMap.
    
    Parameters
    ----------
    liteMap : liteMap.liteMap 
        The map object whose fft is being taken.
    applySlepianTaper : bool, optional 
        If ``True``, apply the lowest order taper (to minimize edge-leakage).
        Default is ``False``.
    nresForSlepian : float, optional 
        If ``applySlepianTaper`` = ``True``, this specifies the resolution of 
        the taper to use. Default is 3.0.
    threads : int, optional
        Number of threads to use in pyFFTW calculations. Default is 1.
        
    Returns
    -------
    ft : fftTools.fft2D
        The fft2D object corresponding the input liteMap.
    """
    ft = fft2D()
        
    ft.Nx = liteMap.Nx
    ft.Ny = liteMap.Ny
    trace.issue("flipper.fftTools", 1, "Taking FFT of map with (Ny, Nx) = (%d, %d)" % (ft.Ny, ft.Nx))
    
    ft.pixScaleX = liteMap.pixScaleX 
    ft.pixScaleY = liteMap.pixScaleY
    
    lx =  2*numpy.pi*fftfreq(ft.Nx, d=ft.pixScaleX)
    ly =  2*numpy.pi*fftfreq(ft.Ny, d=ft.pixScaleY)
    
    ix = numpy.mod(numpy.arange(ft.Nx*ft.Ny), ft.Nx)
    iy = numpy.arange(ft.Nx*ft.Ny)//ft.Nx
    
    modLMap        = numpy.zeros([ft.Ny, ft.Nx])
    modLMap[iy,ix] = numpy.sqrt(lx[ix]**2 + ly[iy]**2)
    ft.modLMap  =  modLMap
    
    ft.lx = lx
    ft.ly = ly
    ft.ix = ix
    ft.iy = iy
    ft.thetaMap = numpy.zeros([ft.Ny, ft.Nx])
    ft.thetaMap[iy[:], ix[:]] = numpy.arctan2(ly[iy[:]], lx[ix[:]])
    ft.thetaMap *= 180./numpy.pi
    
    mp = liteMap.data.copy()
    taper = mp.copy()*0. + 1.0

    if (applySlepianTaper):
        try:
            f = open(taperDir + os.path.sep + 'taper_Ny%d_Nx%d_Nres%3.1f' %(ft.Ny, ft.Nx, nresForSlepian), mode="rb")
            taper = pickle.load(f)
            f.close()
        except:
            taper = slepianTaper00(ft.Nx, ft.Ny,nresForSlepian)
            f = open(taperDir + os.path.sep + 'taper_Ny%d_Nx%d_Nres%3.1f'%(ft.Ny, ft.Nx, nresForSlepian), mode="wb")
            pickle.dump(taper,f)
            f.close()
    
    if have_pyFFTW: 
        ft.kMap = fft2(mp*taper, threads=threads)
    else:
        ft.kMap = fft2(mp*taper)
    del mp, modLMap, lx, ly
    return ft
Example #22
def create_filter(order,
                  cutoff,
                  nyquist,
                  N,
                  ftype='fir',
                  output='freq',
                  shift=True):
    """ Create a lowpass FIR filter. This function is meant to create only the prototype filter,
    where highpass, bandpass, or bandstop can all be transformed from the lowpass filter.

    Parameters:
    -----------
    order: int
        The filter order. The number of taps for an FIR filter.

    cutoff: float
        The cutoff frequency of the filter.

    nyquist: int or float
        This parameter is half the sampling rate.

    ftype: str
        Declare the filter to be an FIR ('fir') or an IIR ('iir').

    output: str
        Declare the return of the function to be in 'time' domain or 'freq' domain.

    shift: bool
        Declare whether fftshift is applied to the FFT filter coefficients.

    Returns:
    --------
    If output is 'freq'
        w: ndarray
            Frequencies corresponding to the frequency components. The units are
            the same as those chosen for the Nyquist rate.

        H: ndarray, complex
            The values of the FFT of the filter coefficients.

    if output is 'time'
        h: ndarray
            The values of the filter coefficients
    """
    if order > N:
        raise ValueError(
            "The order of the filter should not be longer than the length for FFT (binsize)."
        )

    if cutoff >= nyquist:
        raise ValueError(
            "The cutoff frequency must be smaller than the Nyquist rate."
        )

    if output not in ['time', 'freq']:
        raise ValueError("'output' must be either 'time' or 'freq'!")

    h = firwin(order, cutoff, nyq=nyquist)

    if output == 'freq':
        w = fft.fftfreq(N)
        w *= (nyquist * 2)

        H = fft.fft(h, n=N, axis=-1, planner_effort='FFTW_ESTIMATE')

        if shift:
            return fft.fftshift(w), fft.fftshift(H)
        else:
            return w, H

    elif output == 'time':
        return 1, h
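
A NumPy/SciPy sketch of the 'freq' branch: scaling fftfreq's normalized bins by 2 x nyquist gives the frequency axis in the same units as the Nyquist rate, and fftshift centres DC. The values are arbitrary, firwin's fs keyword is used in place of nyq, and plain numpy.fft replaces the planner_effort-aware FFT of the original.

import numpy as np
from numpy import fft
from scipy.signal import firwin

order, cutoff, nyquist, N = 64, 100.0, 500.0, 1024   # assumed example values

h = firwin(order, cutoff, fs=2 * nyquist)   # prototype lowpass FIR

w = fft.fftfreq(N) * (2 * nyquist)          # frequency axis in the same units as nyquist
H = fft.fft(h, n=N)                         # numpy.fft has no planner_effort argument

w, H = fft.fftshift(w), fft.fftshift(H)
# passband gain near DC should dominate the stopband response near Nyquist
assert abs(H[np.argmin(np.abs(w))]) > abs(H[np.argmin(np.abs(w - nyquist))])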
Example #23
def fold(fh, comm, samplerate, fedge, fedge_at_top, nchan,
         nt, ntint, ngate, ntbin, ntw, dm, fref, phasepol,
         dedisperse='incoherent',
         do_waterfall=True, do_foldspec=True, verbose=True,
         progress_interval=100, rfi_filter_raw=None, rfi_filter_power=None,
         return_fits=False):
    """
    FFT data, fold by phase/time and make a waterfall series

    Folding is done from the position the file is currently in

    Parameters
    ----------
    fh : file handle
        handle to file holding voltage timeseries
    comm: MPI communicator or None
        will use size, rank attributes
    samplerate : Quantity
        rate at which samples were originally taken and thus double the
        band width (frequency units)
    fedge : float
        edge of the frequency band (frequency units)
    fedge_at_top: bool
        whether edge is at top (True) or bottom (False)
    nchan : int
        number of frequency channels for FFT
    nt, ntint : int
        total number nt of sets, each containing ntint samples in each file
        hence, total # of samples is nt*ntint, with each sample containing
        a single polarisation
    ngate, ntbin : int
        number of phase and time bins to use for folded spectrum
        ntbin should be an integer fraction of nt
    ntw : int
        number of time samples to combine for waterfall (does not have to be
        integer fraction of nt)
    dm : float
        dispersion measure of pulsar, used to correct for ism delay
        (column number density)
    fref: float
        reference frequency for dispersion measure
    phasepol : callable
        function that returns the pulsar phase for time in seconds relative to
        start of the file that is read.
    dedisperse : None or string (default: incoherent).
        None, 'incoherent', 'coherent', 'by-channel'.
        Note: None really does nothing
    do_waterfall, do_foldspec : bool
        whether to construct waterfall, folded spectrum (default: True)
    verbose : bool or int
        whether to give some progress information (default: True)
    progress_interval : int
        Ping every progress_interval sets
    return_fits : bool (default: False)
        return a subint fits table for rank == 0 (None otherwise)

    """
    assert dedisperse in (None, 'incoherent', 'by-channel', 'coherent')
    need_fine_channels = dedisperse in ['by-channel', 'coherent']
    assert nchan % fh.nchan == 0
    if dedisperse in ['incoherent', 'by-channel'] and fh.nchan > 1:
        oversample = nchan // fh.nchan
        assert ntint % oversample == 0
    else:
        oversample = 1

    if dedisperse == 'coherent' and fh.nchan > 1:
        raise ValueError("Cannot coherently dedisperse channelized data.")

    if comm is None:
        mpi_rank = 0
        mpi_size = 1
    else:
        mpi_rank = comm.rank
        mpi_size = comm.size

    npol = getattr(fh, 'npol', 1)
    assert npol == 1 or npol == 2
    if verbose > 1 and mpi_rank == 0:
        print("Number of polarisations={}".format(npol))

    # initialize folded spectrum and waterfall
    # TODO: use estimated number of points to set dtype
    if do_foldspec:
        foldspec = np.zeros((ntbin, nchan, ngate, npol**2), dtype=np.float32)
        icount = np.zeros((ntbin, nchan, ngate), dtype=np.int32)
    else:
        foldspec = None
        icount = None

    if do_waterfall:
        nwsize = nt*ntint//ntw//oversample
        waterfall = np.zeros((nwsize, nchan, npol**2), dtype=np.float64)
    else:
        waterfall = None

    if verbose and mpi_rank == 0:
        print('Reading from {}'.format(fh))

    nskip = fh.tell()//fh.blocksize
    if nskip > 0:
        if verbose and mpi_rank == 0:
            print('Starting {0} blocks = {1} bytes out from start.'
                  .format(nskip, nskip*fh.blocksize))

    dt1 = (1./samplerate).to(u.s)
    # need 2*nchan real-valued samples for each FFT
    if fh.telescope == 'lofar':
        dtsample = fh.dtsample
    else:
        dtsample = nchan // oversample * 2 * dt1
    tstart = dtsample * ntint * nskip

    # pre-calculate time delay due to dispersion in coarse channels
    # for channelized data, frequencies are known

    tb = -1. if fedge_at_top else +1.
    if fh.nchan == 1:
        if getattr(fh, 'data_is_complex', False):
            # for complex data, really each complex sample consists of
            # 2 real ones, so multiply dt1 by 2.
            freq = fedge + tb * fftfreq(nchan, 2.*dt1)
            if dedisperse == 'coherent':
                fcoh = fedge + tb * fftfreq(nchan*ntint, 2.*dt1)
                fcoh.shape = (-1, 1)
            elif dedisperse == 'by-channel':
                fcoh = freq + tb * fftfreq(ntint, dtsample)[:, np.newaxis]
        else:  # real data
            freq = fedge + tb * rfftfreq(nchan*2, dt1)
            if dedisperse == 'coherent':
                fcoh = fedge + tb * rfftfreq(ntint*nchan*2, dt1)
                fcoh.shape = (-1, 1)
            elif dedisperse == 'by-channel':
                fcoh = freq + tb * fftfreq(ntint, dtsample)[:, np.newaxis]
        freq_in = freq

    else:
        # Input frequencies may not be the ones going out.
        freq_in = fh.frequencies
        if oversample == 1:
            freq = freq_in
        else:
            freq = freq_in[:, np.newaxis] + tb * fftfreq(oversample, dtsample)

        fcoh = freq_in + tb * fftfreq(ntint, dtsample)[:, np.newaxis]

    # print('fedge_at_top={0}, tb={1}'.format(fedge_at_top, tb))
    # By taking only up to nchan, we remove the top channel at the Nyquist
    # frequency for real, unchannelized data.
    ifreq = freq[:nchan].ravel().argsort()

    # pre-calculate time offsets in (input) channelized streams
    dt = dispersion_delay_constant * dm * (1./freq_in**2 - 1./fref**2)

    if need_fine_channels:
        # pre-calculate required turns due to dispersion.
        #
        # set frequency relative to which dispersion is coherently corrected
        if dedisperse == 'coherent':
            _fref = fref
        else:
            _fref = freq_in[np.newaxis, :]
        # (check via eq. 5.21 and following in
        # Lorimer & Kramer, Handbook of Pulsar Astronomy
        dang = (dispersion_delay_constant * dm * fcoh *
                (1./_fref-1./fcoh)**2) * u.cycle
        with u.set_enabled_equivalencies(u.dimensionless_angles()):
            dd_coh = np.exp(dang * 1j).conj().astype(np.complex64)

        # add dimension for polarisation
        dd_coh = dd_coh[..., np.newaxis]

    # Calculate the part of the whole file this node should handle.
    size_per_node = (nt-1)//mpi_size + 1
    start_block = mpi_rank*size_per_node
    end_block = min((mpi_rank+1)*size_per_node, nt)
    for j in range(start_block, end_block):
        if verbose and j % progress_interval == 0:
            print('#{:4d}/{:4d} is doing {:6d}/{:6d} [={:6d}/{:6d}]; '
                  'time={:18.12f}'
                  .format(mpi_rank, mpi_size, j+1, nt,
                          j-start_block+1, end_block-start_block,
                          (tstart+dtsample*j*ntint).value))  # time since start

        # Just in case numbers were set wrong -- break if file ends;
        # better keep at least the work done.
        try:
            raw = fh.seek_record_read(int((nskip+j)*fh.blocksize),
                                      fh.blocksize)
        except(EOFError, IOError) as exc:
            print("Hit {0!r}; writing data collected.".format(exc))
            break
        if verbose >= 2:
            print("#{:4d}/{:4d} read {} items"
                  .format(mpi_rank, mpi_size, raw.size), end="")

        if npol == 2 and raw.dtype.fields is not None:
            raw = raw.view(list(raw.dtype.fields.values())[0][0])

        if fh.nchan == 1:  # raw.shape=(ntint*npol)
            raw = raw.reshape(-1, npol)
        else:              # raw.shape=(ntint, nchan*npol)
            raw = raw.reshape(-1, fh.nchan, npol)

        if dedisperse == 'incoherent' and oversample > 1:
            raw = ifft(raw, axis=1, **_fftargs).reshape(-1, nchan, npol)
            raw = fft(raw, axis=1, **_fftargs)

        if rfi_filter_raw is not None:
            raw, ok = rfi_filter_raw(raw)
            if verbose >= 2:
                print("... raw RFI (zap {0}/{1})"
                      .format(np.count_nonzero(~ok), ok.size), end="")

        if np.can_cast(raw.dtype, np.float32):
            vals = raw.astype(np.float32)
        else:
            assert raw.dtype.kind == 'c'
            vals = raw

        # For pre-channelized data, data are always complex,
        # and should have shape (ntint, nchan, npol).
        # For baseband data, we wish to get to the same shape for
        # incoherent or by_channel, or just to fully channelized for coherent.
        if fh.nchan == 1:
            # If we need coherent dedispersion, do FT of whole thing,
            # otherwise to output channels, mimicking pre-channelized data.
            if raw.dtype.kind == 'c':  # complex data
                nsamp = len(vals) if dedisperse == 'coherent' else nchan
                vals = fft(vals.reshape(-1, nsamp, npol), axis=1,
                           **_fftargs)
            else:  # real data
                nsamp = len(vals) if dedisperse == 'coherent' else nchan * 2
                vals = rfft(vals.reshape(-1, nsamp, npol), axis=1,
                            **_rfftargs)
                # Sadly, the way data are stored depends on what FFT routine
                # one is using.  We cannot deal with scipy's.
                if vals.dtype.kind == 'f':
                    raise TypeError("Can no longer deal with scipy's format "
                                    "for storing FTs of real data.")

        if fedge_at_top:
            # take complex conjugate to ensure by-channel de-dispersion is
            # applied correctly.
            # This needs to be done for ARO data, since we are in 2nd Nyquist
            # zone; not clear it is needed for other telescopes.
            np.conj(vals, out=vals)

        # Now we coherently dedisperse, either all of it or by channel.
        if need_fine_channels:
            # for by_channel, we have vals.shape=(ntint, nchan, npol),
            # and want to FT over ntint to get fine channels;
            if vals.shape[0] > 1:
                fine = fft(vals, axis=0, **_fftargs)
            else:
                # for coherent, we just reshape:
                # (1, ntint*nchan, npol) -> (ntint*nchan, 1, npol)
                fine = vals.reshape(-1, 1, npol)

            # Dedisperse.
            fine *= dd_coh

            # Still have fine.shape=(ntint, nchan, npol),
            # w/ nchan=1 for coherent.
            if fine.shape[1] > 1 or raw.dtype.kind == 'c':
                vals = ifft(fine, axis=0, **_fftargs)
            else:
                vals = irfft(fine, axis=0, **_rfftargs)

            if fine.shape[1] == 1 and nchan > 1:
                # final FT to get requested channels
                if vals.dtype.kind == 'f':
                    vals = vals.reshape(-1, nchan*2, npol)
                    vals = rfft(vals, axis=1, **_rfftargs)
                else:
                    vals = vals.reshape(-1, nchan, npol)
                    vals = fft(vals, axis=1, **_fftargs)
            elif dedisperse == 'by-channel' and oversample > 1:
                vals = vals.reshape(-1, oversample, fh.nchan, npol)
                vals = fft(vals, axis=1, **_fftargs)
                vals = vals.transpose(0, 2, 1, 3).reshape(-1, nchan, npol)

            # vals[time, chan, pol]
            if verbose >= 2:
                print("... dedispersed", end="")

        if npol == 1:
            power = vals.real**2 + vals.imag**2
        else:
            p0 = vals[..., 0]
            p1 = vals[..., 1]
            power = np.empty(vals.shape[:-1] + (4,), np.float32)
            power[..., 0] = p0.real**2 + p0.imag**2
            power[..., 1] = p0.real*p1.real + p0.imag*p1.imag
            power[..., 2] = p0.imag*p1.real - p0.real*p1.imag
            power[..., 3] = p1.real**2 + p1.imag**2

        if verbose >= 2:
            print("... power", end="")

        # current sample positions and corresponding time in stream
        isr = j*(ntint // oversample) + np.arange(ntint // oversample)
        tsr = (isr*dtsample*oversample)[:, np.newaxis]

        if rfi_filter_power is not None:
            power = rfi_filter_power(power, tsr.squeeze())
            print("... power RFI", end="")

        # correct for delay if needed
        if dedisperse in ['incoherent', 'by-channel']:
            # tsample.shape=(ntint/oversample, nchan_in)
            tsr = tsr - dt

        if do_waterfall:
            # # loop over corresponding positions in waterfall
            # for iw in xrange(isr[0]//ntw, isr[-1]//ntw + 1):
            #     if iw < nwsize:  # add sum of corresponding samples
            #         waterfall[iw, :] += np.sum(power[isr//ntw == iw],
            #                                    axis=0)[ifreq]
            iw = np.round((tsr / dtsample / oversample).to(1)
                          .value / ntw).astype(int)
            for k, kfreq in enumerate(ifreq):  # sort in frequency while at it
                iwk = iw[:, (0 if iw.shape[1] == 1 else kfreq // oversample)]
                iwk = np.clip(iwk, 0, nwsize-1, out=iwk)
                iwkmin = iwk.min()
                iwkmax = iwk.max()+1
                for ipow in range(npol**2):
                    waterfall[iwkmin:iwkmax, k, ipow] += np.bincount(
                        iwk-iwkmin, power[:, kfreq, ipow], iwkmax-iwkmin)
            if verbose >= 2:
                print("... waterfall", end="")

        if do_foldspec:
            ibin = (j*ntbin) // nt  # bin in the time series: 0..ntbin-1

            # times and cycles since start time of observation.
            tsample = tstart + tsr
            phase = (phasepol(tsample.to(u.s).value.ravel())
                     .reshape(tsample.shape))
            # corresponding PSR phases
            iphase = np.remainder(phase*ngate, ngate).astype(int)

            for k, kfreq in enumerate(ifreq):  # sort in frequency while at it
                iph = iphase[:, (0 if iphase.shape[1] == 1
                                 else kfreq // oversample)]
                # sum and count samples by phase bin
                for ipow in range(npol**2):
                    foldspec[ibin, k, :, ipow] += np.bincount(
                        iph, power[:, kfreq, ipow], ngate)
                icount[ibin, k, :] += np.bincount(
                    iph, power[:, kfreq, 0] != 0., ngate).astype(np.int32)

            if verbose >= 2:
                print("... folded", end="")

        if verbose >= 2:
            print("... done")

    #Commented out as workaround, this was causing "Referenced before assignment" errors with JB data
    #if verbose >= 2 or verbose and mpi_rank == 0:
    #    print('#{:4d}/{:4d} read {:6d} out of {:6d}'
    #          .format(mpi_rank, mpi_size, j+1, nt))

    if npol == 1:
        if do_foldspec:
            foldspec = foldspec.reshape(foldspec.shape[:-1])
        if do_waterfall:
            waterfall = waterfall.reshape(waterfall.shape[:-1])

    return foldspec, icount, waterfall
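
The incoherent correction above applies the usual cold-plasma dispersion delay, proportional to DM * (1/f**2 - 1/fref**2). A rough standalone sketch; the constant below is the standard value of roughly 4.1488e3 s MHz^2 pc^-1 cm^3 and is an assumption here, since the module-level dispersion_delay_constant is not shown.

import numpy as np

# approximate dispersion constant in s * MHz**2 * cm**3 / pc (assumed value)
dispersion_delay_constant = 4.1488e3

dm = 26.8                      # pc / cm**3, illustrative
fref = 600.0                   # MHz, reference frequency
freq_in = np.array([580.0, 590.0, 600.0, 610.0])   # MHz, coarse channel centres

# time delay of each channel relative to the reference frequency
dt_channel = dispersion_delay_constant * dm * (1. / freq_in ** 2 - 1. / fref ** 2)
print(dt_channel)   # seconds; positive below fref, zero at fref, negative above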
Example #24
            freq = fedge + tb * rfftfreq(nchan*2, dt1)
            if dedisperse == 'coherent':
                fcoh = fedge + tb * rfftfreq(ntint*nchan*2, dt1)
                fcoh.shape = (-1, 1)
            elif dedisperse == 'by-channel':
                fcoh = freq + tb * fftfreq(ntint, dtsample)[:, np.newaxis]
        freq_in = freq

    else:
        # Input frequencies may not be the ones going out.
        freq_in = fh.frequencies
        if oversample == 1:
            freq = freq_in
        else:
            freq = (freq_in[:, np.newaxis] + tb * u.Hz *
                    rfftfreq(oversample*2, dtsample.value/2.)[::2])
        # same as fine = rfftfreq(2*ntint, dtsample.value/2.)[::2]
        #fcoh = freq_in[np.newaxis, :] + tb * u.Hz * rfftfreq(
        #    ntint*2, dtsample.value/2.)[::2, np.newaxis]  #Old, incorrect by-channel
        fcoh = freq_in - u.Hz * np.fft.fftfreq(ntint, dtsample.value)[:,np.newaxis]
Example #25
def fold(fh, comm, samplerate, fedge, fedge_at_top, nchan,
         nt, ntint, ngate, ntbin, ntw, dm, fref, phasepol,
         dedisperse='incoherent',
         do_waterfall=True, do_foldspec=True, verbose=True,
         progress_interval=100, rfi_filter_raw=None, rfi_filter_power=None,
         return_fits=False):
    """
    FFT data, fold by phase/time and make a waterfall series

    Folding is done from the position the file is currently in

    Parameters
    ----------
    fh : file handle
        handle to file holding voltage timeseries
    comm: MPI communicator or None
        will use size, rank attributes
    samplerate : Quantity
        rate at which samples were originally taken and thus double the
        band width (frequency units)
    fedge : float
        edge of the frequency band (frequency units)
    fedge_at_top: bool
        whether edge is at top (True) or bottom (False)
    nchan : int
        number of frequency channels for FFT
    nt, ntint : int
        total number nt of sets, each containing ntint samples in each file
        hence, total # of samples is nt*ntint, with each sample containing
        a single polarisation
    ngate, ntbin : int
        number of phase and time bins to use for folded spectrum
        ntbin should be an integer fraction of nt
    ntw : int
        number of time samples to combine for waterfall (does not have to be
        integer fraction of nt)
    dm : float
        dispersion measure of pulsar, used to correct for ism delay
        (column number density)
    fref: float
        reference frequency for dispersion measure
    phasepol : callable
        function that returns the pulsar phase for time in seconds relative to
        start of the file that is read.
    dedisperse : None or string (default: incoherent).
        None, 'incoherent', 'coherent', 'by-channel'.
        Note: None really does nothing
    do_waterfall, do_foldspec : bool
        whether to construct waterfall, folded spectrum (default: True)
    verbose : bool or int
        whether to give some progress information (default: True)
    progress_interval : int
        Ping every progress_interval sets
    return_fits : bool (default: False)
        return a subint fits table for rank == 0 (None otherwise)

    """
    assert dedisperse in (None, 'incoherent', 'by-channel', 'coherent')
    need_fine_channels = dedisperse in ['by-channel', 'coherent']
    assert nchan % fh.nchan == 0
    if dedisperse == 'by-channel' and fh.nchan > 1:
        oversample = nchan // fh.nchan
        assert ntint % oversample == 0
    else:
        oversample = 1

    if dedisperse == 'coherent' and fh.nchan > 1:
        raise ValueError("Cannot coherently dedisperse channelized data.")

    if comm is None:
        mpi_rank = 0
        mpi_size = 1
    else:
        mpi_rank = comm.rank
        mpi_size = comm.size

    npol = getattr(fh, 'npol', 1)
    assert npol == 1 or npol == 2
    if verbose > 1 and mpi_rank == 0:
        print("Number of polarisations={}".format(npol))

    # initialize folded spectrum and waterfall
    # TODO: use estimated number of points to set dtype
    if do_foldspec:
        foldspec = np.zeros((ntbin, nchan, ngate, npol**2), dtype=np.float32)
        icount = np.zeros((ntbin, nchan, ngate), dtype=np.int32)
    else:
        foldspec = None
        icount = None

    if do_waterfall:
        nwsize = nt*ntint//ntw//oversample
        waterfall = np.zeros((nwsize, nchan, npol**2), dtype=np.float64)
    else:
        waterfall = None

    if verbose and mpi_rank == 0:
        print('Reading from {}'.format(fh))

    nskip = fh.tell()//fh.blocksize
    if nskip > 0:
        if verbose and mpi_rank == 0:
            print('Starting {0} blocks = {1} bytes out from start.'
                  .format(nskip, nskip*fh.blocksize))

    dt1 = (1./samplerate).to(u.s)
    # need 2*nchan real-valued samples for each FFT
    if fh.telescope == 'lofar':
        dtsample = fh.dtsample
    else:
        dtsample = nchan // oversample * 2 * dt1
    tstart = dtsample * ntint * nskip

    # pre-calculate time delay due to dispersion in coarse channels
    # for channelized data, frequencies are known

    tb = -1. if fedge_at_top else +1.
    if fh.nchan == 1:
        if getattr(fh, 'data_is_complex', False):
            # for complex data, really each complex sample consists of
            # 2 real ones, so multiply dt1 by 2.
            freq = fedge + tb * fftfreq(nchan, 2.*dt1)
            if dedisperse == 'coherent':
                fcoh = fedge + tb * fftfreq(nchan*ntint, 2.*dt1)
                fcoh.shape = (-1, 1)
            elif dedisperse == 'by-channel':
                fcoh = freq + tb * fftfreq(ntint, dtsample)[:, np.newaxis]
        else:  # real data
            freq = fedge + tb * rfftfreq(nchan*2, dt1)
            if dedisperse == 'coherent':
                fcoh = fedge + tb * rfftfreq(ntint*nchan*2, dt1)
                fcoh.shape = (-1, 1)
            elif dedisperse == 'by-channel':
                fcoh = freq + tb * fftfreq(ntint, dtsample)[:, np.newaxis]
        freq_in = freq

    else:
        # Input frequencies may not be the ones going out.
        freq_in = fh.frequencies
        if oversample == 1:
            freq = freq_in
        else:
Example #26
    def fillWithGaussianRandomField(self,
                                    ell,
                                    Cell,
                                    bufferFactor=1,
                                    threads=1):
        """
        Generate a Gaussian random field from an input power spectrum specified 
        as ell, Cell.
        
        Notes
        -----
        BufferFactor = 1 means the map will have periodic boundary conditions, while
        BufferFactor > 1 means the map will be generated on a patch bufferFactor 
        times larger in each dimension and then cut out so as to have 
        non-periodic boundary conditions.
        
        Fills the data field of the map with the GRF realization.
        """
        ft = fftTools.fftFromLiteMap(self, threads=threads)
        Ny = self.Ny * bufferFactor
        Nx = self.Nx * bufferFactor

        bufferFactor = int(bufferFactor)

        realPart = numpy.zeros([Ny, Nx])
        imgPart = numpy.zeros([Ny, Nx])

        ly = fftfreq(Ny, d=self.pixScaleY) * (2 * numpy.pi)
        lx = fftfreq(Nx, d=self.pixScaleX) * (2 * numpy.pi)
        #print ly
        modLMap = numpy.zeros([Ny, Nx])
        iy, ix = numpy.mgrid[0:Ny, 0:Nx]
        modLMap[iy, ix] = numpy.sqrt(ly[iy]**2 + lx[ix]**2)

        s = splrep(ell, Cell, k=3)

        ll = numpy.ravel(modLMap)
        kk = splev(ll, s)
        id = numpy.where(ll > ell.max())
        kk[id] = 0.
        #add a cosine ^2 falloff at the very end
        # id2 = numpy.where( (ll> (ell.max()-500)) & (ll<ell.max()))
        # lEnd = ll[id2]
        # kk[id2] *= numpy.cos((lEnd-lEnd.min())/(lEnd.max() -lEnd.min())*numpy.pi/2)

        #pylab.loglog(ll,kk)

        area = Nx * Ny * self.pixScaleX * self.pixScaleY
        p = numpy.reshape(kk, [Ny, Nx]) / area * (Nx * Ny)**2

        realPart = numpy.sqrt(p) * numpy.random.randn(Ny, Nx)
        imgPart = numpy.sqrt(p) * numpy.random.randn(Ny, Nx)

        kMap = realPart + 1j * imgPart

        if have_pyFFTW:
            data = numpy.real(ifft2(kMap, threads=threads))
        else:
            data = numpy.real(ifft2(kMap))
        b = bufferFactor
        self.data = data[(b - 1) // 2 * self.Ny:(b + 1) // 2 * self.Ny,
                         (b - 1) // 2 * self.Nx:(b + 1) // 2 * self.Nx]
Example #27
def fold(fh,
         comm,
         samplerate,
         fedge,
         fedge_at_top,
         nchan,
         nt,
         ntint,
         ngate,
         ntbin,
         ntw,
         dm,
         fref,
         phasepol,
         dedisperse='incoherent',
         do_waterfall=True,
         do_foldspec=True,
         verbose=True,
         progress_interval=100,
         rfi_filter_raw=None,
         rfi_filter_power=None,
         return_fits=False):
    """
    FFT data, fold by phase/time and make a waterfall series

    Folding is done from the position the file is currently in

    Parameters
    ----------
    fh : file handle
        handle to file holding voltage timeseries
    comm: MPI communicator or None
        will use size, rank attributes
    samplerate : Quantity
        rate at which samples were originally taken and thus double the
        band width (frequency units)
    fedge : float
        edge of the frequency band (frequency units)
    fedge_at_top: bool
        whether edge is at top (True) or bottom (False)
    nchan : int
        number of frequency channels for FFT
    nt, ntint : int
        total number nt of sets, each containing ntint samples in each file
        hence, total # of samples is nt*ntint, with each sample containing
        a single polarisation
    ngate, ntbin : int
        number of phase and time bins to use for folded spectrum
        ntbin should be an integer fraction of nt
    ntw : int
        number of time samples to combine for waterfall (does not have to be
        integer fraction of nt)
    dm : float
        dispersion measure of pulsar, used to correct for ism delay
        (column number density)
    fref: float
        reference frequency for dispersion measure
    phasepol : callable
        function that returns the pulsar phase for time in seconds relative to
        start of the file that is read.
    dedisperse : None or string (default: incoherent).
        None, 'incoherent', 'coherent', 'by-channel'.
        Note: None really does nothing
    do_waterfall, do_foldspec : bool
        whether to construct waterfall, folded spectrum (default: True)
    verbose : bool or int
        whether to give some progress information (default: True)
    progress_interval : int
        Ping every progress_interval sets
    return_fits : bool (default: False)
        return a subint fits table for rank == 0 (None otherwise)

    """
    assert dedisperse in (None, 'incoherent', 'by-channel', 'coherent')
    need_fine_channels = dedisperse in ['by-channel', 'coherent']
    assert nchan % fh.nchan == 0
    if dedisperse in ['incoherent', 'by-channel'] and fh.nchan > 1:
        oversample = nchan // fh.nchan
        assert ntint % oversample == 0
    else:
        oversample = 1

    if dedisperse == 'coherent' and fh.nchan > 1:
        raise ValueError("Cannot coherently dedisperse channelized data.")

    if comm is None:
        mpi_rank = 0
        mpi_size = 1
    else:
        mpi_rank = comm.rank
        mpi_size = comm.size

    npol = getattr(fh, 'npol', 1)
    assert npol == 1 or npol == 2
    if verbose > 1 and mpi_rank == 0:
        print("Number of polarisations={}".format(npol))

    # initialize folded spectrum and waterfall
    # TODO: use estimated number of points to set dtype
    if do_foldspec:
        foldspec = np.zeros((ntbin, nchan, ngate, npol**2), dtype=np.float32)
        icount = np.zeros((ntbin, nchan, ngate), dtype=np.int32)
    else:
        foldspec = None
        icount = None

    if do_waterfall:
        nwsize = nt * ntint // ntw // oversample
        waterfall = np.zeros((nwsize, nchan, npol**2), dtype=np.float64)
    else:
        waterfall = None

    if verbose and mpi_rank == 0:
        print('Reading from {}'.format(fh))

    nskip = fh.tell() // fh.blocksize
    if nskip > 0:
        if verbose and mpi_rank == 0:
            print('Starting {0} blocks = {1} bytes out from start.'.format(
                nskip, nskip * fh.blocksize))

    dt1 = (1. / samplerate).to(u.s)
    # need 2*nchan real-valued samples for each FFT
    if fh.telescope == 'lofar':
        dtsample = fh.dtsample
    else:
        dtsample = nchan // oversample * 2 * dt1
    tstart = dtsample * ntint * nskip

    # pre-calculate time delay due to dispersion in coarse channels
    # for channelized data, frequencies are known

    tb = -1. if fedge_at_top else +1.
    if fh.nchan == 1:
        if getattr(fh, 'data_is_complex', False):
            # for complex data, really each complex sample consists of
            # 2 real ones, so multiply dt1 by 2.
            freq = fedge + tb * fftfreq(nchan, 2. * dt1)
            if dedisperse == 'coherent':
                fcoh = fedge + tb * fftfreq(nchan * ntint, 2. * dt1)
                fcoh.shape = (-1, 1)
            elif dedisperse == 'by-channel':
                fcoh = freq + tb * fftfreq(ntint, dtsample)[:, np.newaxis]
        else:  # real data
            freq = fedge + tb * rfftfreq(nchan * 2, dt1)
            if dedisperse == 'coherent':
                fcoh = fedge + tb * rfftfreq(ntint * nchan * 2, dt1)
                fcoh.shape = (-1, 1)
            elif dedisperse == 'by-channel':
                fcoh = freq + tb * fftfreq(ntint, dtsample)[:, np.newaxis]
        freq_in = freq

    else:
        # Input frequencies may not be the ones going out.
        freq_in = fh.frequencies
        if oversample == 1:
            freq = freq_in
        else:
            freq = freq_in[:, np.newaxis] + tb * fftfreq(oversample, dtsample)

        fcoh = freq_in + tb * fftfreq(ntint, dtsample)[:, np.newaxis]

    # print('fedge_at_top={0}, tb={1}'.format(fedge_at_top, tb))
    # By taking only up to nchan, we remove the top channel at the Nyquist
    # frequency for real, unchannelized data.
    ifreq = freq[:nchan].ravel().argsort()

    # pre-calculate time offsets in (input) channelized streams
    dt = dispersion_delay_constant * dm * (1. / freq_in**2 - 1. / fref**2)

    if need_fine_channels:
        # pre-calculate required turns due to dispersion.
        #
        # set frequency relative to which dispersion is coherently corrected
        if dedisperse == 'coherent':
            _fref = fref
        else:
            _fref = freq_in[np.newaxis, :]
        # (check via Eq. 5.21 and following in
        # Lorimer & Kramer, Handbook of Pulsar Astronomy)
        dang = (dispersion_delay_constant * dm * fcoh *
                (1. / _fref - 1. / fcoh)**2) * u.cycle
        with u.set_enabled_equivalencies(u.dimensionless_angles()):
            dd_coh = np.exp(dang * 1j).conj().astype(np.complex64)

        # add dimension for polarisation
        dd_coh = dd_coh[..., np.newaxis]

    # Calculate the part of the whole file this node should handle.
    size_per_node = (nt - 1) // mpi_size + 1
    start_block = mpi_rank * size_per_node
    end_block = min((mpi_rank + 1) * size_per_node, nt)
    for j in range(start_block, end_block):
        if verbose and j % progress_interval == 0:
            print('#{:4d}/{:4d} is doing {:6d}/{:6d} [={:6d}/{:6d}]; '
                  'time={:18.12f}'.format(
                      mpi_rank, mpi_size, j + 1, nt, j - start_block + 1,
                      end_block - start_block,
                      (tstart +
                       dtsample * j * ntint).value))  # time since start

        # Just in case numbers were set wrong -- break if file ends;
        # better keep at least the work done.
        try:
            raw = fh.seek_record_read(int((nskip + j) * fh.blocksize),
                                      fh.blocksize)
        except (EOFError, IOError) as exc:
            print("Hit {0!r}; writing data collected.".format(exc))
            break
        if verbose >= 2:
            print("#{:4d}/{:4d} read {} items".format(mpi_rank, mpi_size,
                                                      raw.size),
                  end="")

        if npol == 2 and raw.dtype.fields is not None:
            raw = raw.view(list(raw.dtype.fields.values())[0][0])

        if fh.nchan == 1:  # raw.shape=(ntint*npol)
            raw = raw.reshape(-1, npol)
        else:  # raw.shape=(ntint, nchan*npol)
            raw = raw.reshape(-1, fh.nchan, npol)

        if dedisperse == 'incoherent' and oversample > 1:
            raw = ifft(raw, axis=1, **_fftargs).reshape(-1, nchan, npol)
            raw = fft(raw, axis=1, **_fftargs)

        if rfi_filter_raw is not None:
            raw, ok = rfi_filter_raw(raw)
            if verbose >= 2:
                print("... raw RFI (zap {0}/{1})".format(
                    np.count_nonzero(~ok), ok.size),
                      end="")

        if np.can_cast(raw.dtype, np.float32):
            vals = raw.astype(np.float32)
        else:
            assert raw.dtype.kind == 'c'
            vals = raw

        # For pre-channelized data, data are always complex,
        # and should have shape (ntint, nchan, npol).
        # For baseband data, we wish to get to the same shape for
        # incoherent or by_channel, or just to fully channelized for coherent.
        if fh.nchan == 1:
            # If we need coherent dedispersion, do FT of whole thing,
            # otherwise to output channels, mimicking pre-channelized data.
            if raw.dtype.kind == 'c':  # complex data
                nsamp = len(vals) if dedisperse == 'coherent' else nchan
                vals = fft(vals.reshape(-1, nsamp, npol), axis=1, **_fftargs)
            else:  # real data
                nsamp = len(vals) if dedisperse == 'coherent' else nchan * 2
                vals = rfft(vals.reshape(-1, nsamp, npol), axis=1, **_rfftargs)
                # Sadly, the way data are stored depends on what FFT routine
                # one is using.  We cannot deal with scipy's.
                if vals.dtype.kind == 'f':
                    raise TypeError("Can no longer deal with scipy's format "
                                    "for storing FTs of real data.")

        if fedge_at_top:
            # take complex conjugate to ensure by-channel de-dispersion is
            # applied correctly.
            # This needs to be done for ARO data, since we are in 2nd Nyquist
            # zone; not clear it is needed for other telescopes.
            np.conj(vals, out=vals)

        # Now we coherently dedisperse, either all of it or by channel.
        if need_fine_channels:
            # for by_channel, we have vals.shape=(ntint, nchan, npol),
            # and want to FT over ntint to get fine channels;
            if vals.shape[0] > 1:
                fine = fft(vals, axis=0, **_fftargs)
            else:
                # for coherent, we just reshape:
                # (1, ntint*nchan, npol) -> (ntint*nchan, 1, npol)
                fine = vals.reshape(-1, 1, npol)

            # Dedisperse.
            fine *= dd_coh

            # Still have fine.shape=(ntint, nchan, npol),
            # w/ nchan=1 for coherent.
            if fine.shape[1] > 1 or raw.dtype.kind == 'c':
                vals = ifft(fine, axis=0, **_fftargs)
            else:
                vals = irfft(fine, axis=0, **_rfftargs)

            if fine.shape[1] == 1 and nchan > 1:
                # final FT to get requested channels
                if vals.dtype.kind == 'f':
                    vals = vals.reshape(-1, nchan * 2, npol)
                    vals = rfft(vals, axis=1, **_rfftargs)
                else:
                    vals = vals.reshape(-1, nchan, npol)
                    vals = fft(vals, axis=1, **_fftargs)
            elif dedisperse == 'by-channel' and oversample > 1:
                vals = vals.reshape(-1, oversample, fh.nchan, npol)
                vals = fft(vals, axis=1, **_fftargs)
                vals = vals.transpose(0, 2, 1, 3).reshape(-1, nchan, npol)

            # vals[time, chan, pol]
            if verbose >= 2:
                print("... dedispersed", end="")

        if npol == 1:
            power = vals.real**2 + vals.imag**2
        else:
            p0 = vals[..., 0]
            p1 = vals[..., 1]
            power = np.empty(vals.shape[:-1] + (4, ), np.float32)
            power[..., 0] = p0.real**2 + p0.imag**2
            power[..., 1] = p0.real * p1.real + p0.imag * p1.imag
            power[..., 2] = p0.imag * p1.real - p0.real * p1.imag
            power[..., 3] = p1.real**2 + p1.imag**2

        if verbose >= 2:
            print("... power", end="")

        # current sample positions and corresponding time in stream
        isr = j * (ntint // oversample) + np.arange(ntint // oversample)
        tsr = (isr * dtsample * oversample)[:, np.newaxis]

        if rfi_filter_power is not None:
            power = rfi_filter_power(power, tsr.squeeze())
            print("... power RFI", end="")

        # correct for delay if needed
        if dedisperse in ['incoherent', 'by-channel']:
            # tsample.shape=(ntint/oversample, nchan_in)
            tsr = tsr - dt

        if do_waterfall:
            # # loop over corresponding positions in waterfall
            # for iw in range(isr[0]//ntw, isr[-1]//ntw + 1):
            #     if iw < nwsize:  # add sum of corresponding samples
            #         waterfall[iw, :] += np.sum(power[isr//ntw == iw],
            #                                    axis=0)[ifreq]
            iw = np.round(
                (tsr / dtsample / oversample).to(1).value / ntw).astype(int)
            for k, kfreq in enumerate(ifreq):  # sort in frequency while at it
                iwk = iw[:, (0 if iw.shape[1] == 1 else kfreq // oversample)]
                iwk = np.clip(iwk, 0, nwsize - 1, out=iwk)
                iwkmin = iwk.min()
                iwkmax = iwk.max() + 1
                for ipow in range(npol**2):
                    waterfall[iwkmin:iwkmax, k,
                              ipow] += np.bincount(iwk - iwkmin,
                                                   power[:, kfreq, ipow],
                                                   iwkmax - iwkmin)
            if verbose >= 2:
                print("... waterfall", end="")

        if do_foldspec:
            ibin = (j * ntbin) // nt  # bin in the time series: 0..ntbin-1

            # times and cycles since start time of observation.
            tsample = tstart + tsr
            phase = (phasepol(tsample.to(u.s).value.ravel()).reshape(
                tsample.shape))
            # corresponding PSR phases
            iphase = np.remainder(phase * ngate, ngate).astype(int)

            for k, kfreq in enumerate(ifreq):  # sort in frequency while at it
                iph = iphase[:, (0 if iphase.shape[1] == 1 else kfreq //
                                 oversample)]
                # sum and count samples by phase bin
                for ipow in range(npol**2):
                    foldspec[ibin, k, :,
                             ipow] += np.bincount(iph, power[:, kfreq, ipow],
                                                  ngate)
                icount[ibin,
                       k, :] += np.bincount(iph, power[:, kfreq, 0] != 0.,
                                            ngate).astype(np.int32)

            if verbose >= 2:
                print("... folded", end="")

        if verbose >= 2:
            print("... done")

    # Commented out as a workaround: this was causing "referenced before
    # assignment" errors with JB data.
    # if verbose >= 2 or verbose and mpi_rank == 0:
    #     print('#{:4d}/{:4d} read {:6d} out of {:6d}'
    #           .format(mpi_rank, mpi_size, j+1, nt))

    if npol == 1:
        if do_foldspec:
            foldspec = foldspec.reshape(foldspec.shape[:-1])
        if do_waterfall:
            waterfall = waterfall.reshape(waterfall.shape[:-1])

    return foldspec, icount, waterfall
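
A minimal standalone sketch of the two dispersion corrections used in fold() above: the per-channel incoherent delay dt and the coherent phase factor dd_coh. All numbers (DM, band, grid sizes) are hypothetical, and the dispersion constant is assumed to be the usual 4.148808e3 s MHz^2 cm^3/pc expressed here in Hz; the real code carries astropy units instead.

import numpy as np

D = 4.148808e3 * 1e12          # dispersion constant, s Hz^2 cm^3 / pc (assumed)
dm = 26.8                      # pc cm^-3, hypothetical pulsar
fref = 325e6                   # Hz, reference frequency
freq_in = np.linspace(310e6, 326e6, 16)   # Hz, hypothetical coarse channels

# incoherent correction: shift each channel by its extra dispersive delay
dt = D * dm * (1. / freq_in**2 - 1. / fref**2)   # seconds (positive below fref)

# coherent correction: rotate every fine frequency by the residual phase
fcoh = freq_in + np.linspace(-0.5e6, 0.5e6, 64)[:, np.newaxis]   # Hz
phase_cycles = D * dm * fcoh * (1. / fref - 1. / fcoh)**2        # turns of phase
dd_coh = np.exp(-2j * np.pi * phase_cycles).astype(np.complex64)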
Ejemplo n.º 28
0
    def fillWithGRFFromTemplate(self, twodPower, bufferFactor=1, threads=1):
        """
        Generate a Gaussian random field from an input power spectrum 
        specified as a 2d powerMap
        
        Notes
        -----
        BufferFactor = 1 means the map will have periodic boundary conditions, while
        BufferFactor > 1 means the map will be generated on a patch bufferFactor
        times larger in each dimension and then cut out so as to have
        non-periodic boundary conditions.
        
        Fills the data field of the map with the GRF realization.
        """

        ft = fftTools.fftFromLiteMap(self, threads=threads)
        Ny = self.Ny * bufferFactor
        Nx = self.Nx * bufferFactor

        bufferFactor = int(bufferFactor)
        assert (bufferFactor >= 1)

        realPart = numpy.zeros([Ny, Nx])
        imgPart = numpy.zeros([Ny, Nx])

        ly = fftfreq(Ny, d=self.pixScaleY) * (2 * numpy.pi)
        lx = fftfreq(Nx, d=self.pixScaleX) * (2 * numpy.pi)
        #print ly
        modLMap = numpy.zeros([Ny, Nx])
        iy, ix = numpy.mgrid[0:Ny, 0:Nx]
        modLMap[iy, ix] = numpy.sqrt(ly[iy]**2 + lx[ix]**2)

        # divide out area factor
        area = twodPower.Nx * twodPower.Ny * twodPower.pixScaleX * twodPower.pixScaleY
        twodPower.powerMap *= (twodPower.Nx * twodPower.Ny)**2 / area

        if bufferFactor > 1 or twodPower.Nx != Nx or twodPower.Ny != Ny:

            lx_shifted = fftshift(twodPower.lx)
            ly_shifted = fftshift(twodPower.ly)
            twodPower_shifted = fftshift(twodPower.powerMap)

            f_interp = interp2d(lx_shifted, ly_shifted, twodPower_shifted)

            # ell = numpy.ravel(twodPower.modLMap)
            # Cell = numpy.ravel(twodPower.powerMap)
            # print ell
            # print Cell
            # s = splrep(ell,Cell,k=3)
            #
            #
            # ll = numpy.ravel(modLMap)
            # kk = splev(ll,s)

            kk = f_interp(fftshift(lx), fftshift(ly))
            kk = ifftshift(kk)

            # id = numpy.where(modLMap > ell.max())
            # kk[id] = 0.
            # add a cosine ^2 falloff at the very end
            # id2 = numpy.where( (ll> (ell.max()-500)) & (ll<ell.max()))
            # lEnd = ll[id2]
            # kk[id2] *= numpy.cos((lEnd-lEnd.min())/(lEnd.max() -lEnd.min())*numpy.pi/2)

            # pylab.loglog(ll,kk)

            area = Nx * Ny * self.pixScaleX * self.pixScaleY
            #p = numpy.reshape(kk,[Ny,Nx]) /area * (Nx*Ny)**2
            p = kk  #/ area * (Nx*Ny)**2
        else:
            area = Nx * Ny * self.pixScaleX * self.pixScaleY
            p = twodPower.powerMap  #/area*(Nx*Ny)**2

        realPart = numpy.sqrt(p) * numpy.random.randn(Ny, Nx)
        imgPart = numpy.sqrt(p) * numpy.random.randn(Ny, Nx)

        kMap = realPart + 1j * imgPart
        if have_pyFFTW:
            data = numpy.real(ifft2(kMap, threads=threads))
        else:
            data = numpy.real(ifft2(kMap))

        b = bufferFactor
        self.data = data[(b - 1) // 2 * self.Ny:(b + 1) // 2 * self.Ny,
                         (b - 1) // 2 * self.Nx:(b + 1) // 2 * self.Nx]
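
A self-contained sketch of the Gaussian-random-field recipe these methods share, with a toy power spectrum and hypothetical grid parameters: the 2D power p is rescaled by (Nx*Ny)**2 / area so that numpy's unnormalized ifft2 of sqrt(p) times complex white noise returns a map with the requested spectrum.

import numpy
from numpy.fft import fftfreq, ifft2

Ny, Nx = 256, 256
pixScaleY = pixScaleX = 0.5 * numpy.pi / (180. * 60.)   # 0.5 arcmin in radians

ly = fftfreq(Ny, d=pixScaleY) * 2 * numpy.pi
lx = fftfreq(Nx, d=pixScaleX) * 2 * numpy.pi
modLMap = numpy.sqrt(ly[:, numpy.newaxis]**2 + lx[numpy.newaxis, :]**2)

Cell = 1.0 / numpy.maximum(modLMap, 1.0)**2    # toy C_ell ~ ell^-2
area = Nx * Ny * pixScaleX * pixScaleY
p = Cell / area * (Nx * Ny)**2                 # same normalization as above

kMap = numpy.sqrt(p) * (numpy.random.randn(Ny, Nx)
                        + 1j * numpy.random.randn(Ny, Nx))
grf = numpy.real(ifft2(kMap))                  # GRF realization on the Ny x Nx grid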
Ejemplo n.º 29
0
    def fillWithGRFFromTemplate(self, twodPower, bufferFactor=1, threads=1):
        """
        Generate a Gaussian random field from an input power spectrum 
        specified as a 2d powerMap
        
        Notes
        -----
        BufferFactor = 1 means the map will have periodic boundary conditions, while
        BufferFactor > 1 means the map will be generated on a patch bufferFactor
        times larger in each dimension and then cut out so as to have
        non-periodic boundary conditions.
        
        Fills the data field of the map with the GRF realization.
        """

        ft = fftTools.fftFromLiteMap(self, threads=threads)
        Ny = self.Ny * bufferFactor
        Nx = self.Nx * bufferFactor

        bufferFactor = int(bufferFactor)
        assert bufferFactor >= 1

        realPart = numpy.zeros([Ny, Nx])
        imgPart = numpy.zeros([Ny, Nx])

        ly = fftfreq(Ny, d=self.pixScaleY) * (2 * numpy.pi)
        lx = fftfreq(Nx, d=self.pixScaleX) * (2 * numpy.pi)
        # print ly
        modLMap = numpy.zeros([Ny, Nx])
        iy, ix = numpy.mgrid[0:Ny, 0:Nx]
        modLMap[iy, ix] = numpy.sqrt(ly[iy] ** 2 + lx[ix] ** 2)

        # divide out area factor
        area = twodPower.Nx * twodPower.Ny * twodPower.pixScaleX * twodPower.pixScaleY
        twodPower.powerMap *= (twodPower.Nx * twodPower.Ny) ** 2 / area

        if bufferFactor > 1 or twodPower.Nx != Nx or twodPower.Ny != Ny:

            lx_shifted = fftshift(twodPower.lx)
            ly_shifted = fftshift(twodPower.ly)
            twodPower_shifted = fftshift(twodPower.powerMap)

            f_interp = interp2d(lx_shifted, ly_shifted, twodPower_shifted)

            # ell = numpy.ravel(twodPower.modLMap)
            # Cell = numpy.ravel(twodPower.powerMap)
            # print ell
            # print Cell
            # s = splrep(ell,Cell,k=3)
            #
            #
            # ll = numpy.ravel(modLMap)
            # kk = splev(ll,s)

            kk = f_interp(fftshift(lx), fftshift(ly))
            kk = ifftshift(kk)

            # id = numpy.where(modLMap > ell.max())
            # kk[id] = 0.
            # add a cosine ^2 falloff at the very end
            # id2 = numpy.where( (ll> (ell.max()-500)) & (ll<ell.max()))
            # lEnd = ll[id2]
            # kk[id2] *= numpy.cos((lEnd-lEnd.min())/(lEnd.max() -lEnd.min())*numpy.pi/2)

            # pylab.loglog(ll,kk)

            area = Nx * Ny * self.pixScaleX * self.pixScaleY
            # p = numpy.reshape(kk,[Ny,Nx]) /area * (Nx*Ny)**2
            p = kk  # / area * (Nx*Ny)**2
        else:
            area = Nx * Ny * self.pixScaleX * self.pixScaleY
            p = twodPower.powerMap  # /area*(Nx*Ny)**2

        realPart = numpy.sqrt(p) * numpy.random.randn(Ny, Nx)
        imgPart = numpy.sqrt(p) * numpy.random.randn(Ny, Nx)

        kMap = realPart + 1j * imgPart
        if have_pyFFTW:
            data = numpy.real(ifft2(kMap, threads=threads))
        else:
            data = numpy.real(ifft2(kMap))

        b = bufferFactor
        self.data = data[(b - 1) // 2 * self.Ny : (b + 1) // 2 * self.Ny, (b - 1) // 2 * self.Nx : (b + 1) // 2 * self.Nx]
Ejemplo n.º 30
0
def initialize_nonparam_2d_nested_filter(X, gridres=1.0, **kwargs):
    """Function to compute the local Fourier filters using a nested approach.

    Parameters
    ----------
    X : array-like
        Two-dimensional array containing the input field. All values are required
        to be finite and the domain must be square.
    gridres : float
        Grid resolution in km.

    Other Parameters
    ----------------
    max_level : int
        Localization parameter. 0: global noise, >0: increasing degree of localization.
        Default : 3
    win_type : string ['hanning', 'flat-hanning']
        Type of window used for localization.
        Default : flat-hanning
    war_thr : float [0;1]
        Threshold for the minimum fraction of rain needed for computing the FFT.
        Default : 0.1

    Returns
    -------
    F : array-like
        Four-dimensional array containing the 2D Fourier filters distributed over
        a 2D spatial grid.
    """

    if len(X.shape) != 2:
        raise ValueError("X must be a two-dimensional array")
    if X.shape[0] != X.shape[1]:
        raise ValueError("a square array expected, but the shape of X is (%d,%d)" % \
                         (X.shape[0], X.shape[1]))
    if np.any(np.isnan(X)):
        raise ValueError("X must not contain NaNs")

    # defaults
    max_level = kwargs.get('max_level', 3)
    win_type = kwargs.get('win_type', 'flat-hanning')
    war_thr = kwargs.get('war_thr', 0.1)

    # make sure non-rainy pixels are set to zero
    min_value = np.min(X)
    X = X.copy()
    X -= min_value

    #
    dim = X.shape
    dim_x = dim[1]
    dim_y = dim[0]

    # Nested algorithm

    # prepare indices
    Idxi = np.array([[0, dim_y]])
    Idxj = np.array([[0, dim_x]])
    Idxipsd = np.array([[0, 2**max_level]])
    Idxjpsd = np.array([[0, 2**max_level]])

    # generate the FFT sample frequencies
    freq = fft.fftfreq(dim_y, gridres)
    fx, fy = np.meshgrid(freq, freq)
    freq_grid = np.sqrt(fx**2 + fy**2)

    # domain fourier filter
    F0 = initialize_nonparam_2d_fft_filter(X, win_type=win_type, donorm=True)
    # and allocate it to the final grid
    F = np.zeros((2**max_level, 2**max_level, F0.shape[0], F0.shape[1]))
    F += F0[np.newaxis, np.newaxis, :, :]

    # now loop levels and build composite spectra
    level = 0
    while level < max_level:

        for m in range(len(Idxi)):

            # the indices of rainfall field
            Idxinext, Idxjnext = _split_field(Idxi[m, :], Idxj[m, :], 2)
            # the indices of the field of fourier filters
            Idxipsdnext, Idxjpsdnext = _split_field(Idxipsd[m, :],
                                                    Idxjpsd[m, :], 2)

            for n in range(len(Idxinext)):

                mask = _get_mask(dim, Idxinext[n, :], Idxjnext[n, :], win_type)
                war = np.sum((X * mask) > 0.01) / float(
                    (Idxinext[n, 1] - Idxinext[n, 0])**2)

                if war > war_thr:
                    # the new filter
                    newfilter = initialize_nonparam_2d_fft_filter(
                        X * mask, win_type=None, donorm=True)

                    # compute logistic function to define weights as function of frequency
                    # k controls the shape of the weighting function
                    # TODO: optimize parameters
                    k = 0.05
                    x0 = (Idxinext[n, 1] - Idxinext[n, 0]) / 2.
                    merge_weights = 1 / (1 + np.exp(-k * (1 / freq_grid - x0)))
                    newfilter *= (1 - merge_weights)

                    # perform the weighted average of previous and new fourier filters
                    F[Idxipsdnext[n, 0]:Idxipsdnext[n, 1],
                      Idxjpsdnext[n, 0]:Idxjpsdnext[
                          n, 1], :, :] *= merge_weights[np.newaxis,
                                                        np.newaxis, :, :]
                    F[Idxipsdnext[n, 0]:Idxipsdnext[n, 1],
                      Idxjpsdnext[n, 0]:Idxjpsdnext[n, 1], :, :] += newfilter[
                          np.newaxis, np.newaxis, :, :]

        # update indices
        level += 1
        Idxi, Idxj = _split_field((0, dim[0]), (0, dim[1]), 2**level)
        Idxipsd, Idxjpsd = _split_field((0, 2**max_level), (0, 2**max_level),
                                        2**level)

    return F
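
A short sketch of the scale-dependent blending inside the loop above, on a standalone grid (the domain size, k = 0.05 and the transition scale x0 are the same kind of hypothetical choices made in the code): small spatial scales take the locally estimated filter, while the largest scales keep the parent filter.

import numpy as np

gridres = 1.0                         # km, assumed grid resolution
freq = np.fft.fftfreq(256, gridres)
fx, fy = np.meshgrid(freq, freq)
freq_grid = np.sqrt(fx**2 + fy**2)

k = 0.05                              # steepness of the logistic transition
x0 = 64.0                             # transition scale: half a sub-domain, in pixels
with np.errstate(divide="ignore"):
    merge_weights = 1 / (1 + np.exp(-k * (1 / freq_grid - x0)))

# combined filter = merge_weights * parent_filter + (1 - merge_weights) * local_filter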
Ejemplo n.º 31
0
    def plot_fixed_t32(self,
                       t32_time,
                       *,
                       part='real',
                       signal='rephasing',
                       ft=True,
                       savefig=True,
                       omega_0=1):
        """"""
        self.load_eigen_params()
        t32_index, t32_time = self.get_closest_index_and_value(
            t32_time, self.t32_array)
        dt21 = self.t21_array[1] - self.t21_array[0]
        dt43 = self.t43_array[1] - self.t43_array[0]
        if signal == 'rephasing':
            sig = self.rephasing_signal[:, t32_index, :]
        elif signal == 'nonrephasing':
            sig = self.nonrephasing_signal[:, t32_index, :]

        if ft:
            w21 = fftshift(fftfreq(self.t21_array.size, d=dt21)) * 2 * np.pi
            w21 += self.ground_to_excited_transition + self.center
            w21 *= omega_0
            w43 = fftshift(fftfreq(self.t43_array.size, d=dt43)) * 2 * np.pi
            w43 += self.ground_to_excited_transition + self.center
            w43 *= omega_0
            X, Y = np.meshgrid(w21, w43, indexing='ij')
            if signal == 'nonrephasing':
                ifft_t21_norm = self.t21_array.size * dt21
                ifft_t43_norm = self.t43_array.size * dt43
                sig = fftshift(ifftn(sig, axes=(0, 1)),
                               axes=(0, 1)) * ifft_t21_norm * ifft_t43_norm
            elif signal == 'rephasing':
                fft_t21_norm = dt21
                ifft_t43_norm = self.t43_array.size * dt43
                sig = fftshift(ifft(sig, axis=1), axes=(1)) * ifft_t43_norm
                sig = fftshift(fft(sig, axis=0), axes=(0)) * fft_t21_norm
            if omega_0 == 1:
                xlab = r'$\omega_{21}$ ($\omega_0$)'
                ylab = r'$\omega_{43}$ ($\omega_0$)'
            else:
                xlab = r'$\omega_{21}$ (cm$^{-1}$)'
                ylab = r'$\omega_{43}$ (cm$^{-1}$)'
        else:
            X = self.T21[:, 0, :]
            Y = self.T43[:, 0, :]
            xlab = r'$t_{21}$ ($\omega_0^{-1}$)'
            ylab = r'$t_{43}$ ($\omega_0^{-1}$)'

        if part == 'real':
            sig = sig.real
        if part == 'imag':
            sig = sig.imag

        plt.figure()
        plt.contour(X, Y, sig, 12, colors='k')
        plt.contourf(X, Y, sig, 12)
        plt.title(part + ' ' + signal + r' at $t_{32}$' +
                  ' = {}'.format(t32_time))
        plt.xlabel(xlab)
        plt.ylabel(ylab)
        plt.xlim([17000, 21000])
        plt.ylim([17000, 21000])
        plt.colorbar()
        if savefig:
            fig_name = os.path.join(
                self.base_path,
                part + '_' + signal + '_t_32_{}.png'.format(t32_time))
            plt.savefig(fig_name)
        plt.show()
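
A minimal sketch of the frequency-axis and normalization conventions used above, on a hypothetical 1D grid: fftshift(fftfreq(n, dt)) * 2*pi builds a monotonically increasing angular-frequency axis, and scaling fft by dt (or ifft by n*dt) makes the discrete transforms roughly approximate continuous-time integrals, which is what the fft_t21_norm / ifft_t43_norm factors do.

import numpy as np
from numpy.fft import fft, ifft, fftshift, fftfreq

n, dt = 128, 0.1                                 # hypothetical time grid
t = np.arange(n) * dt
sig = np.exp(1j * 2.5 * t) * np.exp(-t / 4.0)    # toy decaying oscillation

w = fftshift(fftfreq(n, d=dt)) * 2 * np.pi       # angular-frequency axis
spec = fftshift(fft(sig)) * dt                   # spectrum on the shifted axis

sig_back = ifft(fftshift(spec) / dt)             # inverse round trip
assert np.allclose(sig_back, sig)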
Ejemplo n.º 32
0
def accelsearch(times,
                signal,
                delta_z=1,
                fmin=1,
                fmax=1e32,
                gti=None,
                zmax=100,
                candidate_file=None,
                ref_time=0,
                debug=False,
                interbin=False,
                nproc=4,
                det_p_value=0.15,
                fft_rescale=None):
    """Find pulsars with accelerated search.

    The theory behind these methods is described in Ransom+02, AJ 124, 1788.

    Parameters
    ----------
    times : array of floats
        An evenly spaced list of times
    signal : array of floats
        The light curve, in counts; same length as ``times``

    Other parameters
    ----------------
    delta_z : float
        The spacing in ``z`` space (delta_z = 1 -> delta_fdot = 1/T**2)
    fmin : float, default 1.
        Minimum frequency to search
    fmax : float, default 1e32
        Maximum frequency to search
    gti : ``[[gti00, gti01], [gti10, gti11], ...]``, default None
        Good Time Intervals. If None, it assumes the full range
        ``[[times[0] - dt / 2, times[-1] + dt / 2]]``
    zmax : int, default 100
        Maximum frequency derivative to search (pos and neg), in bins.
        It corresponds to ``fdot_max = zmax / T**2``, where ``T`` is the
        length of the observation.
    candidate_file : str, default None
        Save the final candidate table to this file. If None, the table
        is just returned and not saved.
    ref_time : float, default 0
        Reference time for the times
    det_p_value : float, default 0.15
        Detection p-value (tail probability of noise powers, corrected for the
        number of trials)
    fft_rescale : function
        Any function to apply to the initial FFT, normalized by the number of
        photons as FT * np.sqrt(2/nph) so that || FT ||^2 are Leahy powers.
        For example, a filter to flatten the spectrum in the presence of strong
        red noise.

    Returns
    -------
    candidate_table: :class:`Table`
        Table containing the candidate frequencies and frequency derivatives,
        the spectral power in Leahy normalization, the detection probability,
        the time and the observation length.

    """
    if not isinstance(times, np.ndarray):
        times = np.asarray(times)
    if not isinstance(signal, np.ndarray):
        signal = np.asarray(signal)

    dt = times[1] - times[0]
    if gti is not None:
        gti = np.asarray(gti)
        # Fill in the data with a constant outside GTIs
        gti_mask = create_gti_mask(times, gti)
        expo_fraction = np.count_nonzero(gti_mask) / len(gti_mask)
        bti_mask = ~gti_mask
        mean_ops = np.mean
        if np.mean(signal) > 10:
            mean_ops = np.median
        signal[bti_mask] = mean_ops(signal[gti_mask])
    else:
        expo_fraction = 1
        gti = np.array([[times[0] - dt / 2, times[-1] + dt / 2]])

    n_photons = np.sum(signal)
    spectr = fft(signal) * np.sqrt(2 / n_photons)
    freq = fftfreq(len(spectr), dt)

    if debug:
        _good_f = freq > 0
        import matplotlib.pyplot as plt
        fig = plt.figure(figsize=(12, 8))
        plt.plot(freq[_good_f], (spectr * spectr.conj()).real[_good_f],
                 label='initial PDS')
        plt.xlabel("Frequency (Hz)")
        plt.ylabel("Power (Leahy)")
        plt.loglog()

    if fft_rescale is not None:
        log.info("Applying initial filters...")
        spectr = fft_rescale(spectr)

    if debug:
        plt.plot(freq[_good_f], (spectr * spectr.conj()).real[_good_f],
                 label='PDS after filtering (if any)')
        fname = candidate_file + '_initial_spec.png' \
            if candidate_file else 'initial_spec.png'
        plt.legend(loc=2)
        del _good_f
        plt.savefig(fname)
        plt.close(fig)

    T = times[-1] - times[0] + dt

    freq_intv_to_search = (freq >= fmin) & (freq < fmax)
    log.info("Starting search over full plane...")
    start_z = -zmax
    end_z = zmax
    range_z = np.arange(start_z, end_z, delta_z)
    log.info("min and max possible r_dot: {}--{}".format(
        delta_z / T**2,
        np.max(range_z) / T**2))
    freqs_to_search = freq[freq_intv_to_search]

    candidate_table = Table(names=[
        'time', 'length', 'frac_exposure', 'power', 'prob', 'frequency',
        'fdot', 'fddot', 'ntrial'
    ],
                            dtype=[float] * 8 + [int])

    detlev = pds_detection_level(ntrial=freqs_to_search.size,
                                 epsilon=det_p_value)

    responses = _create_responses(range_z)

    candidate_rs, candidate_js, candidate_powers = \
        _calculate_all_convolutions(spectr, responses,
                                    freq_intv_to_search, detlev,
                                    debug=debug, interbin=interbin,
                                    nproc=nproc)

    for r, j, cand_power in zip(candidate_rs, candidate_js, candidate_powers):
        z = range_z[j]
        cand_freq = r / T
        fdot = z / T**2
        prob = pds_probability(cand_power, freqs_to_search.size)
        candidate_table.add_row([
            ref_time + gti[0, 0], T, expo_fraction, cand_power, prob,
            cand_freq, fdot, 0, freqs_to_search.size
        ])

    if candidate_file is not None:
        candidate_table.write(candidate_file + '.csv', overwrite=True)

    return candidate_table
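
A standalone sketch (toy Poisson light curve, assumed parameters) of the two conventions accelsearch builds on: the Leahy normalization FT * sqrt(2 / nph), which gives noise powers with mean ~2, and the mapping from the acceleration index z to a frequency derivative fdot = z / T**2.

import numpy as np
from numpy.fft import fft, fftfreq

dt, nbin = 1e-3, 2**16
times = np.arange(nbin) * dt
signal = np.random.poisson(100.0 * dt, nbin).astype(float)   # ~100 counts/s

n_photons = signal.sum()
spectr = fft(signal) * np.sqrt(2 / n_photons)    # Leahy-normalized FT
freq = fftfreq(nbin, dt)
leahy_power = (spectr * spectr.conj()).real      # mean ~2 for pure noise

T = nbin * dt
zmax, delta_z = 100, 1
range_z = np.arange(-zmax, zmax, delta_z)
fdot_grid = range_z / T**2                       # frequency derivatives searched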