Example #1
def _fft_filter_setup(
    image_shape: Tuple[int, int], window: Union[np.ndarray, Window],
) -> Tuple[Tuple[int, int], np.ndarray, Tuple[int, int], Tuple[int, int]]:
    window_shape = window.shape

    # Optimal FFT shape
    #    real_fft_only = True
    fft_shape = (
        next_fast_len(
            image_shape[0] + window_shape[0] - 1
        ),  # , real_fft_only),
        next_fast_len(
            image_shape[1] + window_shape[1] - 1
        ),  # , real_fft_only),
    )

    # Pad window to optimal FFT size
    window_pad = _pad_window(window, fft_shape)

    # Obtain the transfer function via the real valued FFT
    transfer_function = rfft2(window_pad)

    # Image offset before FFT and after IFFT
    offset_before = _offset_before_fft(window_shape)
    offset_after = _offset_after_ifft(window_shape)

    return fft_shape, transfer_function, offset_before, offset_after
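
A minimal sketch (not from the source) of how such a setup is typically consumed: pad both operands to the fast FFT shape, multiply in the frequency domain, and crop back. The helper below folds the setup and the filtering into one function and assumes the offsets select the 'same'-sized output region.

import numpy as np
from scipy.fft import next_fast_len, rfft2, irfft2

def fft_filter(image: np.ndarray, window: np.ndarray) -> np.ndarray:
    # Full linear-convolution shape, rounded up to fast FFT lengths.
    fft_shape = tuple(next_fast_len(i + w - 1)
                      for i, w in zip(image.shape, window.shape))
    transfer_function = rfft2(window, s=fft_shape)
    full = irfft2(rfft2(image, s=fft_shape) * transfer_function, s=fft_shape)
    # Offsets of half the window size recover the 'same'-sized region.
    oy, ox = window.shape[0] // 2, window.shape[1] // 2
    return full[oy:oy + image.shape[0], ox:ox + image.shape[1]]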
Example #2
def shift_data_subpixel(inputs):
    '''Rigid shift of X by ymax and xmax; allows subpixel shifts.

    ** not being used **
    '''
    X, ymax, xmax, pad_fft = inputs
    ymax = ymax.flatten()
    xmax = xmax.flatten()
    if X.ndim<3:
        X = X[np.newaxis,:,:]

    nimg, Ly0, Lx0 = X.shape
    if pad_fft:
        X = fft2(X.astype('float32'), (next_fast_len(Ly0), next_fast_len(Lx0)))
    else:
        X = fft2(X.astype('float32'))
    nimg, Ly, Lx = X.shape
    Ny = fft.ifftshift(np.arange(-np.fix(Ly/2), np.ceil(Ly/2)))
    Nx = fft.ifftshift(np.arange(-np.fix(Lx/2), np.ceil(Lx/2)))
    [Nx,Ny] = np.meshgrid(Nx,Ny)
    Nx = Nx.astype('float32') / Lx
    Ny = Ny.astype('float32') / Ly
    dph = Nx * np.reshape(xmax, (-1,1,1)) + Ny * np.reshape(ymax, (-1,1,1))
    Y = np.real(ifft2(X * np.exp((2j * np.pi) * dph)))
    # crop back to original size
    if Ly0<Ly or Lx0<Lx:
        Lyhalf = int(np.floor(Ly/2))
        Lxhalf = int(np.floor(Lx/2))
        Y = Y[np.ix_(np.arange(0,nimg,1,int),
                     np.arange(-np.fix(Ly0/2), np.ceil(Ly0/2),1,int) + Lyhalf,
                     np.arange(-np.fix(Lx0/2), np.ceil(Lx0/2),1,int) + Lxhalf)]
    return Y
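
The same Fourier shift theorem in a self-contained form (a sketch, not the source's helper): np.fft.fftfreq produces exactly the ifftshift(arange(-fix(L/2), ceil(L/2))) / L grid built above, and multiplying the spectrum by a linear phase shifts the image.

import numpy as np

def shift_subpixel(img, ymax, xmax):
    # fftfreq(L) == ifftshift(arange(-fix(L/2), ceil(L/2))) / L
    Ny = np.fft.fftfreq(img.shape[0])[:, None]
    Nx = np.fft.fftfreq(img.shape[1])[None, :]
    F = np.fft.fft2(img.astype('float32'))
    dph = Nx * xmax + Ny * ymax
    # Same sign convention as above: the content at index i + shift
    # moves to index i.
    return np.real(np.fft.ifft2(F * np.exp(2j * np.pi * dph)))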
Example #3
def combine_psfs(psf_1, psf_2, are_psf=False):
    """
        Function to convolve two 2D arrays.

        Parameters:
            psf_1 (numpy.ndarray): First 2D array
            psf_2 (numpy.ndarray): Second 2D array
            are_psf (bool): If True, the inputs are PSFs (unit sum, peaked at
                the center); if False, they are gradients of PSFs.

        Returns:
            numpy.ndarray: Convolution of the input 2D arrays
    """

    shape_1 = np.array(psf_1.shape)
    shape_2 = np.array(psf_2.shape)
    conv_shape = shape_1 + shape_2 - 1

    fast_shape = np.array(
        [next_fast_len(int(conv_shape[0])),
         next_fast_len(int(conv_shape[1]))])

    new_psf = np.fft.irfft2(np.fft.rfft2(psf_1, s=fast_shape) *
                            np.fft.rfft2(psf_2, s=fast_shape),
                            s=fast_shape)
    new_psf = new_psf[:conv_shape[0], :conv_shape[1]]

    if are_psf:
        assert np.isclose(np.sum(new_psf),
                          1.0), "Sum of PSF is {}".format(np.sum(new_psf))
        assert np.isclose(
            new_psf[new_psf.shape[0] // 2, new_psf.shape[1] // 2],
            np.max(new_psf)), "Difference is {}".format(
                new_psf[new_psf.shape[0] // 2, new_psf.shape[1] // 2] -
                np.max(new_psf))
    return new_psf
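
A quick usage sketch (the gaussian_psf helper is hypothetical, and combine_psfs above is assumed to be in scope): convolving two unit-sum PSFs this way yields another unit-sum PSF peaked at the center, which is what the are_psf assertions check.

import numpy as np

def gaussian_psf(n, sigma):
    y, x = np.mgrid[:n, :n] - n // 2
    g = np.exp(-(x**2 + y**2) / (2.0 * sigma**2))
    return g / g.sum()  # unit-sum PSF, peaked at the center

psf = combine_psfs(gaussian_psf(31, 2.0), gaussian_psf(31, 3.0), are_psf=True)
print(psf.sum())  # ~1.0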
Example #4
def prepare_masks(refImg0, ops):
    refImg = refImg0.copy()
    if ops['1Preg']:
        maskSlope = ops['spatial_taper']  # slope of taper mask at the edges
    else:
        maskSlope = 3 * ops['smooth_sigma']  # slope of taper mask at the edges
    Ly, Lx = refImg.shape
    maskMul = spatial_taper(maskSlope, Ly, Lx)

    if ops['1Preg']:
        refImg = one_photon_preprocess(refImg[np.newaxis, :, :], ops).squeeze()
    maskOffset = refImg.mean() * (1. - maskMul)

    # reference image in fourier domain
    if ops['pad_fft']:
        cfRefImg = np.conj(
            fft2(refImg, (next_fast_len(ops['Ly']), next_fast_len(ops['Lx']))))
    else:
        cfRefImg = np.conj(fft2(refImg))

    absRef = np.absolute(cfRefImg)
    cfRefImg = cfRefImg / (eps0 + absRef)

    # gaussian filter in space
    fhg = gaussian_fft(ops['smooth_sigma'], cfRefImg.shape[0],
                       cfRefImg.shape[1])
    cfRefImg *= fhg

    maskMul = maskMul.astype('float32')
    maskOffset = maskOffset.astype('float32')
    cfRefImg = cfRefImg.astype('complex64')
    cfRefImg = np.reshape(cfRefImg, (1, cfRefImg.shape[0], cfRefImg.shape[1]))
    return maskMul, maskOffset, cfRefImg
Example #5
def phasecorr_reference(refImg0: np.ndarray,
                        maskSlope,
                        smooth_sigma,
                        yblock,
                        xblock,
                        pad_fft: bool = False):
    """
    Computes taper and fft'ed reference image for phasecorr.

    Parameters
    ----------
    refImg0: array
        reference image
    maskSlope
        slope of the taper mask at the edges
    smooth_sigma
        standard deviation of the gaussian smoothing applied in the fft domain
    yblock
    xblock
        lists of (start, stop) index pairs defining the blocks
    pad_fft: bool
        whether to do border padding in the fft step

    Returns
    -------
    maskMul
    maskOffset
    cfRefImg

    """
    nb = len(yblock)
    Ly = yblock[0][1] - yblock[0][0]
    Lx = xblock[0][1] - xblock[0][0]
    dims = (nb, Ly, Lx)
    cfRef_dims = (nb, next_fast_len(Ly),
                  next_fast_len(Lx)) if pad_fft else dims
    gaussian_filter = gaussian_fft(smooth_sigma, *cfRef_dims[1:])
    cfRefImg1 = np.empty(cfRef_dims, 'complex64')

    maskMul = spatial_taper(maskSlope, *refImg0.shape)
    maskMul1 = np.empty(dims, 'float32')
    maskMul1[:] = spatial_taper(2 * smooth_sigma, Ly, Lx)
    maskOffset1 = np.empty(dims, 'float32')
    for yind, xind, maskMul1_n, maskOffset1_n, cfRefImg1_n in zip(
            yblock, xblock, maskMul1, maskOffset1, cfRefImg1):
        ix = np.ix_(
            np.arange(yind[0], yind[-1]).astype('int'),
            np.arange(xind[0], xind[-1]).astype('int'))
        refImg = refImg0[ix]

        # mask params
        maskMul1_n *= maskMul[ix]
        maskOffset1_n[:] = refImg.mean() * (1. - maskMul1_n)

        # gaussian filter
        cfRefImg1_n[:] = np.conj(fft.fft2(refImg))
        cfRefImg1_n /= 1e-5 + np.absolute(cfRefImg1_n)
        cfRefImg1_n[:] *= gaussian_filter

    return (maskMul1[:, np.newaxis, :, :],
            maskOffset1[:, np.newaxis, :, :],
            cfRefImg1[:, np.newaxis, :, :])
Example #6
def phasecorr_reference(refImg0, ops):
    """ computes masks and fft'ed reference image for phasecorr

    Parameters
    ----------
    refImg0 : int16
        reference image
    ops : dictionary
        requires 'smooth_sigma'
        (if ```ops['1Preg']```, need 'spatial_taper', 'spatial_hp', 'pre_smooth')

    Returns
    -------
    maskMul : float32
        mask that is multiplied with each frame to spatially taper it
    maskOffset : float32
        offset added after tapering so each frame keeps the reference mean
    cfRefImg : complex64
        reference image fft'ed, complex conjugated, whitened and multiplied by
        a gaussian filter in the fft domain with standard deviation 'smooth_sigma'

    """
    refImg = refImg0.copy()
    if '1Preg' in ops and ops['1Preg']:
        maskSlope = ops['spatial_taper']  # slope of taper mask at the edges
    else:
        maskSlope = 3 * ops['smooth_sigma']  # slope of taper mask at the edges
    Ly, Lx = refImg.shape
    maskMul = utils.spatial_taper(maskSlope, Ly, Lx)

    if '1Preg' in ops and ops['1Preg']:
        refImg = utils.one_photon_preprocess(refImg[np.newaxis, :, :],
                                             ops).squeeze()
    maskOffset = refImg.mean() * (1. - maskMul)

    # reference image in fourier domain
    if 'pad_fft' in ops and ops['pad_fft']:
        cfRefImg = np.conj(fft2(refImg,
                                (next_fast_len(Ly), next_fast_len(Lx))))
    else:
        cfRefImg = np.conj(fft2(refImg))

    absRef = np.absolute(cfRefImg)
    cfRefImg = cfRefImg / (1e-5 + absRef)

    # gaussian filter in space
    fhg = utils.gaussian_fft(ops['smooth_sigma'], cfRefImg.shape[0],
                             cfRefImg.shape[1])
    cfRefImg *= fhg

    maskMul = maskMul.astype('float32')
    maskOffset = maskOffset.astype('float32')
    cfRefImg = cfRefImg.astype('complex64')
    cfRefImg = np.reshape(cfRefImg, (1, cfRefImg.shape[0], cfRefImg.shape[1]))
    return maskMul, maskOffset, cfRefImg
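
For context, a sketch (hypothetical, not part of the source) of how these three outputs are typically consumed during registration: taper a frame with maskMul/maskOffset, whiten its spectrum, multiply by cfRefImg, and locate the correlation peak.

import numpy as np
from numpy.fft import fft2, ifft2, fftshift

def phasecorr_shift(frame, maskMul, maskOffset, cfRefImg):
    data = frame * maskMul + maskOffset          # taper the frame
    F = fft2(data, s=cfRefImg.shape[-2:])        # match the reference size
    F /= 1e-5 + np.abs(F)                        # phase-only whitening
    cc = fftshift(np.real(ifft2(F * cfRefImg[0])))
    ymax, xmax = np.unravel_index(np.argmax(cc), cc.shape)
    # Convert the argmax back to signed (y, x) shifts.
    return ymax - cc.shape[0] // 2, xmax - cc.shape[1] // 2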
Example #7
def phasecorr_reference(refImg1, ops):
    """ create blocked reference image and take its fft and multiply by gaussian smoothing mask """

    if 'yblock' not in ops:
        ops = utils.make_blocks(ops)

    refImg0 = refImg1.copy()
    if ops['1Preg']:
        maskSlope = ops['spatial_taper']
    else:
        maskSlope = 3 * ops['smooth_sigma']  # slope of taper mask at the edges
    Ly, Lx = refImg0.shape
    maskMul = utils.spatial_taper(maskSlope, Ly, Lx)

    if ops['1Preg']:
        refImg0 = utils.one_photon_preprocess(refImg0[np.newaxis,:,:], ops).squeeze()

    # split refImg0 into multiple parts
    cfRefImg1 = []
    maskMul1 = []
    maskOffset1 = []
    nb = len(ops['yblock'])

    #patch taper
    Ly = ops['yblock'][0][1] - ops['yblock'][0][0]
    Lx = ops['xblock'][0][1] - ops['xblock'][0][0]
    if ops['pad_fft']:
        cfRefImg1 = np.zeros((nb,1,next_fast_len(Ly), next_fast_len(Lx)),'complex64')
    else:
        cfRefImg1 = np.zeros((nb,1,Ly,Lx),'complex64')
    maskMul1 = np.zeros((nb,1,Ly,Lx),'float32')
    maskOffset1 = np.zeros((nb,1,Ly,Lx),'float32')
    for n in range(nb):
        yind = ops['yblock'][n]
        yind = np.arange(yind[0],yind[-1]).astype('int')
        xind = ops['xblock'][n]
        xind = np.arange(xind[0],xind[-1]).astype('int')

        refImg = refImg0[np.ix_(yind,xind)]
        maskMul2 = utils.spatial_taper(2 * ops['smooth_sigma'], Ly, Lx)
        maskMul1[n,0,:,:] = maskMul[np.ix_(yind,xind)].astype('float32')
        maskMul1[n,0,:,:] *= maskMul2.astype('float32')
        maskOffset1[n,0,:,:] = (refImg.mean() * (1. - maskMul1[n,0,:,:])).astype(np.float32)
        cfRefImg   = np.conj(fft.fft2(refImg))
        absRef     = np.absolute(cfRefImg)
        cfRefImg   = cfRefImg / (1e-5 + absRef)

        # gaussian filter
        fhg = utils.gaussian_fft(ops['smooth_sigma'], cfRefImg.shape[0], cfRefImg.shape[1])
        cfRefImg *= fhg

        cfRefImg1[n,0,:,:] = (cfRefImg.astype('complex64'))
    return maskMul1, maskOffset1, cfRefImg1
Example #8
def standard_mask(mask, extend_ratio=EXTEND_RATIO, smooth=True, taper=True):
    '''``mask`` modified in-place
    '''
    # _smooth_window might change mask.shape if mode == 'full'
    n_x = next_fast_len(int(mask.shape[0] * extend_ratio))

    # tapering needs to be done before smoothing, because smoothing changes the boundary
    if taper:
        mask *= _get_taper(mask > 0., WIDTH)

    if smooth:
        if taper:
            # if tapering, then smoothing beyond the boundary doesn't make sense
            mask_boolean = mask > 0.
            temp = np.zeros_like(mask)
            temp[mask_boolean] = _smooth_window(mask,
                                                mode='same')[mask_boolean]
            mask = temp
            del mask_boolean, temp
        else:
            mask = _smooth_window(mask, mode='full')

    # this normalization doesn't matter
    # mask is used in 2 places, mode-coupling, and pseudo-spectra
    # in mode-coupling, norm_fft is used
    # in pseudo-spectra as a weight, the absolute scale doesn't matter
    # normalize_max(mask)

    return zero_padding(mask, (n_x, n_x))
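
zero_padding is not shown; a plausible minimal stand-in (an assumption, the real helper may center the mask instead of placing it in the corner):

import numpy as np

def zero_padding(arr, shape):
    # Embed `arr` in the corner of a zero array of the requested (fast) shape.
    out = np.zeros(shape, dtype=arr.dtype)
    out[tuple(slice(0, s) for s in arr.shape)] = arr
    return out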
Example #9
def get_ns(wf1,source_conf,insta):
    
    # Nr of time steps in traces
    if insta:
        # get path to instaseis db
        #ToDo: ugly.
        dbpath = json.load(open(os.path.join(source_conf['project_path'],
            'config.json')))['wavefield_path']
        # open 
        db = instaseis.open_db(dbpath)
        # get a test seismogram to determine...
        stest = db.get_seismograms(source=instaseis.ForceSource(latitude=0.0,
            longitude=0.0),receiver=instaseis.Receiver(latitude=10.,
            longitude=0.0),dt=1./source_conf['sampling_rate'])[0]
        
        nt = stest.stats.npts
        Fs = stest.stats.sampling_rate
    else:
        with WaveField(wf1) as wf1:
            nt = int(wf1.stats['nt'])
            Fs = round(wf1.stats['Fs'],8)
    
    # Necessary length of zero padding for carrying out frequency domain correlations/convolutions
    n = next_fast_len(2*nt-1)     
    
    # Number of time steps for synthetic correlation
    n_lag = int(source_conf['max_lag'] * Fs)
    if nt - 2*n_lag <= 0:
        click.secho('Resetting maximum lag to %g seconds: Synthetics are too '
                    'short for a maximum lag of %g seconds.'
                    % (nt // 2 / Fs, n_lag / Fs))
        n_lag = nt // 2
        
    n_corr = 2*n_lag + 1
    
    return nt,n,n_corr,Fs
Example #10
    def crop_tails(self):
        """Crops out tails after every impulse response has decayed to noise floor."""
        if self.fs != self.estimator.fs:
            raise ValueError(
                'Refusing to crop tails because HRIR\'s sampling rate doesn\'t match impulse response '
                'estimator\'s sampling rate.')
        # Find indices after which there is only noise in each track
        tail_indices = []
        lengths = []
        for speaker, pair in self.irs.items():
            for side, ir in pair.items():
                _, tail_ind, _, _ = ir.decay_params()
                tail_indices.append(tail_ind)
                lengths.append(len(ir))

        # Crop all tracks by last tail index
        seconds_per_octave = len(
            self.estimator) / self.estimator.fs / self.estimator.n_octaves
        fade_out = 2 * int(self.fs * seconds_per_octave *
                           (1 / 24))  # Duration of 1/24 octave in the sweep
        window = signal.windows.hann(fade_out)[fade_out // 2:]
        fft_len = fftpack.next_fast_len(max(tail_indices))
        tail_ind = min(np.min(lengths), fft_len)
        for speaker, pair in self.irs.items():
            for ir in pair.values():
                ir.data = ir.data[:tail_ind]
                ir.data *= np.concatenate(
                    [np.ones(len(ir.data) - len(window)), window])
Example #11
def real_stream_cc_output_dict(real_templates, real_multichannel_stream):
    """ return a dict of outputs from all stream_xcorr functions """
    out = {}
    fft_len = next_fast_len(real_templates[0][0].stats.npts +
                            real_multichannel_stream[0].stats.npts + 1)
    short_fft_len = 2**8
    for name, func in stream_funcs.items():
        for cores in [1, cpu_count()]:
            print("Running {0} with {1} cores".format(name, cores))

            cc_out = time_func(func,
                               name,
                               real_templates,
                               real_multichannel_stream,
                               cores=cores,
                               fft_len=short_fft_len)
            out["{0}.{1}".format(name, cores)] = cc_out
            if "fftw" in name:
                print("Running fixed fft-len: {0}".format(fft_len))
                # Make sure that running with a pre-defined fft-len works
                cc_out = time_func(func,
                                   name,
                                   real_templates,
                                   real_multichannel_stream,
                                   cores=cores,
                                   fft_len=fft_len)
                out["{0}.{1}_fixed_fft".format(name, cores)] = cc_out
    return out
Example #12
def spectral_whitening(data, sr=None, smooth=None, filter=None,
                       waterlevel=1e-8):
    """
    Apply spectral whitening to data

    Data is divided by its (optionally smoothed) amplitude spectrum.

    :param data: numpy array with data to manipulate
    :param sr: sampling rate (only needed for smoothing)
    :param smooth: length of smoothing window in Hz
        (default None -> no smoothing)
    :param filter: filter spectrum with bandpass after whitening
        (tuple with min and max frequency)
    :param waterlevel: waterlevel relative to mean of spectrum

    :return: whitened data
    """
    data = _fill_array(data, fill_value=0.)
    mask = np.ma.getmask(data)
    nfft = next_fast_len(len(data))
    spec = fft(data, nfft)
    spec_ampl = np.abs(spec)
    spec_ampl /= np.max(spec_ampl)
    if smooth:
        smooth = int(smooth * nfft / sr)
        spec_ampl = ifftshift(smooth_func(fftshift(spec_ampl), smooth))
    # safeguard against division by 0
    spec_ampl[spec_ampl < waterlevel] = waterlevel
    spec /= spec_ampl
    if filter is not None:
        spec *= _filter_resp(*filter, sr=sr, N=len(spec), whole=True)[1]
    ret = np.real(ifft(spec, nfft)[:len(data)])
    return _fill_array(ret, mask=mask, fill_value=0.)
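
Stripped of the masking, smoothing, and bandpass options, the core of the whitening is just a waterlevel-guarded division of the spectrum by its own amplitude; a self-contained sketch:

import numpy as np
from scipy.fft import fft, ifft, next_fast_len

def whiten(data, waterlevel=1e-8):
    # Divide the spectrum by its normalized amplitude, guarded by a
    # waterlevel, as in spectral_whitening above (without masking,
    # smoothing, or the bandpass step).
    nfft = next_fast_len(len(data))
    spec = fft(data, nfft)
    ampl = np.abs(spec)
    ampl /= ampl.max()
    ampl[ampl < waterlevel] = waterlevel
    return np.real(ifft(spec / ampl, nfft)[:len(data)])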
Example #13
def FFT(data: list, samplingFrequency: int,
        zeroPadding: bool) -> Tuple[numpy.ndarray, numpy.ndarray, numpy.ndarray]:

    signalLength = len(data)

    if zeroPadding:
        fftLength = fftpack.next_fast_len(signalLength)
    else:
        fftLength = signalLength

    amplitudePhase = fftpack.fft(data, fftLength) / signalLength

    if fftLength % 2 == 0:
        binCount = int(fftLength / 2 + 1)
        amplitude = abs(amplitudePhase[0:binCount])
        amplitude[1:-1] = 2 * amplitude[1:-1]
    else:
        binCount = int((fftLength + 1) / 2)
        amplitude = abs(amplitudePhase[0:binCount])
        amplitude[1:] = 2 * amplitude[1:]

    phase = numpy.angle(amplitudePhase[0:binCount])
    frequency = samplingFrequency / 2 * numpy.linspace(0, 1, binCount)

    return (frequency, amplitude, phase)
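
A usage check (assuming the FFT function above is in scope): a 50 Hz sine of amplitude 1.5 sampled at 1 kHz for one second should produce a single-sided peak of 1.5 at 50 Hz.

import numpy

fs = 1000
t = numpy.arange(0, 1, 1 / fs)
data = 1.5 * numpy.sin(2 * numpy.pi * 50 * t)
frequency, amplitude, phase = FFT(list(data), fs, zeroPadding=False)
print(frequency[numpy.argmax(amplitude)], max(amplitude))  # 50.0, ~1.5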
Example #14
    def _make_trace_df(self, phase_df):
        """
        Make the data arrays.
        """
        # get time rep. in dataframe
        sampling_rate = phase_df['sampling_rate'].unique()[0]
        num_samples = (phase_df['tw_end'] - phase_df['tw_start']) * sampling_rate
        array_len = next_fast_len(int(num_samples.max() + 1))
        time = np.arange(0, array_len) * (1. / sampling_rate)
        # apply preprocessing to each trace
        traces = phase_df.apply(self.process_trace, axis=1)
        # create numpy array, fill with data
        values = np.zeros((len(phase_df.index), len(time)))
        for i, trace in enumerate(traces.values):
            values[i, 0:len(trace.data)] = trace.data
        # init df from filled values
        df = pd.DataFrame(values, index=phase_df.index, columns=time)
        # set name of columns
        df.columns.name = 'time'
        # add original lengths to the channel_info
        lens = traces.apply(lambda x: len(x.data))
        self.channel_info.data['sample_count'] = lens
        # apply time-domain pre-processing
        df = self.process_trace_dataframe_hook(df)
        return df
Example #15
    def stack(self, data, stack_type='Linear Stack', order=2):
        """
        Stack data by first axis.

        :type stack_type: str
        :param stack_type: Type of stack, one of the following:
            ``'Linear Stack'``: average stack (default),
            ``'Phase Weigth Stack'``: phase weighted stack of order ``order``
            (see [Schimmel1997]_, order 0 corresponds to linear stack),
            ``'root'``: root stack of order ``order``
            (order 1 corresponds to linear stack).
        """

        if stack_type == 'Linear Stack':
            stack = np.mean(data, axis=0)
        elif stack_type == 'Phase Weigth Stack':
            npts = np.shape(data)[1]
            nfft = next_fast_len(npts)
            anal_sig = hilbert(data, N=nfft)[:, :npts]
            norm_anal_sig = anal_sig / np.abs(anal_sig)
            phase_stack = np.abs(np.mean(norm_anal_sig, axis=0))**order
            stack = np.mean(data, axis=0) * phase_stack
        elif stack_type == 'root':
            r = np.mean(np.sign(data) * np.abs(data)**(1 / order), axis=0)
            stack = np.sign(r) * np.abs(r)**order
        else:
            raise ValueError('stack type is not valid.')

        return stack
Example #16
def correlate_phase(data1, data2, shift, demean=True, normalize=True):
    from scipy.signal import hilbert
    from scipy.fftpack import next_fast_len
    assert len(data1) == len(data2)
    nfft = next_fast_len(len(data1))
    if demean:
        data1 = data1 - np.mean(data1)
        data2 = data2 - np.mean(data2)
    sig1 = hilbert(data1, N=nfft)[:len(data1)]
    sig2 = hilbert(data2, N=nfft)[:len(data2)]
    phi1 = np.angle(sig1)
    phi2 = np.angle(sig2)
    def phase_stack(phi1, phi2, shift):
        s1 = max(0, shift)
        s2 = min(0, shift)
        N = len(phi1)
        assert len(phi2) == N
        return np.sum(np.abs(np.cos((phi1[s1:N+s2] - phi2[-s2:N-s1]) / 2)) -
                      np.abs(np.sin((phi1[s1:N+s2] - phi2[-s2:N-s1]) / 2)))

    cc = [phase_stack(phi1, phi2, s) for s in range(-shift, shift + 1)]
    cc = np.array(cc)
    if normalize:
        cc = cc / len(data1)
    return cc
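
A usage sketch (assuming correlate_phase above is in scope): for data2 equal to data1 rolled by 5 samples, the phase cross-correlation peaks at lag -5 under this sign convention.

import numpy as np

rng = np.random.default_rng(1)
data1 = rng.standard_normal(1000)
data2 = np.roll(data1, 5)
cc = correlate_phase(data1, data2, shift=20)
print(np.argmax(cc) - 20)  # -5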
Example #17
def array_ccs_low_amp(array_template, array_stream, pads):
    """ Use each function stored in the normxcorr cache to correlate the
     templates and arrays, return a dict with keys as func names and values
     as the cc calculated by said function.
     This specifically tests low amplitude streams as raised in issue #181."""
    out = {}
    arr_stream = array_stream * 10e-8
    for name in list(corr.XCORR_FUNCS_ORIGINAL.keys()):
        func = corr.get_array_xcorr(name)
        print("Running {0} with low-variance".format(name))
        _log_handler.reset()
        cc, _ = time_func(func, name, array_template, arr_stream, pads)
        out[name] = (cc, copy.deepcopy(log_messages['warning']))
        if "fftw" in name:
            print("Running fixed len fft")
            _log_handler.reset()
            fft_len = next_fast_len(
                max(len(array_stream) // 4, len(array_template)))
            cc, _ = time_func(func,
                              name,
                              array_template,
                              array_stream,
                              pads,
                              fft_len=fft_len)
            out[name + "_fixed_len"] = (cc,
                                        copy.deepcopy(log_messages['warning']))
    return out
Example #18
def autocov(samples, axis=-1):
    """Compute autocovariance estimates for every lag for the input array.

    Parameters
    ----------
    samples : `numpy.ndarray(n_chains, n_iters)`
        An array containing samples

    Returns
    -------
    acov: `numpy.ndarray`
        Autocovariance of samples that has same size as the input array
    """
    axis = axis if axis > 0 else len(samples.shape) + axis
    n = samples.shape[axis]
    m = next_fast_len(2 * n)

    samples = samples - samples.mean(axis, keepdims=True)

    # added to silence tuple warning for a submodule
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")

        ifft_samp = np.fft.rfft(samples, n=m, axis=axis)
        ifft_samp *= np.conjugate(ifft_samp)

        shape = tuple(
            slice(None) if dim != axis else slice(0, n)
            for dim, _ in enumerate(samples.shape))
        cov = np.fft.irfft(ifft_samp, n=m, axis=axis)[shape]
        cov /= n
        return cov
Example #19
def xc_trace(x_trace, x_design, pix_per_mm):
    """
    Use a cross-correlation to find the offset
    """
    #    _x_design = -pix_per_mm*x_design[::-1]
    _x_design = pix_per_mm * x_design
    size = slit_function_length(_x_design, oversample=10)[1]
    fftsize = fftpack.next_fast_len(size)
    design_offset, design_slits_x, design_slits_y \
            = build_slit_function(_x_design, oversample=10, size=fftsize)
    trace_offset, trace_slits_x, trace_slits_y \
            = build_slit_function(x_trace, oversample=10, size=fftsize)

    #    pyplot.plot(trace_slits_x, trace_slits_y)
    pyplot.plot(design_slits_x, design_slits_y)
    pyplot.scatter(_x_design,
                   numpy.ones(_x_design.size),
                   color='C3',
                   marker='.',
                   s=50,
                   lw=0)
    pyplot.show()

    xc = signal.correlate(numpy.roll(design_slits_y, (fftsize - size) // 2),
                          numpy.roll(trace_slits_y, (fftsize - size) // 2),
                          mode='full',
                          method='fft')
    pyplot.plot(numpy.arange(xc.size), xc)
    max_xc = numpy.argmax(xc)
    pyplot.scatter(max_xc, xc[max_xc], marker='x', color='C3')
    pyplot.show()

    import pdb
    pdb.set_trace()
Example #20
    def calcFFT(self):
        # Determine the length of the vector to transform
        N = len(self.volt)
        # Determine the optimal FFT length (zero padding if necessary)
        N_fast = next_fast_len(N)
        # Build the Kaiser-Bessel window
        w = win.kaiser(N, 12)
        w = w / w.sum() * N  # normalize so the amplitudes stay correct
        # Transform
        yf = fft(self.volt * w, N_fast)
        yf = fftshift(yf)
        # Keep the non-negative-frequency half
        yf = yf[N_fast // 2:]
        # Scale
        yf = 2 / N_fast * abs(yf)
        # Build the frequency axis
        xf = fftfreq(N_fast)
        xf = fftshift(xf)
        # Keep the non-negative-frequency half
        xf = xf[N_fast // 2:]
        # Scale to physical frequency
        xf = xf * self.samplingrate
        # Publish the values
        self.spectrum = yf
        self.freq = xf
        return '-DONE-'
Example #21
    def _make_trace_df(self, st_array: np.ndarray) -> pd.DataFrame:
        """
        Make the dataframe containing time series data.
        """
        sampling_rate = self.sampling_rate
        assert sampling_rate is not None

        # figure out which streams are fractured and drop them
        good_st, new_ind = self._filter_stream_array(st_array)

        # get lens and create an empty array with next fast fft length
        lens = [len(x[0]) for x in good_st]
        max_fast = next_fast_len(
            max(lens))  # + 1)  # What is the point of the +1?
        values = np.full((len(new_ind), max_fast), np.nan)
        # iterate each stream and fill array
        for i, stream in enumerate(good_st):
            values[i, 0:len(stream[0].data)] = stream[0].data
        # init df from filled values
        time = np.arange(0,
                         float(max_fast) / sampling_rate, 1.0 / sampling_rate)
        df = pd.DataFrame(values, index=new_ind, columns=time)
        # set name of columns
        df.columns.name = "time"
        # add original lengths to the channel_info
        self.stats.loc[new_ind, "npts"] = lens
        return df
Example #22
def gaussian(logLam_temp, line_wave, FWHM_gal, pixel=True):
    """
    Instrumental Gaussian line spread function (LSF), optionally integrated
    within the pixels. The function is normalized in such a way that
    
            line.sum() = 1
    
    When the LSF is not severely undersampled, and when pixel=False, the output
    of this function is nearly indistinguishable from a normalized Gaussian:
    
      x = (logLam_temp[:, None] - np.log(line_wave))/dx
      gauss = np.exp(-0.5*(x/xsig)**2)
      gauss /= np.sqrt(2*np.pi)*xsig

    However, to deal rigorously with the possibility of severe undersampling,
    this Gaussian is defined analytically in frequency domain and transformed
    numerically to time domain. This makes the convolution within pPXF exact
    to machine precision regardless of sigma (including sigma=0).
    
    :param logLam_temp: np.log(wavelength) in Angstrom
    :param line_wave: Vector of lines wavelength in Angstrom
    :param FWHM_gal: FWHM in Angstrom. This can be a scalar or the name of
        a function which returns the instrumental FWHM for given wavelength.
        In this case the sigma returned by pPXF will be the intrinsic one,
        namely the one corrected for instrumental dispersion, in the same
        way as the stellar kinematics is returned.
      - To measure the *observed* dispersion, ignoring the instrumental
        dispersion, one can set FWHM_gal=0. In this case the Gaussian
        line templates reduce to Dirac delta functions. The sigma returned
        by pPXF will be the same one would measure by fitting a Gaussian
        to the observed spectrum (except for the fact that this function
        accurately deals with pixel integration).
    :param pixel: set to True to perform integration over the pixels.
    :return: LSF computed for every logLam_temp

    """
    line_wave = np.asarray(line_wave)

    if callable(FWHM_gal):
        FWHM_gal = FWHM_gal(line_wave)

    n = logLam_temp.size
    npad = fftpack.next_fast_len(n)
    nl = npad // 2 + 1  # Expected length of rfft

    dx = (logLam_temp[-1] - logLam_temp[0]) / (n - 1)
    x0 = (np.log(line_wave) - logLam_temp[0]) / dx
    xsig = FWHM_gal / 2.355 / line_wave / dx  # sigma in pixels units
    w = np.linspace(0, np.pi, nl)[:, None]

    # Gaussian with sigma=xsig and center=x0,
    # optionally convolved with a unitary pixel UnitBox[]
    # analytically defined in frequency domain
    # and numerically transformed to time domain
    rfft = np.exp(-0.5 * (w * xsig)**2 - 1j * w * x0)
    if pixel:
        rfft *= np.sinc(w / (2 * np.pi))
    line = np.fft.irfft(rfft, n=npad, axis=0)

    return line[:n, :]
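
A quick check of the stated normalization (assuming gaussian above and its module imports are in scope): each column of the returned LSF integrates to ~1.

import numpy as np

logLam_temp = np.linspace(np.log(3500.0), np.log(7400.0), 2000)
lsf = gaussian(logLam_temp, [4861.0, 6563.0], FWHM_gal=2.5)
print(lsf.sum(axis=0))  # ~[1. 1.]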
Example #23
def hires_power_spectrum(signal, oversampling=1):
    """
    Return a high resolution power spectrum (compare :func:`power_spectrum`).
    Resolution is enhanced by feeding the FFT an ``oversampling`` times larger,
    zero-padded signal, which yields frequency values of higher precision.

    :param signal: input signal
    :type signal: numpy.array
    :param oversampling: oversampling factor
    :type oversampling: int
    :return: frequencies and fourier transformed values
    :rtype: tuple(numpy.array, numpy.array)
    """
    arr_len = len(signal)
    fast_size = next_fast_len(oversampling * arr_len)

    tmp_data = np.zeros(fast_size)
    tmp_data[:arr_len] = signal

    frequencies, fourier_values = power_spectrum(tmp_data)
    fourier_values[0] = 0

    fourier_values = fourier_values[frequencies < arr_len]
    frequencies = frequencies[frequencies < arr_len]

    return frequencies, fourier_values
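
power_spectrum is not shown; a plausible stand-in (an assumption; the comparison of `frequencies` against `arr_len` above suggests a bin-index frequency axis) is:

import numpy as np

def power_spectrum(signal):
    # One-sided power spectrum with bin indices as the frequency axis.
    spec = np.fft.rfft(signal)
    return np.arange(spec.size), np.abs(spec) ** 2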
Example #24
def autocov(ary, axis=-1):
    """Compute autocovariance estimates for every lag for the input array.

    Parameters
    ----------
    ary : Numpy array
        An array containing MCMC samples

    Returns
    -------
    acov: Numpy array same size as the input array
    """
    axis = axis if axis > 0 else len(ary.shape) + axis
    n = ary.shape[axis]
    m = next_fast_len(2 * n)

    ary = ary - ary.mean(axis, keepdims=True)

    # added to silence tuple warning for a submodule
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")

        ifft_ary = np.fft.rfft(ary, n=m, axis=axis)
        ifft_ary *= np.conjugate(ifft_ary)

        shape = tuple(
            slice(None) if dim_len != axis else slice(0, n)
            for dim_len, _ in enumerate(ary.shape))
        cov = np.fft.irfft(ifft_ary, n=m, axis=axis)[shape]
        cov /= n

    return cov
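
A quick sanity check (assuming autocov above and its imports, warnings and next_fast_len, are in scope): for an AR(1) process with coefficient 0.9, the lag-1 autocovariance should be about 0.9 times the variance.

import numpy as np

rng = np.random.default_rng(0)
n = 10_000
x = np.empty(n)
x[0] = rng.standard_normal()
for i in range(1, n):
    x[i] = 0.9 * x[i - 1] + rng.standard_normal()
acov = autocov(x[None, :])[0]
print(acov[1] / acov[0])  # close to 0.9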
Example #25
def get_envelope(data, axis=0):
    '''Extracts the instantaneous amplitude (envelope) of an analytic
    signal using the Hilbert transform'''
    n_samples = data.shape[axis]
    instantaneous_amplitude = np.abs(
        hilbert(data, N=next_fast_len(n_samples), axis=axis))
    return np.take(instantaneous_amplitude, np.arange(n_samples), axis=axis)
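
A usage sketch (assuming get_envelope above and its imports are in scope): for an amplitude-modulated tone, the envelope recovers the slow modulation rather than the 100 Hz carrier.

import numpy as np

t = np.linspace(0, 1, 2000, endpoint=False)
carrier = np.sin(2 * np.pi * 100 * t)
modulation = 1 + 0.5 * np.sin(2 * np.pi * 3 * t)
env = get_envelope(modulation * carrier)
print(np.allclose(env, modulation, atol=0.1))  # True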
Example #26
def hilbert(darray, N=None, dim=None):
    """
    Compute the analytic signal, using the Hilbert transform.
    The transformation is done along the selected dimension.

    Parameters
    ----------
    darray : xarray
        Signal data. Must be real.
    N : int, optional
        Number of Fourier components. Defaults to size along dim.
    dim : string, optional
        Axis along which to do the transformation.
        Uses the only dimension if darray is 1D.

    Returns
    -------
    darray : xarray
        Analytic signal of the Hilbert transform of 'darray' along selected axis.
    """
    dim = get_maybe_only_dim(darray, dim)
    n_orig = darray.sizes[dim]
    N_unspecified = N is None
    if N_unspecified:
        N = next_fast_len(n_orig)
    out = xarray.apply_ufunc(_hilbert_wraper,
                             darray,
                             input_core_dims=[[dim]],
                             output_core_dims=[[dim]],
                             kwargs=dict(N=N,
                                         n_orig=n_orig,
                                         N_unspecified=N_unspecified))

    return out
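
_hilbert_wraper is not shown; a sketch of what it presumably does (an assumption): compute the analytic signal with scipy along the core dimension, then crop the zero-padding back off when N was only an automatically chosen fast FFT length.

import numpy as np
from scipy.signal import hilbert as scipy_hilbert

def _hilbert_wraper(data, N, n_orig, N_unspecified):
    # Analytic signal along the last (core) axis; crop back to the
    # original length when N was only padding.
    out = scipy_hilbert(data, N=N, axis=-1)
    return out[..., :n_orig] if N_unspecified else out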
Example #27
    def __init__(self, file, sourcegrid=None, w='r'):

        self.w = w

        try:
            self.file = h5py.File(file, self.w)
        except IOError:
            msg = 'Unable to open input file ' + file
            raise IOError(msg)

        self.stats = dict(self.file['stats'].attrs)
        self.fdomain = self.stats['fdomain']
        self.sourcegrid = self.file['sourcegrid']
        
        try:
            self.data = self.file['data']
        except KeyError:
            self.data = self.file['data_z']

        # determine the zero-padded length before the frequency axis needs it
        if 'npad' not in self.stats:
            if self.fdomain:
                self.stats['npad'] = 2 * self.stats['nt'] - 2
            else:
                self.stats['npad'] = next_fast_len(2 * self.stats['nt'] - 1)
        if self.fdomain:
            self.freq = np.fft.rfftfreq(self.stats['npad'],
                                        d=1. / self.stats['Fs'])
Example #28
    def _regen_kernel(self, XYZ):
        """
        Compute kernel.

        Parameters
        ----------
        XYZ : :py:class:`~numpy.ndarray`
            (N_antenna, 3) Cartesian instrument geometry.

            `XYZ` must be given in BFSF.
        """
        N_samples = fftpack.next_fast_len(self._NFS)
        lon_smpl = fourier.ffs_sample(self._T, self._NFS, self._Tc, N_samples)
        px_x, px_y, px_z = sph.pol2cart(1, self._grid_colat,
                                        lon_smpl.reshape(1, -1))
        pix_smpl = np.stack([px_x, px_y, px_z], axis=0)

        N_antenna = len(XYZ)
        N_height = len(self._grid_colat)

        # `self._NFS` assumes imaging is performed with `XYZ` centered at the origin.
        XYZ_c = XYZ - XYZ.mean(axis=0)
        window = func.Tukey(self._T, self._Tc, self._alpha_window)
        k_smpl = np.zeros((N_antenna, N_height, N_samples), dtype=self._cp)
        ne.evaluate('exp(A * B) * C',
                    dict(A=1j * 2 * np.pi / self._wl,
                         B=np.tensordot(XYZ_c, pix_smpl, axes=1),
                         C=window(lon_smpl)),
                    out=k_smpl,
                    casting='same_kind')  # Due to limitations of NumExpr2

        self._FSk = fourier.ffs(k_smpl, self._T, self._Tc, self._NFS, axis=2)
        self._XYZk = XYZ
Example #29
def stack(data, stack_type='linear'):
    """
    Stack data by first axis.

    :type stack_type: str or tuple
    :param stack_type: Type of stack, one of the following:
        ``'linear'``: average stack (default),
        ``('pw', order)``: phase weighted stack of given order
        (see [Schimmel1997]_, order 0 corresponds to linear stack),
        ``('root', order)``: root stack of given order
        (order 1 corresponds to linear stack).
    """
    if stack_type == 'linear':
        stack = np.mean(data, axis=0)
    elif stack_type[0] == 'pw':
        from scipy.signal import hilbert
        try:
            from scipy.fftpack import next_fast_len
        except ImportError:  # scipy < 0.18
            next_fast_len = next_pow_2
        npts = np.shape(data)[1]
        nfft = next_fast_len(npts)
        anal_sig = hilbert(data, N=nfft)[:, :npts]
        norm_anal_sig = anal_sig / np.abs(anal_sig)
        phase_stack = np.abs(np.mean(norm_anal_sig, axis=0)) ** stack_type[1]
        stack = np.mean(data, axis=0) * phase_stack
    elif stack_type[0] == 'root':
        r = np.mean(np.sign(data) * np.abs(data)
                    ** (1 / stack_type[1]), axis=0)
        stack = np.sign(r) * np.abs(r) ** stack_type[1]
    else:
        raise ValueError('stack type is not valid.')
    return stack
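
Usage sketch (assuming stack above is in scope): phase-weighted stacking suppresses incoherent noise relative to the plain average.

import numpy as np

rng = np.random.default_rng(0)
t = np.linspace(0, 1, 500)
wavelet = np.sin(2 * np.pi * 5 * t) * np.exp(-5 * t)
data = wavelet + 0.5 * rng.standard_normal((20, t.size))
linear = stack(data, 'linear')
pws = stack(data, ('pw', 2))
print(np.sum((linear - wavelet)**2) > np.sum((pws - wavelet)**2))  # usually True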
Example #30
    def colgin2009(self):
        """colgin


        Returns:
            [type]: [description]

        References
        ------------
        1) Colgin, L. L., Denninger, T., Fyhn, M., Hafting, T., Bonnevie, T., Jensen, O., ... & Moser, E. I. (2009). Frequency of gamma oscillations routes flow of information in the hippocampus. Nature, 462(7271), 353-357.
        2) Tallon-Baudry, C., Bertrand, O., Delpuech, C., & Pernier, J. (1997). Oscillatory γ-band (30–70 Hz) activity induced by a visual search task in humans. Journal of Neuroscience, 17(2), 722-734.
        """
        t_wavelet = np.arange(-4, 4, 1 / self.sampfreq)
        freqs = self.freqs
        n = len(self.lfp)
        fastn = next_fast_len(n)
        signal = np.pad(self.lfp, (0, fastn - n), "constant", constant_values=0)
        # signal = np.tile(np.expand_dims(signal, axis=0), (len(freqs), 1))
        # wavelet_at_freqs = np.zeros((len(freqs), len(t_wavelet)), dtype=complex)
        conv_val = np.zeros((len(freqs), n), dtype=complex)
        for i, freq in enumerate(freqs):
            sigma = 7 / (2 * np.pi * freq)
            A = (sigma * np.sqrt(np.pi)) ** -0.5
            wavelet_at_freq = (
                A
                * np.exp(-(t_wavelet ** 2) / (2 * sigma ** 2))
                * np.exp(2j * np.pi * freq * t_wavelet)
            )

            conv_val[i, :] = sg.fftconvolve(
                signal, wavelet_at_freq, mode="same", axes=-1
            )[:n]

        return np.abs(conv_val) ** 2
Example #31
def fft(x,
        axes=None,
        center=True,
        normalize=False,
        backend=None,
        inverse=False,
        fft_backend=None,
        pad_fft=False,  # zero-pad to fast FFT lengths; a bare `pad` would shadow the pad() helper below
        y=None,
        allow_c2r=False):
    """Perform the FFT of an input."""
    # Get backend
    if backend is None:
        backend = getBackend(x)

    # Determine optimal size to pad, if desired
    original_size = shape(x)
    if pad_fft:
        padded_size = list(original_size)
        for ind, d in enumerate(original_size):
            if next_fast_len(d) != d:
                padded_size[ind] = next_fast_len(d)
    else:
        padded_size = original_size

    # Get FFT functions
    fft_fun, ifft_fun = fftfuncs(padded_size,
                                 axes,
                                 center,
                                 normalize,
                                 getDatatype(x),
                                 backend,
                                 fft_backend,
                                 allow_c2r=allow_c2r)

    # Select FFT inverse
    FFT = fft_fun if not inverse else ifft_fun

    # Set correct byte order
    x[:] = setByteOrder(x, 'f')

    if tuple(padded_size) != tuple(original_size):
        # pad() and crop() are assumed module-level helpers that zero-pad
        # and crop about the array center.
        return crop(FFT(pad(x, padded_size, center=True), y=y),
                    original_size,
                    center=True)
    else:
        return FFT(x, y=y)
Example #32
def THDN(signal, fs, weight=None):
    """Measure the THD+N for a signal and print the results

    Prints the estimated fundamental frequency and the measured THD+N.  This is
    calculated from the ratio of the entire signal before and after
    notch-filtering.

    This notch-filters by nulling out the frequency coefficients ±10% of the
    fundamental

    TODO: Make R vs F reference a parameter (currently is R)
    TODO: Or report all of the above in a dictionary?

    """
    # Get rid of DC and window the signal
    signal = np.asarray(signal) + 0.0  # Float-like array
    # TODO: Do this in the frequency domain, and take any skirts with it?
    signal -= mean(signal)

    window = general_cosine(len(signal), flattops['HFT248D'])
    windowed = signal * window
    del signal

    # Zero pad to nearest power of two
    new_len = next_fast_len(len(windowed))
    windowed = concatenate((windowed, zeros(new_len - len(windowed))))

    # Measure the total signal before filtering but after windowing
    total_rms = rms_flat(windowed)

    # Find the peak of the frequency spectrum (fundamental frequency)
    f = rfft(windowed)
    i = argmax(abs(f))
    true_i = parabolic(log(abs(f)), i)[0]
    frequency = fs * (true_i / len(windowed))

    # Filter out fundamental by throwing away values ±10%
    lowermin = int(true_i * 0.9)
    uppermin = int(true_i * 1.1)
    f[lowermin: uppermin] = 0
    # TODO: Zeroing FFT bins is bad

    # Transform noise back into the time domain and measure it
    noise = irfft(f)
    # TODO: RMS and A-weighting in frequency domain?  Parseval?

    if weight is None:
        pass
    elif weight == 'A':
        # Apply A-weighting to residual noise (Not normally used for
        # distortion, but used to measure dynamic range with -60 dBFS signal,
        # for instance)
        noise = A_weight(noise, fs)
        # TODO: filtfilt? tail end of filter?
    else:
        raise ValueError('Weighting not understood')

    # TODO: Return a dict or list of frequency, THD+N?
    return rms_flat(noise) / total_rms
Example #33
def LSDecompFW(wav, width=16384, max_nnz_rate=8000.0 / 262144.0, sparsify=0.01,
               taps=10, level=3, wl_weight=1, verbose=False, fc=120):

    MaxiterA = 60

    length = len(wav)
    n = sft.next_fast_len(length)

    signal = np.zeros((n))
    signal[0:length] = wav[0:length]

    h0, h1 = daubcqf(taps, 'min')
    L = level

    # print(n)
    original_signal = lambda s: sft.idct(s[0:n]) + (1.0) * (wl_weight) * idwt(s[n + 1:], h0, h1, L)[0]
    LSseparate = lambda x: np.concatenate([sft.dct(x), (1.0) * (wl_weight) * dwt(x, h0, h1, L)[0]], axis=0)

    # measurement
    y = signal

    # FISTA
    cnnz = float("Inf")
    c = signal
    temp = LSseparate(y)
    temp2 = original_signal(temp)
    print('aaa' + str(temp2.shape))

    maxabsThetaTy = max(abs(temp))

    while cnnz > max_nnz_rate * n:
        # FISTA
        tau = sparsify * maxabsThetaTy
        tolA = 1.0e-7

        fh = (original_signal, LSseparate)
        c = relax.fista(A=fh, b=y, x=LSseparate(c), tol=tolA, l=tau,
                        maxiter=MaxiterA)[0]
        cnnz = np.size(np.nonzero(original_signal(c)))

        print('nnz = ' + str(cnnz) + ' / ' + str(n) + ' at tau = ' + str(tau))
        sparsify = sparsify * 2
        if sparsify == 0.166:
            sparsify = 0.1

    signal_dct = sft.idct(c[0:n])
    signal_dct = signal_dct[0:length]
    signal_wl = (1.0) * float(wl_weight) * idwt(c[n + 1:], h0, h1, level)[0]
    signal_wl = signal_wl[0:length]

    return signal_dct, signal_wl
Example #34
import numpy as np
import os
import matplotlib.pyplot as plt
import h5py
from noisi.my_classes.basisfunction import BasisFunction
from noisi.util.source_masks import get_source_mask
from noisi.util.plot import plot_grid
try:
    from scipy.fftpack import next_fast_len
except ImportError:
    from noisi.borrowed_functions.scipy_next_fast_len import next_fast_len
from obspy.signal.invsim import cosine_taper
import json

n = next_fast_len(2*n_samples-1)    
freq = np.fft.rfftfreq(n,d=1./sampling_rate)
print(freq.shape)
taper = cosine_taper(len(freq),0.005)

grd  = np.load(os.path.join(projectpath,'sourcegrid.npy'))
source_config = json.load(open(os.path.join(sourcepath,'source_config.json')))
bfunc_type = source_config['spectra_decomposition']
bfunc_K = source_config['spectra_nr_parameters']


b = BasisFunction(bfunc_type,bfunc_K,N=len(freq))



Example #35
    print('Found wavefield.')
    with WaveField(wfs[0]) as wf:
        df = wf.stats['Fs']
        nt = wf.stats['nt']
        
else:
    df = float(input('Sampling rate of synthetic Greens functions in Hz?\n'))
    nt = int(input('Nr of time steps in synthetic Greens functions?\n'))





# n for the fft is larger due to zero padding --> apparent higher frequency sampling
n = next_fast_len(2 * nt - 1)
freq = np.fft.rfftfreq(n,d=1./df)
taper = cosine_taper(len(freq),0.01)
print('Determined frequency axis.')

def get_distance(grid,location):
    def f(lat,lon,location):
        return abs(gps2dist_azimuth(lat,lon,location[0],location[1])[0])
    dist = np.array([f(lat,lon,location) for lat,lon in zip(grid[1],grid[0])])
    return dist

# Use Basemap to figure out where ocean is
def get_ocean_mask():
    print('Getting ocean mask...')
    from mpl_toolkits.basemap import Basemap
    latmin = grd[1].min()
Example #36
def cross_correlate_masked(arr1, arr2, m1, m2, mode='full', axes=(-2, -1),
                           overlap_ratio=3 / 10):
    """
    Masked normalized cross-correlation between arrays.

    Parameters
    ----------
    arr1 : ndarray
        First array.
    arr2 : ndarray
        Seconds array. The dimensions of `arr2` along axes that are not
        transformed should be equal to that of `arr1`.
    m1 : ndarray
        Mask of `arr1`. The mask should evaluate to `True`
        (or 1) on valid pixels. `m1` should have the same shape as `arr1`.
    m2 : ndarray
        Mask of `arr2`. The mask should evaluate to `True`
        (or 1) on valid pixels. `m2` should have the same shape as `arr2`.
    mode : {'full', 'same'}, optional
        'full':
            This returns the convolution at each point of overlap. At
            the end-points of the convolution, the signals do not overlap
            completely, and boundary effects may be seen.
        'same':
            The output is the same size as `arr1`, centered with respect
            to the 'full' output. Boundary effects are less prominent.
    axes : tuple of ints, optional
        Axes along which to compute the cross-correlation.
    overlap_ratio : float, optional
        Minimum allowed overlap ratio between images. The correlation for
        translations corresponding with an overlap ratio lower than this 
        threshold will be ignored. A lower `overlap_ratio` leads to smaller 
        maximum translation, while a higher `overlap_ratio` leads to greater 
        robustness against spurious matches due to small overlap between 
        masked images.

    Returns
    -------
    out : ndarray
        Masked normalized cross-correlation.

    Raises
    ------
    ValueError : if correlation `mode` is not valid, or array dimensions along
        non-transformation axes are not equal.

    References
    ----------
    .. [1] Dirk Padfield. Masked Object Registration in the Fourier Domain.
           IEEE Transactions on Image Processing, vol. 21(5), 
           pp. 2706-2718 (2012). :DOI:`10.1109/TIP.2011.2181402`
    .. [2] D. Padfield. "Masked FFT registration". In Proc. Computer Vision and 
           Pattern Recognition, pp. 2918-2925 (2010).  
           :DOI:`10.1109/CVPR.2010.5540032`
    """
    if mode not in {'full', 'same'}:
        raise ValueError("Correlation mode {} is not valid.".format(mode))

    fixed_image = np.array(arr1, dtype=float)
    fixed_mask = np.array(m1, dtype=bool)
    moving_image = np.array(arr2, dtype=float)
    moving_mask = np.array(m2, dtype=bool)
    eps = np.finfo(float).eps

    # Array dimensions along non-transformation axes should be equal.
    all_axes = set(range(fixed_image.ndim))
    for axis in (all_axes - set(axes)):
        if fixed_image.shape[axis] != moving_image.shape[axis]:
            raise ValueError(
                "Array shapes along non-transformation axes should be "
                "equal, but dimensions along axis {a} differ".format(a=axis))

    # Determine final size along transformation axes
    # Note that it might be faster to compute Fourier transform in a slightly
    # larger shape (`fast_shape`). Then, after all fourier transforms are done,
    # we slice back to`final_shape` using `final_slice`.
    final_shape = list(arr1.shape)
    for axis in axes:
        final_shape[axis] = fixed_image.shape[axis] + \
            moving_image.shape[axis] - 1
    final_shape = tuple(final_shape)
    final_slice = tuple([slice(0, int(sz)) for sz in final_shape])

    # Extend transform axes to the next fast length (i.e. a product of
    # powers of 2, 3 and 5)
    fast_shape = tuple([next_fast_len(final_shape[ax]) for ax in axes])

    # We use numpy's fft because it allows to leave transform axes unchanged,
    # which is not possible with SciPy's fftn/ifftn
    # E.g. arr shape (2,3,7), transform along axes (0, 1) with shape (4,4)
    # results in arr_fft shape (4,4, 7)
    fft = partial(np.fft.fftn, s=fast_shape, axes=axes)
    ifft = partial(np.fft.ifftn, s=fast_shape, axes=axes)

    fixed_image[np.logical_not(fixed_mask)] = 0.0
    moving_image[np.logical_not(moving_mask)] = 0.0

    # N-dimensional analog to rotation by 180deg is flip over all relevant axes.
    # See [1] for discussion.
    rotated_moving_image = _flip(moving_image, axes=axes)
    rotated_moving_mask = _flip(moving_mask, axes=axes)

    fixed_fft = fft(fixed_image)
    rotated_moving_fft = fft(rotated_moving_image)
    fixed_mask_fft = fft(fixed_mask)
    rotated_moving_mask_fft = fft(rotated_moving_mask)

    # Calculate overlap of masks at every point in the convolution.
    # Locations with high overlap should not be taken into account.
    number_overlap_masked_px = np.real(
        ifft(rotated_moving_mask_fft * fixed_mask_fft))
    number_overlap_masked_px[:] = np.round(number_overlap_masked_px)
    number_overlap_masked_px[:] = np.fmax(number_overlap_masked_px, eps)
    masked_correlated_fixed_fft = ifft(rotated_moving_mask_fft * fixed_fft)
    masked_correlated_rotated_moving_fft = ifft(
        fixed_mask_fft * rotated_moving_fft)

    numerator = ifft(rotated_moving_fft * fixed_fft)
    numerator -= masked_correlated_fixed_fft * \
        masked_correlated_rotated_moving_fft / number_overlap_masked_px

    fixed_squared_fft = fft(np.square(fixed_image))
    fixed_denom = ifft(rotated_moving_mask_fft * fixed_squared_fft)
    fixed_denom -= np.square(masked_correlated_fixed_fft) / \
        number_overlap_masked_px
    fixed_denom[:] = np.fmax(fixed_denom, 0.0)

    rotated_moving_squared_fft = fft(np.square(rotated_moving_image))
    moving_denom = ifft(fixed_mask_fft * rotated_moving_squared_fft)
    moving_denom -= np.square(masked_correlated_rotated_moving_fft) / \
        number_overlap_masked_px
    moving_denom[:] = np.fmax(moving_denom, 0.0)

    denom = np.sqrt(fixed_denom * moving_denom)

    # Slice back to expected convolution shape.
    numerator = numerator[final_slice]
    denom = denom[final_slice]
    number_overlap_masked_px = number_overlap_masked_px[final_slice]

    if mode == 'same':
        _centering = partial(_centered,
                             newshape=fixed_image.shape, axes=axes)
        denom = _centering(denom)
        numerator = _centering(numerator)
        number_overlap_masked_px = _centering(number_overlap_masked_px)

    # Pixels where `denom` is very small will introduce large
    # numbers after division. To get around this problem,
    # we zero-out problematic pixels.
    tol = 1e3 * eps * np.max(np.abs(denom), axis=axes, keepdims=True)
    nonzero_indices = denom > tol

    out = np.zeros_like(denom)
    out[nonzero_indices] = numerator[nonzero_indices] / denom[nonzero_indices]
    np.clip(out, a_min=-1, a_max=1, out=out)

    # Apply overlap ratio threshold
    number_px_threshold = overlap_ratio * np.max(number_overlap_masked_px,
                                                 axis=axes, keepdims=True)
    out[number_overlap_masked_px < number_px_threshold] = 0.0

    return out
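
The _flip and _centered helpers are not defined here; minimal sketches consistent with how they are called above (assumptions, mirroring scikit-image's internal helpers):

import numpy as np

def _flip(arr, axes=None):
    # Reverse `arr` along the given axes (all axes if None); the
    # N-dimensional analog of a 180-degree rotation.
    if axes is None:
        axes = range(arr.ndim)
    return arr[tuple(slice(None, None, -1) if ax in axes else slice(None)
                     for ax in range(arr.ndim))]

def _centered(arr, newshape, axes):
    # Crop the centered `newshape` window out of `arr` along `axes`,
    # as used for mode='same' above.
    slices = [slice(None)] * arr.ndim
    for ax in axes:
        start = (arr.shape[ax] - newshape[ax]) // 2
        slices[ax] = slice(start, start + newshape[ax])
    return arr[tuple(slices)]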
Example #37
def circular_resample(x, Fs, tau, pad_to_next_fast_len=True):
    '''Circularly resample (i.e. shift) signal `x` by delay `tau`.

    The resampling is "circular" in that it uses the FFT, which maps
    a finite `N`-length sequence onto the unit circle with the implicit
    assumption of periodic boundary conditions. Resampling `x` with a
    time shift `tau` then corresponds to a `Fs * tau`-rotation of the
    sampling points about the unit circle. Because of the periodicity
    assumption, this rotation can lead to "wrap-around" artifacts.
    Accounting for trigger offsets, for example, typically requires
    small rotations such that the wrap-around effects are negligible.
    Regardless, wrap-around effects can be minimized by appropriately
    zero-padding `x` prior to calling this function.

    Input parameters:
    -----------------
    x - array_like, `(N,)`
        The uniformly sampled, real sequence to be circularly resampled.
        If `x` is not real, a ValueError is raised. (Note that this
        algorithm could readily be extended to work with complex signals,
        but the `random_data` package is optimized for real-valued
        signals in the time domain).
        [x] = arbitrary units

    Fs - float
        Sample rate of sequence `x`.
        [Fs] = arbitrary units

    tau - float
        If the original sequence `x` has corresponding sample times `t`,
        then the circularly resampled sequence will correspond to sample
        times `t + tau`. Thus, positive `tau` corresponds to signal delay,
        and negative `tau` corresponds to signal advancement. Note that
        `tau` need *not* be an integer multiple of the sample spacing.
        [tau] = 1 / [Fs]

    pad_to_next_fast_len - bool
        If True, prior to computing the FFT, pad `x` with zeros up to the
        next-largest "5-smooth" number, for which Numpy's FFTPACK routines
        can efficiently compute the FFT.

    Returns:
    --------
    xr - array_like, `(N,)`
        The circularly resampled, real sequence.
        [xr] = [x]

    '''
    if np.iscomplexobj(x):
        raise ValueError('`x` must be a real-valued array')

    N = len(x)

    if pad_to_next_fast_len:
        N = next_fast_len(N)

    xhat = np.fft.rfft(x, n=N)
    f = np.fft.rfftfreq(N, d=(1. / Fs))

    # Apply linear phase rotation corresponding to delay `tau`
    xhat *= np.exp(1j * 2 * np.pi * f * tau)

    # Per the documentation, `n` must be specified if an odd number
    # of output points is desired (i.e. if the `N` used in the forward
    # FFT computation above is odd)
    xr = np.fft.irfft(xhat, n=N)

    # Finally, if `x` was zero padded prior to computing the FFT,
    # the above computed `xr` does *not* have the same length as `x`.
    # Return only first `len(x)` values of circularly resampled sequence.
    return xr[:len(x)]
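
A usage check (assuming circular_resample above is in scope): delaying a sine that is exactly periodic over the record by half a sample reproduces the analytically delayed signal, with no wrap-around error.

import numpy as np

Fs = 10.0
t = np.arange(100) / Fs
x = np.sin(2 * np.pi * 0.5 * t)  # exactly 5 periods in the record
tau = 0.5 / Fs                   # half-sample delay
xr = circular_resample(x, Fs, tau, pad_to_next_fast_len=True)
print(np.max(np.abs(xr - np.sin(2 * np.pi * 0.5 * (t + tau)))))  # ~0 (machine precision)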
Example #38
def test_circular_resample():
    # Complex input should raise ValueError:
    # ======================================
    x = np.zeros(10, dtype='complex')
    tools.assert_raises(
        ValueError,
        rd.signals.sampling.circular_resample,
        *[x, 1., 0])

    # Null (i.e. no shift):
    # =====================

    # N even, 5-smooth number:
    # ------------------------
    N = 10
    x = np.arange(N)
    Fs = 1.
    tau = 0

    xr = rd.signals.sampling.circular_resample(
        x, Fs, tau, pad_to_next_fast_len=True)

    np.testing.assert_almost_equal(xr, x)

    # N even, *not* a 5-smooth number:
    # --------------------------------
    N = 14
    x = np.arange(N)
    Fs = 1.
    tau = 0

    xr = rd.signals.sampling.circular_resample(
        x, Fs, tau, pad_to_next_fast_len=True)

    np.testing.assert_almost_equal(xr, x)

    # N odd, 5-smooth number:
    # -----------------------
    N = 9
    x = np.arange(N)
    Fs = 1.
    tau = 0

    xr = rd.signals.sampling.circular_resample(
        x, Fs, tau, pad_to_next_fast_len=True)

    np.testing.assert_almost_equal(xr, x)

    # N odd, *not* a 5-smooth number:
    # -------------------------------
    N = 11
    x = np.arange(N)
    Fs = 1.
    tau = 0

    xr = rd.signals.sampling.circular_resample(
        x, Fs, tau, pad_to_next_fast_len=True)

    np.testing.assert_almost_equal(xr, x)

    # Positive, integer shift:
    # ========================

    # N even, 5-smooth number:
    # ------------------------
    N = 10
    x = np.arange(N)
    Fs = 1.
    tau = 1

    # No zero padding of `x`, so simply moves `x[0]` to end of array
    xr = rd.signals.sampling.circular_resample(
        x, Fs, tau, pad_to_next_fast_len=True)

    np.testing.assert_almost_equal(
        xr,
        np.concatenate((x[1:], [x[0]])))

    # N even, *not* a 5-smooth number:
    # --------------------------------
    N = 14
    x = np.arange(N)
    Fs = 1.
    tau = 1

    # Zero padding of `x` means that `x[0]` is cycled out of the
    # viewing window and a `0` is cycled into the end of `x`
    xr = rd.signals.sampling.circular_resample(
        x, Fs, tau, pad_to_next_fast_len=True)

    np.testing.assert_almost_equal(
        xr,
        np.concatenate((x[1:], [0])))

    # N odd, 5-smooth number:
    # -----------------------
    N = 9
    x = np.arange(N)
    Fs = 1.
    tau = 1

    # No zero padding of `x`, so simply moves `x[0]` to end of array
    xr = rd.signals.sampling.circular_resample(
        x, Fs, tau, pad_to_next_fast_len=True)

    np.testing.assert_almost_equal(
        xr,
        np.concatenate((x[1:], [x[0]])))

    # N odd, *not* a 5-smooth number:
    # -------------------------------
    N = 11
    x = np.arange(N)
    Fs = 1.
    tau = 1

    # Zero padding of `x` means that `x[0]` is cycled out of the
    # viewing window and a `0` is cycled into the end of `x`
    xr = rd.signals.sampling.circular_resample(
        x, Fs, tau, pad_to_next_fast_len=True)

    np.testing.assert_almost_equal(
        xr,
        np.concatenate((x[1:], [0])))

    # Negative, integer shift:
    # ========================

    # N even, 5-smooth number:
    # ------------------------
    N = 10
    x = np.arange(N)
    Fs = 1.
    tau = -1

    # No zero padding of `x`, so simply moves `x[-1]` to start of array
    xr = rd.signals.sampling.circular_resample(
        x, Fs, tau, pad_to_next_fast_len=True)

    np.testing.assert_almost_equal(
        xr,
        np.concatenate(([x[-1]], x[:-1])))

    # N even, *not* a 5-smooth number:
    # --------------------------------
    N = 14
    x = np.arange(N)
    Fs = 1.
    tau = -1

    # Zero padding of `x` means that `x[-1]` is cycled out of the
    # viewing window and a `0` is cycled into the start of `x`
    xr = rd.signals.sampling.circular_resample(
        x, Fs, tau, pad_to_next_fast_len=True)

    np.testing.assert_almost_equal(
        xr,
        np.concatenate(([0], x[:-1])))

    # N odd, 5-smooth number:
    # -----------------------
    N = 9
    x = np.arange(N)
    Fs = 1.
    tau = -1

    # No zero padding of `x`, so simply moves `x[-1]` to start of array
    xr = rd.signals.sampling.circular_resample(
        x, Fs, tau, pad_to_next_fast_len=True)

    np.testing.assert_almost_equal(
        xr,
        np.concatenate(([x[-1]], x[:-1])))

    # N odd, *not* a 5-smooth number:
    # -------------------------------
    N = 11
    x = np.arange(N)
    Fs = 1.
    tau = -1

    # Zero padding of `x` means that `x[-1]` is cycled out of the
    # viewing window and a `0` is cycled into the start of `x`
    xr = rd.signals.sampling.circular_resample(
        x, Fs, tau, pad_to_next_fast_len=True)

    np.testing.assert_almost_equal(
        xr,
        np.concatenate(([0], x[:-1])))

    # Positive, non-integer shift:
    # ============================
    #
    # Note that `circular_resample` uses the FFT, which maps a finite
    # `N`-length sequence onto the unit circle with the implicit assumption
    # of periodic boundary conditions. The resulting periodicity allows
    # representation of the signal with an `N`-term Fourier series.
    #
    # Now, after being mapped onto the unit circle, the ramping sequence
    # used in the above computations (`x = np.arange(N)`) corresponds to a
    # periodic sawtooth signal. It is well known that the Fourier series
    # does a poor job of representing the underlying signal at sharp
    # edges, such as the vertices of the sawteeth (i.e. the Gibbs
    # phenomenon).
    # The Gibbs phenomenon becomes readily apparent when shifting the
    # sawtooth signal by a non-integer number of samples.
    #
    # Because of this, it makes more sense to use a simpler signal, such
    # as a pure sinusoid with period equal to the sequence length, to check
    # that `circular_resample` is performing as expected for non-integer
    # shifts. Note that this test only works well if we do *not* zero pad
    # the input signal to a 5-smooth number (i.e. `len(x)` needs to already
    # be a 5-smooth number, or the `pad_to_next_fast_len` keyword argument
    # must be `False`).
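
    # As an illustrative aside (not asserted here, since the exact ripple
    # amplitude depends on `N`): for the ramp `x = np.arange(10.)`, an
    # integer shift like `circular_resample(x, 1., 1)` reproduces
    # `np.roll(x, -1)` to numerical precision, whereas a half-sample
    # shift like `circular_resample(x, 1., 0.5)` oscillates about the
    # midpoint values, with ringing largest near the wrap-around edge.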

    def unity_frequency_sine(t):
        'Get unity frequency sine wave.'
        f0 = 1
        return np.sin(2 * np.pi * f0 * t)

    def period_N_sine_wrapper(N):
        'Get `N`-length sine wave containing exactly *one* period.'
        t = np.arange(0, N, dtype='float') / N
        Fs = 1. / (t[1] - t[0])
        x = unity_frequency_sine(t)

        return t, Fs, x

    # N even, 5-smooth number:
    # ------------------------
    N = 10
    t, Fs, x = period_N_sine_wrapper(N)
    tau = 0.5 / Fs

    xr = rd.signals.sampling.circular_resample(
        x, Fs, tau, pad_to_next_fast_len=True)

    np.testing.assert_almost_equal(
        xr,
        unity_frequency_sine(t + tau))

    # N odd, 5-smooth number:
    # -----------------------
    N = 9
    t, Fs, x = period_N_sine_wrapper(N)
    tau = 0.5 / Fs

    xr = rd.signals.sampling.circular_resample(
        x, Fs, tau, pad_to_next_fast_len=True)

    np.testing.assert_almost_equal(
        xr,
        unity_frequency_sine(t + tau))

    # Negative, non-integer shift:
    # ============================

    # N even, 5-smooth number:
    # ------------------------
    N = 10
    t, Fs, x = period_N_sine_wrapper(N)
    tau = -0.5 / Fs

    xr = rd.signals.sampling.circular_resample(
        x, Fs, tau, pad_to_next_fast_len=True)

    np.testing.assert_almost_equal(
        xr,
        unity_frequency_sine(t + tau))

    # N odd, 5-smooth number:
    # -----------------------
    N = 9
    t, Fs, x = period_N_sine_wrapper(N)
    tau = -0.5 / Fs

    xr = rd.signals.sampling.circular_resample(
        x, Fs, tau, pad_to_next_fast_len=True)

    np.testing.assert_almost_equal(
        xr,
        unity_frequency_sine(t + tau))

    # Spectrum for non-integer shift, N *not* a 5-smooth number:
    # ==========================================================

    # Construct signal:
    # -----------------
    # Desired number of points in signal; *not* a 5-smooth number
    N = 63331
    tools.assert_not_equal(
        N,
        next_fast_len(N),
        msg='`N` *is* a 5-smooth number; testing requires non-5-smooth number')

    # Translate `N` into values usable by `random_data` routines
    N_power_of_2 = int(2 ** np.ceil(np.log2(N)))
    Fs = 1.
    t0 = 0
    T = N_power_of_2 / Fs

    # Parameters of broadband spectrum
    f0_broad = 0.
    tau_broad = 2.
    G0 = 1.
    noise_floor = 1e-2
    seed = None

    # Broadband signal, where `len(sig.x)` is a power of 2
    sig = rd.signals.RandomSignal(
        Fs=Fs, t0=t0, T=T,
        f0=f0_broad, tau=tau_broad, G0=G0,
        noise_floor=noise_floor, seed=seed)

    # Only take the first `N` points of `sig.x`, producing
    # a signal with a length that is *not* a 5-smooth number
    x = sig.x[:N]
    t = sig.t()[:N]

    tools.assert_not_equal(
        len(x),
        next_fast_len(len(x)),
        msg='`len(x)` *is* a 5-smooth number; non-5-smooth number required')

    # Circularly resample signal:
    # ---------------------------
    # Integer shift
    tau_1 = 1. / Fs
    x_1 = rd.signals.sampling.circular_resample(
        x, Fs, tau_1, pad_to_next_fast_len=True)

    # Non-integer shift
    tau_05 = 0.5 / Fs
    x_05 = rd.signals.sampling.circular_resample(
        x, Fs, tau_05, pad_to_next_fast_len=True)

    # Compare the spectra of the original and time-shifted signals:
    # -------------------------------------------------------------
    # Spectral-estimation parameters
    Nreal_per_ens = 1000
    Tens = t[-1] - t[0]  # Define ensemble to be full length of `x`

    # Estimate spectra
    asd = rd.spectra.AutoSpectralDensity(
        x, Fs=Fs, t0=t0,
        Tens=Tens, Nreal_per_ens=Nreal_per_ens)
    asd_1 = rd.spectra.AutoSpectralDensity(
        x_1, Fs=Fs, t0=t0,
        Tens=Tens, Nreal_per_ens=Nreal_per_ens)
    asd_05 = rd.spectra.AutoSpectralDensity(
        x_05, Fs=Fs, t0=t0,
        Tens=Tens, Nreal_per_ens=Nreal_per_ens)

    # Relative error of the time-shifted spectra w.r.t. the unshifted spectrum
    relerr_1 = np.squeeze((asd_1.Gxx - asd.Gxx) / asd.Gxx)
    relerr_05 = np.squeeze((asd_05.Gxx - asd.Gxx) / asd.Gxx)

    # Relative random error of an autospectral density estimate
    # is 1 / sqrt(Nreal_per_ens). Check that the relative errors
    # of the time-shifted spectra are "not too much larger" than
    # the random error expected for the unshifted spectral estimate.
    # The "not too much larger" is quantified by `fudge_factor`.
    # Note that a fudge factor of 2 is larger than typically needed,
    # but it ensures robust testing in the presence of rare
    # statistical events that more severely affect the shift
    # calculation.
    fudge_factor = 2.
    max_relerr = fudge_factor / np.sqrt(Nreal_per_ens)
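
    # (With `Nreal_per_ens` = 1000, 1 / sqrt(1000) ~= 0.032, so
    # `max_relerr` ~= 0.063.)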

    tools.assert_less_equal(
        np.max(np.abs(relerr_1)),
        max_relerr)

    tools.assert_less_equal(
        np.max(np.abs(relerr_05)),
        max_relerr)

    return