Example #1
    def test_extremes(self):
        # Test extremes of alpha correspond to boxcar and hann
        tuk0 = signal.tukey(100, 0)
        box0 = signal.boxcar(100)
        assert_array_almost_equal(tuk0, box0)

        tuk1 = signal.tukey(100, 1)
        han1 = signal.hann(100)
        assert_array_almost_equal(tuk1, han1)
Example #2
def compute_motion_shifts(scan, template, in_place=True, num_threads=8):
    """ Compute shifts in y and x for rigid subpixel motion correction.

    Returns the number of pixels that each image in the scan was to the right (x_shift)
    or below (y_shift) the template. Negative shifts mean the image was to the left or
    above the template.

    :param np.array scan: 2 or 3-dimensional scan (image_height, image_width[, num_frames]).
    :param np.array template: 2-d template image. Each frame in scan is aligned to this.
    :param bool in_place: Whether the scan can be overwritten.
    :param int num_threads: Number of threads used for the ffts.

    :returns: (y_shifts, x_shifts) Two arrays (num_frames) with the y, x motion shifts.

    .. note:: Based on imreg_dft.translation().
    """
    import pyfftw
    from imreg_dft import utils

    # Add third dimension if scan is a single image
    if scan.ndim == 2:
        scan = np.expand_dims(scan, -1)

    # Get some params
    image_height, image_width, num_frames = scan.shape
    taper = np.outer(signal.tukey(image_height, 0.2), signal.tukey(image_width, 0.2))

    # Prepare fftw
    frame = pyfftw.empty_aligned((image_height, image_width), dtype='complex64')
    fft = pyfftw.builders.fft2(frame, threads=num_threads, overwrite_input=in_place,
                               avoid_copy=True)
    ifft = pyfftw.builders.ifft2(frame, threads=num_threads, overwrite_input=in_place,
                                 avoid_copy=True)

    # Get fourier transform of template
    template_freq = fft(template * taper).conj() # we only need the conjugate
    abs_template_freq = abs(template_freq)
    eps = abs_template_freq.max() * 1e-15

    # Compute subpixel shifts per image
    y_shifts = np.empty(num_frames)
    x_shifts = np.empty(num_frames)
    for i in range(num_frames):
        # Compute correlation via cross power spectrum
        image_freq = fft(scan[:, :, i] * taper)
        cross_power = (image_freq * template_freq) / (abs(image_freq) * abs_template_freq + eps)
        shifted_cross_power = np.fft.fftshift(abs(ifft(cross_power)))

        # Get best shift
        shifts = np.unravel_index(np.argmax(shifted_cross_power), shifted_cross_power.shape)
        shifts = utils._interpolate(shifted_cross_power, shifts, rad=3)

        # Map back to deviations from center
        y_shifts[i] = shifts[0] - image_height // 2
        x_shifts[i] = shifts[1] - image_width // 2

    return y_shifts, x_shifts
Example #5
 def __init__(self, x, r, sym=True):
     super().__init__()
     L = x.length
     if sym:
         data = signal.tukey(L, r, sym=sym)
         self._make_me(x, data)
     else:
         data = signal.tukey(2*L, r*2, sym=sym)
         data = data[L:]
         self._make_me(x, data)
Example #6
def AnalyticSignalFeatures(x,dt,gates):

    from numpy import linspace
    from scipy.signal import tukey, hilbert

    F = []

    for xx in x:
        xa = hilbert(xx.copy())
        xa = xa[abs(xa).argmax()::]/abs(xa).max()
        t = linspace(0,len(xa)*dt,len(xa))

        i1m = abs(xa[(t>=gates[0][0])&(t<=gates[0][1])]).argmax() + round(gates[0][0]/dt)
        il1,ir1 = PeakLimits(abs(xa),i1m,db=-20)

        i2m = abs(xa[(t>=gates[1][0])&(t<=gates[1][1])]).argmax() + round(gates[1][0]/dt)
        il2,ir2 = PeakLimits(abs(xa),i2m,db=-35)

        x1a = abs(xa[il1:ir1])
        x1a = x1a*tukey(len(x1a),1.)

        x2a = abs(xa[il2:ir2])
        x2a = x2a*tukey(len(x2a),0.1)

        x1m = moments(x1a,t[il1:ir1])
        x2m = moments(x2a,t[il2:ir2])

        # plot(x1a)
        # plot(x2a)
        # show()

        # x1 = xa[(t>=gates[0][0])&(t<=gates[0][1])]
        # x1m = moments(abs(x1),t[(t>=gates[0][0])&(t<=gates[0][1])])
        # # x2 = xa[(t>=gates[1][0])&(t<=gates[1][1])]
        # x2m = moments(abs(x2),t[(t>=gates[1][0])&(t<=gates[1][1])])
        # F.append([x2m[0]/x1m[0],(gates[1][0]+x2m[1])-(gates[0][0]+x1m[1]),x2m[2]-x1m[2],x2m[3]-x1m[3],x2m[4]-x1m[4]])

        F.append([(x2m[0]-x1m[0])/x1m[0],(x2m[1]-x1m[1])/x1m[1],(x2m[2]-x1m[2])/x1m[2],
                  (x2m[3]-x1m[3])/x1m[3],(x2m[4]-x1m[4])/x1m[4]])

    return F
Example #7
    def _make_template(self, crop):
        temp = {}
        temp['raw'] = crop.to(self.device)
        temp['im'] = torch_to_img(crop)
        temp['kernel'] = self._net.template(temp['raw'])

        # add the tukey window to the temp for comparison
        alpha = self._cfg.tukey_alpha
        win = np.outer(tukey(self.kernel_sz, alpha), tukey(self.kernel_sz, alpha))
        temp['compare'] = temp['kernel'] * torch.Tensor(win).to(self.device)
        return temp
Example #8
    def _make_template(self, crop):
        temp = {}
        temp["raw"] = crop.to(self.device)
        temp["im"] = torch_to_img(crop)
        temp["kernel"] = self._net.feature(temp["raw"])

        # add the tukey window to the temp for comparison
        alpha = self._cfg.tukey_alpha
        win = np.outer(tukey(self.kernel_sz, alpha), tukey(self.kernel_sz, alpha))
        temp["compare"] = temp["kernel"] * torch.Tensor(win).to(self.device)
        return temp
Example #9
    def _tukey_window(self, width, alpha):
        """
        Generates a tukey window

        0 <= alpha <=1

        alpha = 0 becomes rectangular
        alpha = 1 becomes a Hann window
        """
        return (ssignal.tukey(width[0], alpha).reshape(
            (-1, 1, 1)) * ssignal.tukey(width[1], alpha).reshape(
                (-1, 1)) * ssignal.tukey(width[2], alpha))
Example #10
    def _tukey_window(self, width, alpha):
        """
        Generates a tukey window

        0 <= alpha <=1

        alpha = 0 becomes rectangular
        alpha = 1 becomes a Hann window
        """
        return (ssignal.tukey(width[0], alpha).reshape((-1,1,1)) *
                ssignal.tukey(width[1], alpha).reshape((-1,1)) *
                ssignal.tukey(width[2], alpha))
Example #11
File: stack.py Project: zhoupc/ease
    def _make_tuples(self, key):
        """ Compute raster phase discarding top and bottom 15% of slices and tapering
        edges to avoid edge artifacts."""
        print('Computing raster correction for ROI', key)

        # Get some params
        res = (StackInfo.ROI() & key).fetch1('bidirectional', 'roi_px_height',
                                             'roi_px_width', 'field_ids')
        is_bidirectional, image_height, image_width, field_ids = res
        correction_channel = (CorrectionChannel() & key).fetch1('channel') - 1

        if is_bidirectional:
            # Read the ROI
            filename_rel = (experiment.Stack.Filename() &
                            (StackInfo.ROI() & key))
            roi_filename = filename_rel.local_filenames_as_wildcard
            roi = scanreader.read_scan(roi_filename)

            # Compute some parameters
            skip_fields = max(1, int(round(len(field_ids) * 0.10)))
            taper = np.sqrt(
                np.outer(signal.tukey(image_height, 0.4),
                         signal.tukey(image_width, 0.4)))

            # Compute raster phase for each slice and take the median
            raster_phases = []
            for field_id in field_ids[skip_fields:-2 * skip_fields]:
                # Create template (average frame tapered to avoid edge artifacts)
                slice_ = roi[field_id, :, :,
                             correction_channel, :].astype(np.float32,
                                                           copy=False)
                anscombed = 2 * np.sqrt(slice_ - slice_.min(axis=(0, 1)) +
                                        3 / 8)  # anscombe transform
                template = np.mean(anscombed, axis=-1) * taper

                # Compute raster correction
                raster_phases.append(
                    galvo_corrections.compute_raster_phase(
                        template, roi.temporal_fill_fraction))
            raster_phase = np.median(raster_phases)
            raster_std = np.std(raster_phases)
        else:
            raster_phase = 0
            raster_std = 0

        # Insert
        self.insert1({
            **key, 'raster_phase': raster_phase,
            'raster_std': raster_std
        })

        self.notify(key)
Example #12
 def window(self,eta):
     nz = self.nz * 2
     nx = self.nx * 2
     ny = self.ny * 2
     wd = np.zeros((nz,nx,ny))
     wx = signal.tukey(nx, alpha=eta, sym=True)
     wy = signal.tukey(ny, alpha=eta, sym=True)
     wz = signal.tukey(nz, alpha=eta, sym=True)
     w = np.outer(wx, wy)  # 2-D taper over the x-y plane
     for i in range(nz):
         wd[i,:,:] = w * wz[i]
     return wd
Example #13
    def _tukey_window(self, width, alpha):
        """
        Generates a tukey window

        0 <= alpha <=1

        alpha = 0 becomes rectangular
        alpha = 1 becomes a Hann window
        """
        from scipy.signal import tukey
        return (tukey(width[0], alpha).reshape(
            (-1, 1, 1)) * tukey(width[1], alpha).reshape(
                (-1, 1)) * tukey(width[2], alpha))
Example #14
def get_reconstruction(tiff_path, discs, row, params):
    # Read the tiff file
    imgs = read_tiff(tiff_path)

    if imgs[0].shape[0] != imgs[0].shape[1]:
        print('The input image is not square. The image will be cropped to be a*a where `a` is the smallest dimension.')
        a = min(imgs[0].shape[0], imgs[0].shape[1])
        slice_x, slice_y = slice(0, a), slice(0, a)
        imgs = [img[slice_x, slice_y] for img in imgs]
    
    window = params['window']
    a, p, sig = params['a'], params['p'], params['sig']
    do_psd = params['do_psd']
    
    # Apodization
    width, height = imgs[0].shape  # Images aren't supposed to have 3rd dimension
    if window is not None and window.lower()=='gaussian':
        w = np.outer(signal.general_gaussian(width, p=p, sig=sig), signal.general_gaussian(width, p=p, sig=sig))
    elif window is not None and window.lower()=='tukey':
        w = np.outer(signal.tukey(width, alpha=a), signal.tukey(height, alpha=a))
    elif window is None or window.lower() == 'none':
        w = 1

    imgs = [w*img for img in tqdm(imgs, desc='Processing Apodization', leave=False)]
    
    # Periodic Smooth Decomposition
    if do_psd:
        imgs = [psd(img)[0] for img in tqdm(imgs, desc='Processing PSD', leave=False)]
    
    imgs = [cp.array(img) for img in imgs]  # Transfer to GPU

    IMAGESIZE = imgs[0].shape[0]
    scale = params['scale']            
    hres_size = (IMAGESIZE * scale, IMAGESIZE * scale)

    # Remove keys not used by the reconstruction algo
    prms = {k: params[k] for k in params.keys() - ['scale', 'do_psd', 'window', 'a', 'p', 'sig']}
    
    # Reconstruction
    print('Performing Reconstruction...', end='')
    obj, pupil = reconstruct_v2(
        imgs,
        discs,
        row,
        hres_size,
        **prms
    )
    print('Done!')
    
    return obj, pupil, imgs
Example #15
def fixdata():
    for file in glob("h_psi4/*.dat"):
        '''load in the data of a waveform'''
        file_data = np.loadtxt(file)
        t = file_data[:, 0]
        y1 = file_data[:, 1]
        y2 = file_data[:, 2]
        dt = t[1] - t[0]
        t = np.append(t, t[-1] + (dt * np.array(range(1, 151))))
        y1 = np.append(y1, np.zeros(150))
        y2 = np.append(y2, np.zeros(150))
        '''find the mean of the data, looking only at array points that are different than zero'''
        total = 0
        count = 0
        for x in y1:
            if x != 0:
                total = total + x
                count += 1

        m = total / count
        data_meaned = y1 - m * np.ones(np.size(y1))
        '''find index where data crosses zero by looking at the change of sign -- +1 is necessary
        if we want to find the value after the crossing'''
        zero_crossings = (np.where(np.diff(np.sign(data_meaned)))[0])
        print("zeros are", zero_crossings)
        '''Apply the tukey window: identify if the file is of a merger or non merger by the amount of times the
        data crosses the mean (more than 7 and it will definitely be a merger, and won't require windowing, as
        there is no offset); one waveform has only 2 crossings, so needed to identify the particular case;
        the window is produced and applied to the data'''
        if np.size(zero_crossings) > 3 and np.size(zero_crossings) < 11:
            window = signal.tukey(np.size(t[:zero_crossings[2]]), alpha=0.05)
            while np.size(window) < np.size(t):
                window = np.append(window, 0)
            y1_windowed = y1 * window
            y2_windowed = y2 * window
            print("used window 3-7")
        elif np.size(zero_crossings) < 3:
            window = signal.tukey(np.size(t[:zero_crossings[1]]), alpha=0.05)
            while np.size(window) < np.size(t):
                window = np.append(window, 0)
            y1_windowed = y1 * window
            y2_windowed = y2 * window
            print("used window -3")
        else:
            y1_windowed = y1
            y2_windowed = y2
            print("not windowed")
        new_file_data = np.stack((t, y1_windowed, y2_windowed), axis=1)
        np.savetxt("waveforms/" + file.split('/')[1], new_file_data)
    return
Example #16
def make_playback_sounds():
    fs = 192000

    # define a common playback sounds length:
    common_length = 0.2
    numsamples_comlength = int(fs*common_length)

    # Create the calibration chirp : 
    chirp_durn = 0.003
    t_chirp = np.linspace(0,chirp_durn, int(fs*chirp_durn))
    start_f, end_f = 15000, 95000
    sweep = signal.chirp(t_chirp,start_f, t_chirp[-1], end_f, 'linear')
    sweep *= signal.tukey(sweep.size, 0.9)
    sweep *= 0.5

    silence_durn = 0.1
    silence_samples = int(silence_durn*fs)
    silence = np.zeros(silence_samples)
    # create 5 sweeps with silences before & after them :
    sweep_w_leftsilence = np.concatenate((silence,sweep))
    numsamples_to_add = numsamples_comlength - sweep_w_leftsilence.size
    sweep_w_silences = np.concatenate((sweep_w_leftsilence,np.zeros(numsamples_to_add)))
    sweep_w_silences = np.float32(sweep_w_silences)

    all_sweeps = []
    # make a set of 5 sweeps in a row:
    for i in range(5):
        all_sweeps.append(sweep_w_silences)

    # create a set of sinusoidal pulses :
    pulse_durn = 0.1
    pulse_samples = int(fs*pulse_durn)
    t_pulse = np.linspace(0,pulse_durn, pulse_samples)
    pulse_start_f, pulse_end_f  = 10000, 95000
    frequency_step = 1000
    freqs = np.arange(pulse_start_f, pulse_end_f, frequency_step)

    all_freq_pulses = []

    for each_freq in freqs:
        one_tone = np.sin(2*np.pi*t_pulse*each_freq)
        one_tone *= signal.tukey(one_tone.size, 0.85)
        one_tone *= 0.5
        one_tone_w_silences = np.float32(np.concatenate((silence,one_tone)))
        all_freq_pulses.append(one_tone_w_silences)

    # setup the speaker playbacks to first play the sweeps and then 
    # the pulses : 
    playback_sounds = [all_sweeps, all_freq_pulses]
    return playback_sounds, numsamples_comlength 
Example #17
    def _execute(self, namespace):
        self._start_time = time.time()
        ims = namespace[self.input_name]

        dtype = ims.data[:, :, 0].dtype

        # Somewhat arbitrary way to decide on chunk size
        chunk_size = 100000000 / ims.data.shape[0] / ims.data.shape[
            1] / dtype.itemsize
        chunk_size = max(1, chunk_size)
        chunk_size = int(chunk_size)
        #        print chunk_size

        tukey_mask_x = signal.tukey(ims.data.shape[0], self.tukey_size)
        tukey_mask_y = signal.tukey(ims.data.shape[1], self.tukey_size)
        self._tukey_mask_2d = np.multiply(
            *np.meshgrid(tukey_mask_x, tukey_mask_y, indexing='ij'))[:, :,
                                                                     None]

        if self.cache_clip == "":
            raw_data = np.empty(tuple(
                np.asarray(ims.data.shape[:3], dtype=int)),
                                dtype=dtype)
        else:
            raw_data = np.memmap(self.cache_clip,
                                 dtype=dtype,
                                 mode='w+',
                                 shape=tuple(
                                     np.asarray(ims.data.shape[:3],
                                                dtype=int)))

        progress = 0.2 * ims.data.shape[2]
        for f in np.arange(0, ims.data.shape[2], chunk_size):
            raw_data[:, :, f:f + chunk_size] = self.applyFilter(
                ims.data[:, :, f:f + chunk_size])

            if (f + chunk_size >= progress):
                if isinstance(raw_data, np.memmap):
                    raw_data.flush()
                progress += 0.2 * ims.data.shape[2]
                print("{:.2f} s. Completed clipping {} of {} total images.".
                      format(time.time() - self._start_time,
                             min(f + chunk_size, ims.data.shape[2]),
                             ims.data.shape[2]))

        clipped_images = ImageStack(raw_data, mdh=ims.mdh)
        self.completeMetadata(clipped_images)

        namespace[self.output_name] = clipped_images
Example #18
def taper1(data):
    '''
    apply a cosine taper using tukey window
    '''
    ndata = np.zeros(shape=data.shape, dtype=data.dtype)
    if data.ndim == 1:
        npts = data.shape[0]
        win = signal.tukey(npts, alpha=0.05)
        ndata = data * win
    elif data.ndim == 2:
        npts = data.shape[1]
        win = signal.tukey(npts, alpha=0.05)
        for ii in range(data.shape[0]):
            ndata[ii] = data[ii] * win
    return ndata
Example #19
    def _build_spectrograms_function(self, audio_data):

        n = np.shape(audio_data)[0]
        window = signal.tukey(1024, alpha=0.75)
        window = np.tile(window, (n, 1))
        window = np.reshape(window, (n, _NUMBER_OF_SAMPLES))
        raw_audio = audio_data * window
        fftdata = np.abs(np.fft.rfft(raw_audio, 1024, axis=1))[:, :-1]
        fftdata = fftdata ** 2
        # energy = np.sum(fftdata, axis=-1)
        lifter_num = 22
        lo_freq = 0
        hi_freq = 6400
        filter_num = 24
        mfcc_num = 12
        fft_len = 512

        dct_base = np.zeros((filter_num, mfcc_num))
        for m in range(mfcc_num):
            dct_base[:, m] = np.cos((m + 1) * np.pi / filter_num * (np.arange(filter_num) + 0.5))
        lifter = 1 + (lifter_num / 2) * np.sin(np.pi * (1 + np.arange(mfcc_num)) / lifter_num)

        mfnorm = np.sqrt(2.0 / filter_num)

        filter_mat = self.createfilters(fft_len, filter_num, lo_freq, hi_freq, 2*hi_freq)
        coefficients = self.get_feats(fft_len, fftdata, mfcc_num, dct_base, mfnorm, lifter, filter_mat)
        # coefficients[:, 0] = energy
        coefficients = np.float32(coefficients)
        return coefficients
Example #20
def analyze(rhM, time, mass):

    peaks, prop = scipy.signal.find_peaks(abs(rhM))
    ampls = rhM[peaks]
    merg = np.amax(abs(ampls))
    merg = np.where(abs(ampls) == merg)
    merg = int(merg[0])
    t1 = peaks[merg]

    for mj in range(len(time)):
        if time[mj] > 0.0:
            flag = 'pos'
            t0 = mj
            break

    ampl = rhM[t0:]
    tim = time[t0:]

    #ampl=rhM
    #tim=time

    tuk = signal.tukey(len(ampl), 0.03)
    dat = ampl * tuk

    fq, fd = fre_do(tim, dat, mass)

    mx = np.where(fd == np.amax(fd))[0][0]
    freq = fq[mx]
    amp = fd[mx]

    return fq, fd, tim, dat
Example #21
def plot_spectrogram(data, rate, name, dir_label):
    window_size = 2**10
    overlap = window_size // 8
    window = sig.tukey(M=window_size, alpha=0.25)

    freq, time, spectrogram = sig.spectrogram(data,
                                              fs=rate,
                                              window=window,
                                              nperseg=window_size,
                                              scaling='density',
                                              noverlap=overlap)
    spectrogram = np.log10(np.flipud(spectrogram))
    try:
        if spectrogram.shape[1] > 512:
            spec_padded = spectrogram[:512, :512]
        elif spectrogram.shape[1] < 512:
            spec_padded = np.pad(spectrogram,
                                 ((0, 0), (0, 512 - spectrogram.shape[1])),
                                 mode='median')[:512, :]
        else:
            spec_padded = spectrogram
    except Exception as e:
        print('ERROR!')
        print('Fault in: {}'.format(name))
        raise
    spec_padded = transform.downscale_local_mean(spec_padded, (2, 2))

    final_path = os.path.join(output_dir, dir_label, name + '.png')
    plt.imsave(final_path, spec_padded, cmap=plt.get_cmap('gray'))
Example #22
def phase_mask(tr_in, phase, **kwargs):
    '''
    return window around PKIKP phase
    '''
    tr = tr_in.copy()
    window_len = kwargs.get('window_len', 20)
    in_model = kwargs.get('model', 'prem50')

    if type(in_model) == str:
        model = TauPyModel(model=in_model)
    else:
        model = in_model

    tr.stats.distance = tr.stats.sac['gcarc']
    origin_time = tr.stats.sac['o']
    start = tr.stats.starttime

    sr = tr.stats.sampling_rate
    time = model.get_travel_times(source_depth_in_km=tr.stats.sac['evdp'],
                                  distance_in_degree=tr.stats.sac['gcarc'],
                                  phase_list=phase)
    t = time[0].time + origin_time
    window = tukey(int(sr * window_len), 0.2)
    mask = np.zeros(len(tr.data))
    mask[int(sr * t):int(sr * t) + int(sr * window_len)] = window
    tr.data *= mask
    return tr
Example #23
    def trim(self, time_start, time_end, taper=0.05, high_pass=0.005):
        accels = self.accels.copy()

        # Trim time series if needed
        if not np.isclose(time_start, time_end):
            start = int(time_start / self.time_step)
            if time_end > 0 and time_start < time_end:
                end = int(time_end / self.time_step)
            else:
                end = None
            accels = accels[start: end]

        # High pass filter in the frequency domain
        if high_pass > 0:
            # Need to normalize frequency by the Nyquist frequency
            w_nyq = 1 / (2 * self.time_step)
            w_n = high_pass / w_nyq
            b, a = scipy.signal.butter(4, w_n, 'highpass')
            accels = scipy.signal.lfilter(b, a, accels)

        # Apply cosine taper
        if taper > 0:
            accels *= tukey(accels.size, taper / 100)

        ts = self.copy()
        ts.accels = accels
        return ts
Example #24
def tukey(field, alpha, points):
    if points % 2 == 0:
        points += 1
    tukey = signal.tukey(points, alpha, sym=True)
    length = len(field)
    half = int(points / 2)
    field2 = np.zeros(np.shape(field))
    for i in range(length):
        mint = i - half
        maxt = i + half
        if mint < 0 and maxt < (length):
            excess = abs(mint)
            tukey_m = tukey[excess:]
            shortened = field[:maxt + 1]
        elif maxt >= (length) and mint >= 0:
            excess = maxt - length
            tukey_m = tukey[:-excess - 1]
            shortened = field[mint:]
        elif mint < 0 and maxt >= (length - 1):
            excessl = abs(mint)
            excessu = maxt - length
            tukey_m = tukey[excessl:-1 - excessu]
            shortened = field
        else:
            shortened = field[mint:maxt + 1]
            tukey_m = tukey
        for j in range(len(tukey_m)):
            field2[i] = field2[i] + shortened[j] * tukey_m[j]
        field2[i] = field2[i] / (sum(tukey_m))
    return (field2)
Example #25
def ft(f, pad=2, alpha=.25):
    ny, nx = f.shape

    if alpha > 0:
        wy = tukey(ny, alpha, True)
        wx = tukey(nx, alpha, True)
        f = f * (wy.reshape(-1, 1) * wx.reshape(1, -1))

    f = np.hstack((np.zeros((ny, nx // pad)), f, np.zeros((ny, nx // pad))))
    f = fftshift(fft(fftshift(f, axes=1), axis=1),
                 axes=1)[:, (nx // pad):(nx + nx // pad)]
    f = np.vstack((np.zeros((ny // pad, nx)), f, np.zeros((ny // pad, nx))))
    f = fftshift(fft(fftshift(f, axes=0), axis=0),
                 axes=0)[(ny // pad):(ny + ny // pad), :]

    return f
Example #26
def test_tukey():
    # Test against hardcoded data
    for k, v in tukey_data.items():
        if v is None:
            assert_raises(ValueError, signal.tukey, *k)
        else:
            win = signal.tukey(*k)
            assert_allclose(win, v, rtol=1e-14)

    # Test extremes of alpha correspond to boxcar and hann
    tuk0 = signal.tukey(100, 0)
    tuk1 = signal.tukey(100, 1)
    box0 = signal.boxcar(100)
    han1 = signal.hann(100)
    assert_array_almost_equal(tuk0, box0)
    assert_array_almost_equal(tuk1, han1)
Example #27
def analyze(rh, mass):

    rhM = rh[:, 1]
    time = rh[:, 0]

    peaks, prop = scipy.signal.find_peaks(abs(rhM))
    ampls = rhM[peaks]
    merg = np.amax(abs(ampls))
    merg = np.where(abs(ampls) == merg)
    merg = int(merg[0])
    t0 = peaks[merg]

    ampl = rhM[t0:]
    tim = time[t0:]

    #ampl=rhM
    #tim=time

    tuk = signal.tukey(len(ampl), 0.03)
    dat = ampl * tuk

    fq, fd = fre_do(tim, dat, mass)

    mx = np.where(fd == np.amax(fd))[0][0]
    freq = fq[mx]
    amp = fd[mx]
    fig = plt.figure()
    plt.plot((fq * Frequency), fd)
    plt.xlim(0, 5500)
    return fq, fd, tim, dat, fig
Example #28
    def apodize(self, array, alpha=0.075):
        """
        Force the magnitude of an array to go to zero at the boundaries.

        Parameters
        ----------
        array : `~numpy.ndarray`
            Array to apodize
        alpha : float between zero and one
            Alpha parameter for the Tukey window function. For best results,
            keep between 0.075 and 0.2.

        Returns
        -------
        apodized_arr : `~numpy.ndarray`
            Apodized array
        """
        if self.apodization_window_function is None:
            x, y = self.mgrid
            n = len(x[0])
            tukey_window = tukey(n, alpha)
            self.apodization_window_function = tukey_window[:, np.newaxis] * tukey_window

        apodized_array = array * self.apodization_window_function
        return apodized_array
Example #29
def condition_data(data, To=2, fw=2048, window='tukey', qtrans=False, qsplit=False, dT=2.0):
	"""
	this function conditions the data in a similar manner to what is done in the 'Omicron' algorithm
	inputs:
		data - TimeSeries - the data to be conditioned
		To - float - overlap time between chunks, default=2
		fw - int - working frequency in Hz, default=2048
		window - ndarray or string - window to use, default='tukey' - use tukey window
		qtrans - if True perform the qtransform and return that as the conditioned data. default=False
		qsplit - if True split the qtransform to separate images for better resolution
		dT - float - length in time for each q-transform image
	output:
		cond_data - TimeSeries or ndarray - conditioned data, either strain data TimeSeries or ndarray with qtransform image
	"""

	cond_data = data - data.mean() # remove DC component
	# cond_data = cond_data.resample(rate=fw, ftype = 'iir', n=20) # downsample to working frequency fw
	# cond_data = cond_data.resample(rate=4096, ftype = 'iir', n=20) # downsample to working frequency fw
	cond_data = cond_data.highpass(frequency=20, filtfilt=True) # filter out frequencies below 20Hz

	Nc = len(cond_data)
	Tc = Nc * cond_data.dt.value
	if window == 'tukey':
		window = scisig.tukey(M=Nc, alpha=1.0*To/Tc, sym=True)

	cond_data = cond_data * window
	# print(sum(cond_data**2 * cond_data.dt.value))
	# cond_data = cond_data.whiten(fftlength=2, overlap=1)
	# print(sum(cond_data**2 * cond_data.dt.value))

	if qtrans:
		cond_data = img_qtransform(cond_data, To, qsplit=qsplit, dT=dT) #, frange=(8, fw/2))
		# cond_data = split_qtransform(cond_data, To, qsplit=qsplit, dT=dT) #, frange=(8, fw/2))

	return cond_data
Example #30
def tukey_z_scale(z, center, length, alpha=0.25, points=101):
    """

    Args:
      z: z-coordinate
      center: center of Tukey window
      length: length of Tukey window
      alpha: rolloff (percentage of window) (Default value = 0.25)
      points: number of points in Tukey window (Default value = 101)

    Returns:
      z_scale (scale, relative to 1.0)

    """
    import numpy as np
    from scipy.signal import tukey

    z = np.abs(z)
    zmin = np.abs(center) - length / 2
    zmax = np.abs(center) + length / 2
    z_tukey_win = np.linspace(zmin, zmax, points)
    z_tukey_amp = tukey(points, alpha)
    if z < zmin or z > zmax:
        z_scale = 0.0
    else:
        z_scale = z_tukey_amp[np.min(np.where(z_tukey_win >= z))]

    return z_scale
Example #31
def generate_spectrogram_from_data(fs, m, data, output_filepath):
    """
    Function used to generate Spectrogram images
    :param fs: frequency sample rate e.g. 128 Hz
    :param m: total number of points in window e.g. 128
    :param data: complete dataset from an input file
    :param output_filepath: path to export file of spectrogram
    :return None:
    """
    overlap = math.floor(m * 0.9)

    f, t, Sxx = signal.spectrogram(data,
                                   fs,
                                   noverlap=overlap,
                                   window=signal.tukey(m, 0.25))

    try:
        plt.pcolormesh(t, f, np.log10(Sxx))
        plt.set_cmap('jet')
        plt.axis('off')

        plt.savefig(output_filepath, bbox_inches='tight', pad_inches=0, dpi=35)
        plt.clf()
    except FloatingPointError as e:
        print('Caught divide by 0 error: {0}'.format(output_filepath))
        return
Example #32
def chop(*args,**kwargs):
    """Chop trace, or traces, using window"""
    
    if ('window' in kwargs):
        window = kwargs['window']
    
    if not isinstance(window,Window):
        raise Exception('window must be a Window')
    
    length = args[0].size
          
    if window.width > length:
        raise Exception('window width is greater than trace length')
    
    centre = int(length/2) + window.offset
    hw = int(window.width/2)    
    t0 = centre - hw
    t1 = centre + hw
    
    if t0 < 0:
        raise Exception('chop starts before trace data')
    elif t1 > length:
        raise Exception('chop ends after trace data')
        
    if window.tukey is not None:
        tukey = signal.tukey(window.width,alpha=window.tukey)
    else:
        tukey = 1.
    
    if len(args)==1:    
        return args[0][t0:t1+1] * tukey
    elif len(args)==2:
        return args[0][t0:t1+1] * tukey, args[1][t0:t1+1] * tukey
    elif len(args)==3:
        return args[0][t0:t1+1] * tukey, args[1][t0:t1+1] * tukey, args[2][t0:t1+1] * tukey
Example #33
 def get_SNR(self, t, hn, ht, fs):
     T = t[-1] - t[0]
     NFFT = int(fs / 4)  # should be T*fs/8 to get a better background psd
     psd_window = np.blackman(NFFT)
     NOVL = int(NFFT / 2)
     dt = t[1] - t[0]
     template = ht + ht * 1.j
     datafreq = np.fft.fftfreq(template.size) * fs
     df = np.abs(datafreq[1] - datafreq[0])
     try:
         dwindow = signal.tukey(template.size,
                                alpha=1. / 8)  # Tukey window preferred, but requires recent scipy version
     except:
         dwindow = signal.blackman(template.size)  # Blackman window OK if Tukey is not available
     template_fft = np.fft.fft(template * dwindow) / fs
     data = hn.copy()
     data_psd, freqs = mlab.psd(data, Fs=fs, NFFT=NFFT, window=psd_window, noverlap=NOVL)
     data_fft = np.fft.fft(data * dwindow) / fs
     power_vec = np.interp(np.abs(datafreq), freqs, data_psd)
     optimal = template_fft.conjugate() * data_fft / power_vec
     optimal_time = 2 * np.fft.ifft(optimal) * fs
     sigmasq = 1 * (template_fft * template_fft.conjugate() / power_vec).sum() * df
     sigma = np.sqrt(np.abs(sigmasq))
     SNR_complex = optimal_time / sigma
     peaksample = int(data.size / 2)
     SNR_complex = np.roll(SNR_complex, peaksample)
     # peaksample = int(data.size / 2)
     # SNR_complex = np.roll(SNR_complex, peaksample)
     SNR = abs(SNR_complex)
     indmax = np.argmax(SNR)
     timemax = t[indmax]
     SNRmax = SNR[indmax]
     return SNRmax
Example #36
    def apodize(self, array, alpha=0.075):
        """
        Force the magnitude of an array to go to zero at the boundaries.
        Parameters
        ----------
        array : `~numpy.ndarray`
            Array to apodize
        alpha : float between zero and one
            Alpha parameter for the Tukey window function. For best results,
            keep between 0.075 and 0.2.
        Returns
        -------
        apodized_arr : `~numpy.ndarray`
            Apodized array
        """
        if self.apodization_window_function is None:
            x, y = self.mgrid
            n = len(x[0])
            tukey_window = tukey(n, alpha)
            self.apodization_window_function = np.atleast_3d(
                tukey_window[:, np.newaxis] * tukey_window)

        # In the most general case, array might represent a multi-wavelength hologram
        apodized_array = np.squeeze(
            np.atleast_3d(array) * self.apodization_window_function)
        return apodized_array
Example #37
def spectrogram(x, fft_size, window_step):
    framed = frame(x, fft_size, window_step)
    window = signal.tukey(fft_size, 0.25)
    windowed = framed * window
    fft = np.fft.fft(windowed, axis=1)
    power = np.abs(fft[:, 0:(fft.shape[1] // 2)].T)
    power[power <= 0] = 1e-12
    return np.log(power)
Example #38
 def window(self,alpha):
     """
     effect: applies a window (fade in, fade out)
     """
     floats = self.fsignal
     window = signal.tukey(len(floats),alpha = alpha)
     # apply the tukey window
     self.fsignal = window * floats
Example #39
 def test_basic(self):
     # Test against hardcoded data
     for k, v in tukey_data.items():
         if v is None:
             assert_raises(ValueError, signal.tukey, *k)
         else:
             win = signal.tukey(*k)
             assert_allclose(win, v, rtol=1e-14)
Example #40
  def Taper(self, Alpha):
    """
    Tukey window tapering
    """

    Win = _sig.tukey(self.HDR['NSMP'], alpha=Alpha)

    for I,S in enumerate(self.CHN):
      self.CHN[I] *= Win
Example #41
 def __init__(self, x, r, sym=True, x_offset=0.0):
     super().__init__()
     L = x.length
     
     if sym:
         data = signal.tukey(L, r, sym=sym)
         
     else:
         data = signal.tukey(2*L, r*2, sym=sym)
         data = data[L:]
         
     ii = 0
     for xi in x.data:
         if xi < x_offset:
             data[ii] = 0.0
         ii += 1
             
     self._make_me(x, data)     
Example #42
def test_tukey_scipy():
    """Test Tukey window against 1D scipy version."""

    # scipy.signal.tukey was introduced in Scipy v0.16.0
    from scipy.signal import tukey
    size = 101
    cen = (size - 1) // 2
    shape = (size, size)
    alpha = 0.4
    win = TukeyWindow(alpha=alpha)
    data = win(shape)
    ref1d = tukey(shape[0], alpha=alpha)
    assert_allclose(data[cen, :], ref1d)
Example #43
def lock2(f0, fp, fc, fs, coeff_ratio=8.0, coeffs=None,
          window='blackman', print_response=True):
    """Create a gentle fir filter. Pass frequencies below fp, cutoff frequencies
    above fc, and gradually taper to 0 in between."""

    # Convert to digital frequencies, normalizing f_nyq to 1,
    # as requested by scipy.signal.firwin2
    nyq = fs / 2
    fp = fp / nyq
    fc = fc / nyq

    if coeffs is None:
        coeffs = int(round(coeff_ratio / fc, 0))

    # Force number of tukey coefficients odd
    alpha = (1-fp*1.0/fc)
    n = int(round(1000. / alpha) // 2)

    N = n * 2 + 1
    f = np.linspace(0, fc, n+1)

    fm = np.zeros(n + 2)
    mm = np.zeros(n + 2)
    fm[:-1] = f
    # Append fm = nyquist frequency by hand; needed by firwin2
    fm[-1] = 1.
    m = signal.tukey(N, alpha=alpha)
    # Only take the falling part of the tukey window,
    # not the part equal to zero
    mm[:-1] = m[n:]

    # Use approx. 8x more frequencies than total coefficients we need
    nfreqs = 2**(int(round(np.log2(coeffs)))+3)+1

    b = signal.firwin2(coeffs, fm, mm,
                       nfreqs=nfreqs,
                       window=window)

    # Force filter gain to 1 at DC; corrects for small rounding errors
    b = b / np.sum(b)

    w, rep = signal.freqz(b, worN=np.pi*np.array([0., fp/2, fp, fc, 2*fc,
                                                  0.5*f0/nyq, f0/nyq, 1.]))
    if print_response:
        print("Response:")
        _print_magnitude_data(w, rep, fs)

    return b
Example #44
 def extractCurves(self):
     pX,pY,vX,vY = self.psiXlist,self.psiYlist, self.VXlist,self.VYlist
     self.b1 = "up"
     self.b2 = "up"
     self.x1old,self.y1old = None,None
     self.x2old,self.y2old = None,None
     self.psiXlist,self.psiYlist = -1*np.ones(self.cWidth),self.yCenter*np.ones(self.cWidth)
     self.VXlist,self.VYlist = -1*np.ones(self.cWidth),self.yCenter*np.ones(self.cWidth)
     self.get_tk_widget().delete("line")
     # Apply tukey window function to psi
     pwY = tukey(620,alpha=.1)*(pY[100:720]-self.yCenter)/self.yScale
     vwY = vY[100:720]-self.yCenter
     dasCurves = np.array([pwY,vwY])
     thresholdPsi = np.sum(pX[100:720])>-610
     thresholdV = np.sum(vX[100:720])>-610
     if thresholdPsi: self.oldPsi = pY
     if thresholdV: self.oldV = vY
     return dasCurves, thresholdPsi, thresholdV# Thresholds on activation