Code Example #1
File: hilbert.py, Project: ramdarpaproject/ptsa_new (the same function also appears in the apoorv2904/ptsa fork)
def hilbert_pow(dat_ts, bands=None, pad_to_pow2=False, verbose=True):
    """
    """
    # set default freq bands
    if bands is None:
        bands = freq_bands

    # proc padding
    taxis = dat_ts.get_axis(dat_ts.tdim)
    npts_orig = dat_ts.shape[taxis]
    if pad_to_pow2:
        npts = 2**next_pow2(npts_orig)
    else:
        npts = npts_orig

    # calc the hilbert power
    if verbose:
        sys.stdout.write('Hilbert Bands: ')
        sys.stdout.flush()
    pow = None
    for band in bands:
        if verbose:
            sys.stdout.write('%s ' % band[0])
            sys.stdout.flush()
        p = TimeSeries(np.abs(
            hilbert(dat_ts.filtered(band[1], filt_type='pass'),
                    N=npts,
                    axis=taxis).take(np.arange(npts_orig), axis=taxis)),
                       tdim=dat_ts.tdim,
                       samplerate=dat_ts.samplerate,
                       dims=dat_ts.dims.copy()).add_dim(Dim([band[0]],
                                                            'freqs'))
        if pow is None:
            pow = p
        else:
            pow = pow.extend(p, 'freqs')

    if verbose:
        sys.stdout.write('\n')
        sys.stdout.flush()
    return pow
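
A minimal usage sketch for the example above. The TimeSeries eeg_ts, the band labels, and the frequency ranges are illustrative assumptions rather than values from the project; only the (label, (low, high)) layout of each band entry is taken from the loop in hilbert_pow.

# Hypothetical band definitions: band[0] becomes the 'freqs' label and
# band[1] is the range passed to the band-pass filter.
bands = [('theta', (4.0, 8.0)),
         ('alpha', (8.0, 12.0)),
         ('gamma', (30.0, 50.0))]

# eeg_ts is assumed to be an existing ptsa TimeSeries with a time dimension.
# The result keeps the original dims and gains a new 'freqs' dimension,
# one entry per band.
power = hilbert_pow(eeg_ts, bands=bands, pad_to_pow2=True, verbose=False)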
Code Example #2
File: timeseries.py, Project: maciekswat/ptsa (the same method also appears in the isaac-ped/ptsa_new fork)
    def resampled(self, resampled_rate, window=None,
                  loop_axis=None, num_mp_procs=0, pad_to_pow2=False):
        """
        Resample the data and reset all the time ranges.

        Uses scipy.signal.resample, which tends to be more accurate
        than the decimate method.

        Parameters
        ----------
        resampled_rate : {float}
            New sample rate to resample to.
        window : {None,str,float,tuple}, optional
            See scipy.signal.resample for details
        loop_axis: {None,str,int}, optional
            Sometimes it might be faster to loop over an axis.
        num_mp_procs: int, optional
            Whether to use multiprocessing when looping over the axis.
            0 means no multiprocessing,
            >0 specifies the number of processes to use,
            None means use all available processes.
        pad_to_pow2: bool, optional
            Pad along the time dimension to the next power of 2 so
            that the resampling is much faster (experimental).

        Returns
        -------
        ts : {TimeSeries}
            A TimeSeries instance with the resampled data.

        See Also
        --------
        scipy.signal.resample
        """
        # resample the data, getting new time range
        time_range = self[self.tdim]
        new_length = int(np.round(len(time_range)*
                                  resampled_rate/self.samplerate))

        if pad_to_pow2:
            padded_length = 2**next_pow2(len(time_range))
            padded_new_length = int(np.round(padded_length*resampled_rate/self.samplerate))
            time_range = np.hstack([time_range, 
                                    (np.arange(1,padded_length-len(time_range)+1)*np.diff(time_range[-2:]))+time_range[-1]])

        if loop_axis is None:
            # just do standard method on all data at once
            if pad_to_pow2:
                newdat,new_time_range = resample(pad_to_next_pow2(np.asarray(self),axis=self.taxis), 
                                                 padded_new_length, t=time_range,
                                                 axis=self.taxis, window=window)
            else:
                newdat,new_time_range = resample(np.asarray(self),
                                                 new_length, t=time_range,
                                                 axis=self.taxis, window=window)

        else:
            # loop over specified axis
            # get the loop axis name and length
            loop_dim = self.get_dim_name(loop_axis)
            loop_dim_len = len(self[loop_dim])
            # boolean index used to select one slice at a time along the
            # loop axis (np.bool was removed from NumPy; use builtin bool)
            ind = np.zeros(loop_dim_len, dtype=bool)
            newdat = []
            if has_mp and num_mp_procs != 0:
                po = mp.Pool(num_mp_procs)

            for i in range(loop_dim_len):
                ind[i] = True
                dat = self.select(**{loop_dim:ind})
                taxis = dat.taxis
                if has_mp and num_mp_procs != 0:
                    # start async proc
                    if pad_to_pow2:
                        dat = pad_to_next_pow2(np.asarray(dat), axis=dat.taxis)
                        newdat.append(po.apply_async(resample,
                                                     (np.asarray(dat), padded_new_length, time_range,
                                                      taxis, window)))
                    else:
                        newdat.append(po.apply_async(resample,
                                                     (np.asarray(dat), new_length, time_range,
                                                      taxis, window)))
                else:
                    # just call on that dataset
                    sys.stdout.write('%d '%i)
                    sys.stdout.flush()
                    if pad_to_pow2:
                        dat = pad_to_next_pow2(np.asarray(dat), axis=dat.taxis)
                        ndat,new_time_range = resample(np.asarray(dat), padded_new_length, t=time_range,
                                                       axis=taxis, window=window)
                    else:
                        ndat,new_time_range = resample(np.asarray(dat), new_length, t=time_range,
                                                       axis=taxis, window=window)
                    newdat.append(ndat)
                ind[i] = False
            if has_mp and num_mp_procs != 0:
                # aggregate mp results
                po.close()
                #po.join()  
                out = []
                for i in range(len(newdat)):
                    sys.stdout.write('%d '%i)
                    sys.stdout.flush()
                    out.append(newdat[i].get())
                #out = [newdat[i].get() for i in range(len(newdat))]
                newdat = [o[0] for o in out]
                # every worker returns the same resampled time range,
                # so take it from the last result
                new_time_range = out[-1][1]

            # concatenate the new data
            newdat = np.concatenate(newdat,axis=self.get_axis(loop_axis))

            sys.stdout.write('\n')
            sys.stdout.flush()

        # remove pad if we padded it
        if pad_to_pow2:
            newdat = newdat.take(range(new_length),axis=self.taxis)
            new_time_range = new_time_range[:new_length]

        # set the time dimension
        newdims = self.dims.copy()
        attrs = self.dims[self.taxis]._attrs.copy()
        for k in self.dims[self.taxis]._required_attrs.keys():
            attrs.pop(k,None)
        newdims[self.taxis] = Dim(new_time_range,
                                  self.dims[self.taxis].name,
                                  **attrs)

        attrs = self._attrs.copy()
        for k in self._required_attrs.keys():
            attrs.pop(k,None)
        return TimeSeries(newdat, self.tdim, resampled_rate,
                          dims=newdims, **attrs)
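
A hedged usage sketch for resampled(). The TimeSeries eeg_ts, the target rate, the 'channels' dimension name, and the process count are illustrative assumptions, not values from the project.

# Downsample to 100 Hz, padding the time axis to the next power of two so
# the FFT-based scipy.signal.resample call runs faster (experimental option).
ts_100 = eeg_ts.resampled(100.0, pad_to_pow2=True)

# Resample one channel at a time, with up to 4 worker processes when
# multiprocessing is available; this can lower peak memory for large arrays.
ts_100 = eeg_ts.resampled(100.0, loop_axis='channels', num_mp_procs=4)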
Code Example #3
File: wavelet.py, Project: Shotgunosine/ptsa (an identical copy appears in the isaac-ped/ptsa_new fork)
def fconv_multi(in1, in2, mode='full'):
    """
    Convolve multiple 1-dimensional arrays using FFT.

    Takes the FFT of every row in in1 and in2, multiplies every
    possible pairwise combination of the transformed rows, and returns
    the inverse FFT of the result. The output array therefore has
    in1.shape[0]*in2.shape[0] rows (the number of columns depends on
    the mode).
    
    Parameters
    ----------
    in1 : {array_like}
        First input array. Must be arranged such that each row is a
        1-D array with data to convolve.
    in2 : {array_like}
        Second input array. Must be arranged such that each row is a
        1-D array with data to convolve.
    mode : {'full','valid','same'},optional
        Specifies the size of the output. See the docstring for
        scipy.signal.convolve() for details.
    
    Returns
    -------
    Array with in1.shape[0]*in2.shape[0] rows with the convolution of
    the 1-D signals in the rows of in1 and in2.
    """    
    # ensure proper number of dimensions
    in1 = np.atleast_2d(in1)
    in2 = np.atleast_2d(in2)

    # get the number of signals and samples in each input
    num1,s1 = in1.shape
    num2,s2 = in2.shape
    
    # see if we will be returning a complex result
    # (np.complex was removed from NumPy; np.complexfloating is the
    # abstract complex type for issubdtype checks)
    complex_result = (np.issubdtype(in1.dtype, np.complexfloating) or
                      np.issubdtype(in2.dtype, np.complexfloating))

    # determine the size based on the next power of 2
    actual_size = s1+s2-1
    size = np.power(2,next_pow2(actual_size))

    # perform the fft of each row of in1 and in2
    # (np.complex and xrange are gone in modern NumPy/Python 3; use
    # np.complex128 and range instead)
    in1_fft = np.empty((num1, size), dtype=np.complex128)
    for i in range(num1):
        in1_fft[i] = fft(in1[i], size)
    in2_fft = np.empty((num2, size), dtype=np.complex128)
    for i in range(num2):
        in2_fft[i] = fft(in2[i], size)
    
    # duplicate the signals and multiply before taking the inverse
    in1_fft = in1_fft.repeat(num2,axis=0)
    in1_fft *= np.vstack([in2_fft]*num1)
    ret = ifft(in1_fft)
#     ret = ifft(in1_fft.repeat(num2,axis=0) * \
#                np.vstack([in2_fft]*num1))
    
    # delete to save memory
    del in1_fft, in2_fft
    
    # strip off the extra padded samples if necessary
    ret = ret[:,:actual_size]
    
    # determine if complex, keeping only real if not
    if not complex_result:
        ret = ret.real
    
    # now only keep the requested portion
    if mode == "full":
        return ret
    elif mode == "same":
        if s1 > s2:
            osize = s1
        else:
            osize = s2
        return centered(ret,(num1*num2,osize))
    elif mode == "valid":
        return centered(ret,(num1*num2,np.abs(s2-s1)+1))
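
A small self-contained check of the idea behind fconv_multi, written with plain NumPy (np.fft) so it does not depend on the module's fft/ifft imports; it is a sketch of the FFT-based pairwise convolution, not part of the library.

import numpy as np

# Two stacks of 1-D signals: 2 rows of 50 samples and 3 rows of 7 samples.
rng = np.random.default_rng(0)
in1 = rng.standard_normal((2, 50))
in2 = rng.standard_normal((3, 7))

# Reference: 'full' convolution of every (row of in1, row of in2) pair,
# in the same pairing order fconv_multi produces.
expected = np.array([np.convolve(a, b, mode='full')
                     for a in in1 for b in in2])

# Same result via FFTs padded to the next power of two, mirroring the
# structure of fconv_multi above.
actual_size = in1.shape[1] + in2.shape[1] - 1
size = 2 ** int(np.ceil(np.log2(actual_size)))
f1 = np.fft.fft(in1, size)                      # shape (2, size)
f2 = np.fft.fft(in2, size)                      # shape (3, size)
prod = f1.repeat(in2.shape[0], axis=0) * np.vstack([f2] * in1.shape[0])
result = np.fft.ifft(prod).real[:, :actual_size]

assert np.allclose(result, expected)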