def generateCorrelations(self, doDetrend=True):
    # auto-correlation coefficient of u
    if doDetrend:
        ux = signal.detrend(self.ux())
        uy = signal.detrend(self.uy())
        uz = signal.detrend(self.uz())
        umag = signal.detrend(self.Umag())
    else:
        ux = self.ux()
        uy = self.uy()
        uz = self.uz()
        umag = self.Umag()
    #ux=ux[-samples:-1]
    #uy=uy[-samples:-1]
    #uz=uz[-samples:-1]
    self.data['r11'],self.data['taur11'] = tt.xcorr_fft(ux, maxlags=None, norm='coeff')
    self.data['r22'],self.data['taur22'] = tt.xcorr_fft(uy, maxlags=None, norm='coeff')
    self.data['r33'],self.data['taur33'] = tt.xcorr_fft(uz, maxlags=None, norm='coeff')
    self.data['r12'],self.data['taur12'] = tt.xcorr_fft(ux, y=uy, maxlags=None, norm='coeff')
    self.data['r13'],self.data['taur13'] = tt.xcorr_fft(ux, y=uz, maxlags=None, norm='coeff')
    self.data['r23'],self.data['taur23'] = tt.xcorr_fft(uy, y=uz, maxlags=None, norm='coeff')
    self.data['rmag'],self.data['taurmag'] = tt.xcorr_fft(umag, maxlags=None, norm='coeff')
    # auto-correlation of u
    self.data['R11'],self.data['tauR11'] = tt.xcorr_fft(ux, maxlags=None, norm='biased')
    self.data['R22'],self.data['tauR22'] = tt.xcorr_fft(uy, maxlags=None, norm='biased')
    self.data['R33'],self.data['tauR33'] = tt.xcorr_fft(uz, maxlags=None, norm='biased')
Example #2
def remt(st):
    '''
    Remove the mean and the linear trend (rmean and rtrend).
    '''
    st.data = detrend(st.data, type='linear')
    st.data = detrend(st.data, type='constant')
    return st
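For reference, a minimal, self-contained sketch of the same two-step detrend applied to a plain NumPy array (the synthetic data and variable names here are assumptions, not part of the original project):

import numpy as np
from scipy.signal import detrend

t = np.linspace(0.0, 10.0, 1000)
x = 2.5 + 0.3 * t + np.sin(2 * np.pi * t)   # offset + drift + signal
x = detrend(x, type='linear')               # remove the linear trend
x = detrend(x, type='constant')             # remove any remaining mean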
    def __init__(self, path):
        # convert the file into a python object
        self.doc2event(path)

        # @warning: the matrices must be converted to arrays and the dotted
        # attributes to structure attributes, respectively

        # convert every seismogram to a displacement field

        acelerometers_id = [76, 82, 118, 126, 146, 147]

        # transform everything into a displacement field
        for s in self.seismograms:

            # keep the raw data for later analysis
            s.raw_data = s.data
            # if it is an accelerometer
            if s.site_id in acelerometers_id:
                s.data = sig.detrend(np.cumsum(sig.detrend(np.cumsum(s.data[
                               :, 2:5], axis=0), axis=0), axis=0), axis=0)
            # if it is a velocimeter
            else:
                s.data = sig.detrend(np.cumsum(s.data[:, 2:5], axis=0), axis=0)

            timevector = s.timevector
            s.data = pd.DataFrame(s.data, index=timevector)
            s.raw_data = pd.DataFrame(s.raw_data[:, 2:5], index=timevector)

        pass
def slidingWindow(P, inX=3, outX=32, inY=3, outY=64, maxM=50, norm=True):
    """ Enhance the contrast

        Cut off extreme values and demean the image
        Utilize scipy convolve2d to get the mean at a given pixel
        Remove local mean with inner exclusion region

        Args:
            P: 2-d numpy array image
            inX: inner exclusion region in the x-dimension
            outX: length of the window in the x-dimension
            inY: inner exclusion region in the y-dimension
            outY: length of the window in the y-dimension
            maxM: size of the output image in the y-dimension
            norm: boolean to cut off extreme values

        Returns:
            Q: 2-d numpy array, contrast enhanced
    """
    Q = P.copy()
    m, n = Q.shape

    Q = exposure.equalize_hist(Q.astype('float32'), nbins=65)
    Q = detrend(Q.astype('float32'), axis=1)
    Q = detrend(Q.astype('float32'), axis=0)
    Q = wiener(Q.astype('float32'), 4)

    return Q[:maxM, :]
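A hypothetical call on random data, assuming skimage's exposure and scipy's detrend/wiener are available at module level as the function body expects:

import numpy as np
from scipy.signal import detrend, wiener
from skimage import exposure

img = np.random.rand(64, 128).astype('float32')   # assumed test image
enhanced = slidingWindow(img, maxM=50)            # hist-equalize, detrend both axes, Wiener filter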
Example #5
def phases_from_complex(wts, continuous=False, do_detrend=False):
    """Calculates phases from 1d or 2d wavelet/hilbert arrays, dim0 is time"""
    if len(wts.shape) == 1:
        #1d
        phasen = n.arctan2(wts.imag,wts.real)
        if not (continuous or do_detrend):
            return phasen
        else:
            phasen = make_phases_continuous(phasen)
            if do_detrend:
                phasen = detrend(phasen,axis=0)
            return phasen
    elif len(wts.shape) == 2:
        #2d
        phasen = n.arctan2(wts.imag,wts.real)
        if not (continuous or do_detrend):
            return phasen
        else:
            phasen = make_phases_continuous(phasen)
            if do_detrend:
                phasen = detrend(phasen,axis=0)
            return phasen
        
    else:
        raise ValueError("Only 1d and 2d arrays supported")
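A hedged example of producing the kind of input this function expects, using a Hilbert analytic signal; np.unwrap stands in here for the make_phases_continuous helper, which is not shown in this listing:

import numpy as np
from scipy.signal import hilbert, detrend

fs = 250.0
t = np.arange(0, 2.0, 1.0 / fs)
x = np.sin(2 * np.pi * 10.0 * t)            # 10 Hz test oscillation
wts = hilbert(x)                            # complex analytic signal, dim0 is time
phasen = np.arctan2(wts.imag, wts.real)     # instantaneous phase in (-pi, pi]
phasen = np.unwrap(phasen)                  # make the phase continuous
phasen = detrend(phasen, axis=0)            # remove the linear phase ramp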
Example #6
    def preProcess(self,
                   periodF0=0.06,
                   deltaF_div_F0=True,
                   max_threshold=None,
                   min_threshold=None,
                   nan_to_zeros=True,
                   detrend=False,
                   #~ band_filter = None,
                   gaussian_filter=None,
                   f1=None,
                   f2=None,
                   **kargs):

        images = self.images
        if deltaF_div_F0:
            ind = self.t()<=self.t_start+periodF0
            m0 = mean(images[ind,:,:] , axis = 0)
            images = (images-m0)/m0*1000.
            
        if max_threshold is not None:
            #~ images[images>max_threshold] = max_threshold
            images[images>max_threshold] = nan
            

        if min_threshold is not None:
            #~ images[images<min_threshold] = min_threshold
            images[images<min_threshold] = nan
                
            
        if nan_to_zeros:
            images[isnan(images) ] = 0.

        if detrend and not nan_to_zeros:
            m = any(isnan(images) , axis = 0)
            images[isnan(images) ] = 0.
            images = signal.detrend( images , axis = 0)
            images[:,m] = nan
        elif detrend and nan_to_zeros:
            images = signal.detrend( images , axis = 0)
            
        if gaussian_filter is not None:
            images = ndimage.gaussian_filter( images , (0 , gaussian_filter , gaussian_filter))
            

        if f1 is not None or f2 is not None:
            from ..computing.filter import fft_passband_filter
            if f1 is None: f1 = 0.
            if f2 is None: f2 = inf
            nq = self.sampling_rate/2.
            images = fft_passband_filter(images, f_low = f1/nq , f_high = f2/nq , axis = 0)
        
        return images
Example #7
def preprocess(matr, prepr, Fs, fc_min, fc_max, taper_fract):
    """
    :type matr: numpy.ndarray
    :param matr: time series of used stations (dim: [number of samples, number of stations])
    :type prepr: integer
    :param prepr: type of preprocessing. 0=None, 1=bandpass filter, 2=spectral whitening
    :type Fs: float
    :param Fs: sampling rate of data streams
    :type fc_min, fc_max: float
    :param fc_min, fc_max: corner frequencies used for preprocessing
    :type taper_fract: float
    :param taper_fract: percentage of frequency band which is tapered after spectral whitening

    :return: preprocessed data (dim: [number of samples, number of stations])
    """
    if prepr == 0:
        data = signal.detrend(matr, axis=0)

    elif prepr == 1:
        # generate frequency vector and butterworth filter
        b, a = signal.butter(4, np.array([fc_min, fc_max]) / Fs * 2, btype="bandpass")
        # filter data and normalize it by maximum energy
        data = signal.filtfilt(b, a, signal.detrend(matr, axis=0), axis=0)
        fact = np.sqrt(np.dot(np.ones((data.shape[0], 1)), np.sum(data**2, axis=0).reshape((1, data.shape[1]))))
        data = np.divide(data, fact)

    elif prepr == 2:
        nfft = nearest_powof2(matr.shape[0])
        Y = np.fft.fft(matr, n=nfft, axis=0)
        f = np.fft.fftfreq(nfft, 1./float(Fs))

        # whiten: discard all amplitude information within range fc
        Y_white = np.zeros(Y.shape, dtype=complex)  # complex, so the phase information is kept
        J = np.where((f > fc_min) & (f < fc_max))
        Y_white[J, :] = np.exp(1j * np.angle(Y[J, :]))

        # now taper within taper_fract
        deltaf = (fc_max - fc_min) * taper_fract
        Jdebut = np.where((f > fc_min) & (f < (fc_min + deltaf)))
        Jfin = np.where((f > (fc_max - deltaf)) & (f < fc_max))
        for ii in range(Y.shape[1]):
            if len(Jdebut[0]) > 1:
                Y_white[Jdebut, ii] = np.multiply(Y_white[Jdebut, ii],
                            np.sin(np.pi / 2 * np.arange(0, len(Jdebut[0])) / len(Jdebut[0]))**2)
            if len(Jfin[0]) > 1:
                Y_white[Jfin, ii] = np.multiply(Y_white[Jfin, ii],
                            np.cos(np.pi / 2 * np.arange(0, len(Jfin[0])) / len(Jfin[0]))**2)

        # perform inverse fft to obtain time signal
        # data = 2*np.real(np.fft.ifft(Y_white, n=nfft, axis=0))
        data = np.fft.ifft(Y_white, n=nfft, axis=0)
        # normalize it by maximum energy
        fact = np.sqrt(np.dot(np.ones((data.shape[0], 1)), np.sum(data**2, axis=0).reshape((1, data.shape[1]))))
        data = np.divide(data, fact)
    return data
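The spectral-whitening branch above can be hard to follow; here is a simplified 1-D sketch of the same idea (the function name and the rfft-based layout are assumptions), keeping only phase information inside the pass band and tapering the band edges with sin^2/cos^2 ramps:

import numpy as np

def whiten_1d(x, fs, fc_min, fc_max, taper_fract=0.1):
    n = len(x)
    Y = np.fft.rfft(x)
    f = np.fft.rfftfreq(n, 1.0 / fs)
    W = np.zeros_like(Y)                                   # complex, same shape as Y
    band = (f > fc_min) & (f < fc_max)
    W[band] = np.exp(1j * np.angle(Y[band]))               # unit amplitude, original phase
    deltaf = (fc_max - fc_min) * taper_fract
    lo = (f > fc_min) & (f < fc_min + deltaf)
    hi = (f > fc_max - deltaf) & (f < fc_max)
    W[lo] *= np.sin(np.pi / 2 * np.arange(lo.sum()) / max(lo.sum(), 1)) ** 2
    W[hi] *= np.cos(np.pi / 2 * np.arange(hi.sum()) / max(hi.sum(), 1)) ** 2
    return np.fft.irfft(W, n=n)                            # back to the time domain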
Example #8
    def generateStatistics(self,doDetrend=True):
        '''
        Generates statistics and populates member variable data.

        Arguments:
            doDetrend: detrend data before signal processing

        Populates the "data" python dict with with the following keys:
            rii:    [numpy.array of shape=(?)] Auto-correlation coefficient rii. For i=1,2,3
            taurii: [numpy.array of shape=(?)] Time lags for rii. For i=1,2,3
            Rii:    [numpy.array of shape=(?)] Auto-correlation Rii. For i=1,2,3
            tauRii: [numpy.array of shape=(?)] Time lags for Rii. For i=1,2,3

            uifrq:  [numpy.array of shape=(?)] ui in frequency domain. For i=1,2,3
            uiamp:  [numpy.array of shape=(?)] amplitude of ui in frequency domain. For i=1,2,3
            Seiifrq:[numpy.array of shape=(?)] Frequencies for energy spectrum Seii. For i=1,2,3
            Seii:   [numpy.array of shape=(?)] Energy spectrum Seii derived from Rii. For i=1,2,3
        '''
        # auto-correlation coefficient of u
        if doDetrend:
            ux = signal.detrend(self.ux())
            uy = signal.detrend(self.uy())
            uz = signal.detrend(self.uz())
            umag = signal.detrend(self.Umag())
        else:
            ux = self.ux()
            uy = self.uy()
            uz = self.uz()
            umag = self.Umag()
        #ux=ux[-samples:-1]
        #uy=uy[-samples:-1]
        #uz=uz[-samples:-1]
        self.data['r11'],self.data['taur11'] = tt.xcorr_fft(ux, maxlags=None, norm='coeff')
        self.data['r22'],self.data['taur22'] = tt.xcorr_fft(uy, maxlags=None, norm='coeff')
        self.data['r33'],self.data['taur33'] = tt.xcorr_fft(uz, maxlags=None, norm='coeff')
        self.data['r12'],self.data['taur12'] = tt.xcorr_fft(ux,y=uy, maxlags=None, norm='coeff')
        self.data['r13'],self.data['taur13'] = tt.xcorr_fft(ux,y=uz, maxlags=None, norm='coeff')
        self.data['r23'],self.data['taur23'] = tt.xcorr_fft(uy,y=uz, maxlags=None, norm='coeff')
        self.data['rmag'],self.data['taurmag'] = tt.xcorr_fft(umag, maxlags=None, norm='coeff')
        # auto correlation of u
        self.data['R11'],self.data['tauR11'] = tt.xcorr_fft(ux, maxlags=None, norm='none')
        self.data['R22'],self.data['tauR22'] = tt.xcorr_fft(uy, maxlags=None, norm='none')
        self.data['R33'],self.data['tauR33'] = tt.xcorr_fft(uz, maxlags=None, norm='none')


        #u in frequency domain
        self.data['u1frq'],self.data['u1amp'] = tt.dofft(sig=ux,samplefrq=self.data['frq'])
        self.data['u2frq'],self.data['u2amp'] = tt.dofft(sig=uy,samplefrq=self.data['frq'])
        self.data['u3frq'],self.data['u3amp'] = tt.dofft(sig=uz,samplefrq=self.data['frq'])
        # Time energy spectrum Se11 (i.e. Rii in the frequency domain...)
        self.data['Se11frq'],self.data['Se11'] = tt.dofft(sig=self.data['R11'],samplefrq=self.data['frq'])
        self.data['Se22frq'],self.data['Se22'] = tt.dofft(sig=self.data['R22'],samplefrq=self.data['frq'])
        self.data['Se33frq'],self.data['Se33'] = tt.dofft(sig=self.data['R33'],samplefrq=self.data['frq'])
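The helper tt.xcorr_fft is not included in this listing; a minimal FFT-based autocorrelation coefficient (a rough analogue of norm='coeff' for positive lags, with assumed names) could look like this:

import numpy as np
from scipy import signal

def autocorr_coeff(u):
    u = signal.detrend(u)
    n = len(u)
    U = np.fft.rfft(u, n=2 * n)                     # zero-pad to avoid circular wrap-around
    r = np.fft.irfft(U * np.conj(U), n=2 * n)[:n]   # keep positive lags only
    r /= r[0]                                       # normalize so that r[0] == 1
    lags = np.arange(n)
    return r, lags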
def ts_ft(data, name):
    data /= np.max(np.abs(data), axis=0)
    plt.subplot(311)
    plt.plot(ma.mean(ma.mean(data, 2), 1))
    plt.axis('tight')
    plt.subplot(312)
    plt.plot(signal.detrend(ma.mean(ma.mean(data, 2), 1), axis=0))
    plt.axis('tight')
    plt.subplot(313)
    plt.plot(np.log(np.abs(np.fft.rfft(ma.mean(ma.mean(signal.detrend(data, axis=0), 2), 1))))[0:365])
    plt.axis('tight')
    plt.savefig(FIGDIR + 'time_freq_' + name + '.png')
    plt.close('all')
Example #10
def summarize_timeseries(functional_path, masks_path, summary):

    if type(summary) is not dict:
        summary = {'method': summary}

    masks_img = [nb.load(mask_path) for mask_path in masks_path]
    mask = np.sum(np.array([
        mask_img.get_data() for mask_img in masks_img
    ]), axis=0) > 0.0

    if mask.sum() == 0:
        raise Exception(
            "The provided mask does not contains voxels. "
            "Please check if mask is being eroded and if the segmentation worked correctly."
        )

    functional_img = nb.load(functional_path)
    masked_functional = functional_img.get_data()[mask]

    regressors = np.zeros(masked_functional.shape[-1])

    if summary['method'] == 'Mean':
        regressors = masked_functional.mean(0)

    if summary['method'] == 'NormMean':
        masked_functional /= np.linalg.norm(masked_functional, 2)
        regressors = np.nan_to_num(masked_functional).mean(0)

    if summary['method'] == 'DetrendNormMean':
        masked_functional = \
            signal.detrend(masked_functional, type='linear').T

        masked_functional /= np.linalg.norm(masked_functional, 2)
        regressors = np.nan_to_num(masked_functional).mean(0)

    if summary['method'] in ['DetrendPC', 'PC']:
        if summary['method'] == 'DetrendPC':
            Y = signal.detrend(masked_functional, type='linear').T
        else:
            Y = masked_functional.T

        Yc = Y - np.tile(Y.mean(0), (Y.shape[0], 1))
        Yc = np.nan_to_num(Yc / np.tile(np.array(Y.std(0)).reshape(1,Y.shape[1]), (Y.shape[0],1)))
        U, _, _ = np.linalg.svd(Yc)

        regressors = U[:, 0:summary['components']]

    output_file_path = os.path.join(os.getcwd(), 'summary_regressors.1D')
    np.savetxt(output_file_path, regressors, fmt='%.18f')

    return output_file_path
Example #11
def standardize(X, stdtype='column'):
    """Standardizes a two-dimensional input matrix X, by either row or column.  Resulting matrix will have
    row or column mean 0 and row or column std. dev. equal to 1.0."""
    if len(X.shape) > 2:
        print 'ERROR: standardize() not defined for matrices that are not two-dimensional'
        return
    if stdtype == 'column':
        F = signal.detrend(X, type='constant', axis=0)
        F = F/std(F, axis=0)
    else:
        F = signal.detrend(X.T, type='constant', axis=0)
        F = F/std(F, axis=0)
        F = F.T
    return F
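A hypothetical call, assuming scipy.signal (as signal) and numpy's std (e.g. via a pylab-style star import) are in scope as the function body requires:

import numpy as np
X = 7.0 + 3.0 * np.random.randn(100, 5)    # 100 samples, 5 features
Z = standardize(X, stdtype='column')       # each column of Z has mean ~0 and std ~1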
    def generateAutoCorrelations(self, doDetrend=True):
        # auto-correlation coefficient of u
        if doDetrend:
            ux = signal.detrend(self.ux())
            uy = signal.detrend(self.uy())
            uz = signal.detrend(self.uz())
        else:
            ux = self.ux()
            uy = self.uy()
            uz = self.uz()

        self.data['r11'],self.data['taur11'] = tt.xcorr_fft(ux, maxlags=None, norm='coeff')
        self.data['r22'],self.data['taur22'] = tt.xcorr_fft(uy, maxlags=None, norm='coeff')
        self.data['r33'],self.data['taur33'] = tt.xcorr_fft(uz, maxlags=None, norm='coeff')
Example #13
def deriv2_of_horiz_bands(img, hband_size):
    """For each horizontal band in image, calculate second
    derivatives of pixel densities.


    Divide the image into horizontal bands with each 
    band consisting of hband_size scanlines.

    For each band, sum the pixels in the vertical direction.

    Return the second difference of the summation in the
    corresponding result row, as well as the detrended
    sums divided by the standard deviation of the sums
    for each band.
    """

    #FIXME: a partial band at the bottom of the image is ignored
    img_height, img_width = img.shape[:2]
    n_bands = img_height // hband_size  # integer number of full bands
    d2 = np.empty((n_bands, img_width-2))
    detr = np.empty((n_bands, img_width))
    for sl_index in range(n_bands):
        window_top = sl_index*hband_size
        band = img[window_top:window_top+hband_size,...]
        sum_ = band.sum(axis=0)
        d2[sl_index,...] = np.diff(sum_, n=2)
        dd = -signal.detrend(sum_)
        detr[sl_index,...] = dd / dd.std()
    return np.pad(d2, ((0,0), (1,1)), 'edge'), detr
Example #14
def enframe(x, win, inc):
    """
    Splits the vector up into (overlapping) frames beginning at increments
    of inc. Each frame is multiplied by the window win().
    The length of the frames is given by the length of the window win().
    The centre of frame I is x((I-1)*inc+(length(win)+1)/2) for I=1,2,...

    :param x: signal to split in frames
    :param win: window multiplied to each frame, length determines frame length
    :param inc: increment to shift frames, in samples
    :return f: output matrix, each frame occupies one row
    :return length, no_win: length of each frame in samples, number of frames
    """
    nx = len(x)
    nwin = len(win)
    if (nwin == 1):
        length = win
    else:
        # length = next_pow_2(nwin)
        length = nwin
    nf = int(fix((nx - length + inc) // inc))
    # f = np.zeros((nf, length))
    indf = inc * np.arange(nf)
    inds = np.arange(length) + 1
    f = x[(np.transpose(np.vstack([indf] * length)) +
           np.vstack([inds] * nf)) - 1]
    if (nwin > 1):
        w = np.transpose(win)
        f = f * np.vstack([w] * nf)
    f = signal.detrend(f, type='constant')
    no_win, _ = f.shape
    return f, length, no_win
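A hypothetical call splitting one second of a 440 Hz tone into 25 ms Hamming-windowed frames with a 10 ms hop (assumes numpy and scipy.signal are imported in the module as the function requires):

import numpy as np
fs = 8000
x = np.sin(2 * np.pi * 440 * np.arange(fs) / fs)
win = np.hamming(int(0.025 * fs))                              # 200-sample window
frames, frame_len, n_frames = enframe(x, win, int(0.010 * fs))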
Example #15
def decode_efm():
    """ Decode EFM from STDIN, assuming it's a 28Mhz 8bit raw stream  """
    datao = np.fromstring(sys.stdin.read(SAMPLES), dtype=np.uint8).astype(np.int16)
    datao = sps.detrend(datao, type='constant')  # Remove DC

    datao = auto_gain(datao, 10000, 'pre-filter')  # Expand before filtering, since we'll lose much of signal otherwise

    low_pass = sps.butter(4, 1.75 / FREQ_MHZ, btype='lowpass')  # Low pass at 1.75 Mhz
    datao = sps.lfilter(low_pass[0], low_pass[1], datao)

    high_pass = sps.butter(4, 0.01333 / FREQ_MHZ, btype='highpass')  # High pass at 13.333 khz
    datao = sps.lfilter(high_pass[0], high_pass[1], datao)

    # This is too slow, need to work out a way to do it in scipy
    de_emphasis_filter = biquad_filter(-1.8617006585639506, 0.8706642683920058, 0.947680874725466, -1.8659578411373265, 0.9187262110931641)
    datao = np.fromiter(run_filter(de_emphasis_filter, datao), np.int16)  # De-emph - 26db below 500khz

    # Could tie edge_pll and run_filter together as generators, but we want to see the filter output

    bit_gen = edge_pll(datao, EFM_PIXEL_RATE)  # This is a ultra-naive PLL that returns a bit-stream of 1 = edge, 0 = no-edge
    try:
        while 1:
            run_until_start_code(bit_gen)
            eat_three_bits(bit_gen)
            process_efm_frame(bit_gen, 31)  # 31 14 bit EFM codes in a frame
    except StopIteration:
        printerr('Hit the end of the bitstream')

    datao = np.clip(datao, 0, 255).astype(np.uint8)
    sys.stdout.write(datao.tostring())
Example #16
def _iter_contrasts(n_subjects, factor_levels, effect_picks):
    """ Aux Function: Setup contrasts """
    sc, sy, = [], []

    # prepare computation of Kronecker products
    for n_levels in factor_levels:
        # for each factor append
        # 1) column vector of length == number of levels,
        # 2) square matrix with diagonal == number of levels

        # main + interaction effects for contrasts
        sc.append([np.ones([n_levels, 1]),
                   detrend(np.eye(n_levels), type='constant')])
        # main + interaction effects for component means
        sy.append([np.ones([n_levels, 1]) / n_levels, np.eye(n_levels)])
        # XXX component means not returned at the moment

    for (c1, c2, c3) in defaults_twoway_rm['iter_contrasts'][effect_picks]:
        # c1 selects the first factors' level in the column vector
        # c3 selects the actual factor
        # c2 selects either its column vector or diag matrix
        c_ = np.kron(sc[0][c1], sc[c3][c2])
        # for 3 way anova accumulation of c_ across factors required
        df1 = matrix_rank(c_)
        df2 = df1 * (n_subjects - 1)
        yield c_, df1, df2
Example #17
 def test_detrend_external_nd_0(self):
     x = np.arange(20, dtype=np.float64) + 0.04
     x = x.reshape((2,1,10))
     x = np.rollaxis(x, 2, 0)
     f, p = csd(x, x, nperseg=10, axis=0,
                detrend=lambda seg: signal.detrend(seg, axis=0, type='l'))
     assert_allclose(p, np.zeros_like(p), atol=1e-15)
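A hypothetical analogue with scipy.signal.welch, showing the same idea of passing a callable as the detrend argument:

import numpy as np
from scipy import signal

x = 0.01 * np.arange(2000) + np.random.randn(2000)     # drifting noise
f, Pxx = signal.welch(x, fs=100.0, nperseg=256,
                      detrend=lambda seg: signal.detrend(seg, type='linear'))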
Example #18
def multitaper_spectrogram(data, spec_params):

  data = _change_row_to_column(data)

  fs = spec_params.fs
  nfft = spec_params.nfft

  nstep = spec_params.nstep
  shift = spec_params.shift

  nblocks = _get_nblocks(len(data), shift, nstep)
  nfreqs = spec_params.nfreqs

  if spec_params.trial_avg:
    Ch = data.shape[1]
    S = np.zeros((nblocks, nfreqs, Ch))
  else:
    S = np.zeros((nblocks, nfreqs))
  for idx in xrange(nblocks):
    datawin = signal.detrend(
        data[idx * nstep:idx * nstep + shift], type='constant')
    if idx < 2:
      N = len(datawin)
      taps = _dpsschk(spec_params.tapers, N, fs)
    J = _mtfftc(datawin, taps, nfft, fs)[
        spec_params.findx, :]
    s = np.mean((np.multiply(np.conj(J), J)), axis=1).squeeze()
    if spec_params.trial_avg:
      s = np.mean(s, axis=1).squeeze()
    S[idx, :] = s

  spect = S.squeeze()
  return spect
Example #19
def sources_to_nifti(CHECKPOINT, MASKMAT, BASENIFTI, ONAME, savepath, voxels, win):
    bnifti = load_image(BASENIFTI)
    mask = loadmat(MASKMAT)['mask']
    model = np.load(CHECKPOINT) # Numpy array of sources from Infomax ICA

    for i in range(len(model)): # Goes component by component

        W = model[i,:].reshape([voxels,win])

        f = zeros(len(mask))
        idx = where(mask==1)
        data = zeros((bnifti.shape[0],bnifti.shape[1],bnifti.shape[2],W.shape[1]))

        f[idx[0].tolist()] = detrend(W)/std(W)

        for j in range(0,W.shape[1]):
            data[:,:,:,j] = reshape(f,(bnifti.shape[0],bnifti.shape[1],bnifti.shape[2] ), order='F')

        img = Image.from_image(bnifti,data=data)

        os.chdir(savepath)

        fn = ONAME + "%s.nii" % (str(i)) # Where result should be saved and under what name

        save_image(img,fn)
Example #20
def detrend(A,args,params):
    """
    Remove trend from data
    
    Remove the trend from the time series data in `A`. Several methods are \\
    possible. The method is specified as the value of the `type` keyword in
    the argument dictionary `args`.
    
    Possible `types` of detrending:
       -`constant` or `demean`: subtract the mean of the traces
       
       -`linear`: subtract a least squares fitted linear trend from the data
    
    :type A: numpy.ndarray
    :param A: time series data with time oriented along the first \\
        dimension (columns)
    :type args: dictionary
    :param args: the only used keyword is `type`
    :type params: dictionary
    :param params: not used here
    
    :rtype: numpy.ndarray
    :return: detrended time series data
    """
    # for compatibility with obspy
    if args['type'] == 'demean':
        args['type'] = 'constant'
    if args['type'] == 'detrend':
        args['type'] = 'linear'
    A = signal.detrend(A,axis=0,type=args['type'])
    return A
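A hypothetical call matching the wrapper's args-dictionary interface (assumes scipy.signal is imported as signal in the module; params is unused by this routine):

import numpy as np
A = np.cumsum(np.random.randn(1000, 3), axis=0)   # three drifting traces, time along axis 0
A_det = detrend(A, {'type': 'linear'}, params={})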
Example #21
 def _run_interface(self,runtime):
     """ linear detrending
     """
     
     print("Linear detrending")
     print("=================")
 
     # Output from previous preprocessing step
     ref_path = self.inputs.in_file
 
     # Load data
     dataimg = nib.load( ref_path )
     data = dataimg.get_data()
     tp = data.shape[3]
 
     # GLM: regress out nuisance covariates
     new_data_det = data.copy()
     gm = nib.load(self.inputs.gm_file[0]).get_data().astype( np.uint32 )
 
     for index,value in np.ndenumerate( gm ):
         if value == 0:
             continue
 
         Ydet = signal.detrend(data[index[0],index[1],index[2],:].reshape(tp,1), axis=0)
         new_data_det[index[0],index[1],index[2],:] = Ydet[:,0]
 
     img = nib.Nifti1Image(new_data_det, dataimg.get_affine(), dataimg.get_header())
     nib.save(img, os.path.abspath('fMRI_detrending.nii.gz'))
 
     print("[ DONE ]") 
     return runtime
Example #22
def spec(T, axis=0):
  '''Combined detrending, windowing and real-valued FFT over axis.

  Usually, the FFT is preceded by a detrending and windowing to reduce
  edge effects. This function performs the detrending, windowing and
  real-valued FFT.

  Parameters
  ----------
  T : ndarray
    Tensor containing data to be detrended.
  axis : int, optional
    Specifies the axis over which the spectrum is calculated.

  Returns
  -------
  out: ndarray
    Array similar to `T`, where the time dimension is replaced with
    its frequency spectrum.

  See also:
  ---------
  windows : extract windows from continuous data.
  spec_weight : filter in the frequency domain.
  '''
  T = signal.detrend(T, axis=axis)
  win = np.hanning(T.shape[axis])
  win.shape = (np.where(np.arange(T.ndim) == axis, -1, 1))
  return np.fft.rfft(T * win, axis=axis)
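A hypothetical call computing the spectrum of a 50 Hz tone with a small drift, assuming numpy and scipy.signal are imported in the module:

import numpy as np
fs = 1000.0
t = np.arange(0, 1.0, 1.0 / fs)
T = np.sin(2 * np.pi * 50.0 * t) + 0.01 * t        # tone plus a slow drift
spectrum = spec(T, axis=0)                         # detrended, Hann-windowed rFFT
freqs = np.fft.rfftfreq(T.shape[0], 1.0 / fs)      # matching frequency axis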
def psd1d(hh = None,dx = 1.,tap = 0.05, detrend = True):


  hh = hh-numpy.mean(hh)
  nx = numpy.shape(hh)[0]

  if detrend:
    hh = signal.detrend(hh)

  if tap>0:
    ntaper = int(tap * nx + 0.5)
    taper = numpy.zeros(nx)+1.
    taper[:ntaper] = numpy.cos(numpy.arange(ntaper)/(ntaper-1.)*pi/2+3*pi/2)
    taper[-ntaper:] = numpy.cos(-numpy.arange(-ntaper+1,1)/(ntaper-1.)*pi/2+3*pi/2)
    hh = hh*taper

  ss = fft(hh)
  if nx % 2 != 0 : nx-=1

  ff = numpy.arange(1,nx/2-1)/(nx*dx)

  PSD = 2*dx/(nx)*numpy.abs(ss[1:int(nx/2)-1])**2


  return ff, PSD
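A hypothetical call on a noisy oscillation sampled at unit spacing (variable names are assumptions; the module is assumed to import pi, fft and scipy.signal as the function body requires):

import numpy
x = numpy.sin(2 * numpy.pi * 0.05 * numpy.arange(2048)) + 0.5 * numpy.random.randn(2048)
ff, PSD = psd1d(hh=x, dx=1.0, tap=0.05, detrend=True)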
Example #24
    def process_traces(self, s, h):
        """ Performs data processing operations on traces
        """
        # remove linear trend
        s = _signal.detrend(s)

        # mute 
        if PAR.MUTE:
            vel = PAR.MUTESLOPE
            off = PAR.MUTECONST
            inn = PAR.MUTEINNER
            # mute early arrivals
            s = smute(s, h, vel, off, inn, constant_spacing=False)
            # mute late arrivals
            vel = PAR.MUTESLOPE_BTM
            s = smutelow(s, h, vel, off, inn, constant_spacing=False)

        # filter data
        if PAR.FREQLO and PAR.FREQHI:
            s = sbandpass(s, h, PAR.FREQLO, PAR.FREQHI)


        # scale all traces by a single value (norm)
        if PAR.NORMALIZE_ALL:
            sum_norm = np.linalg.norm(s, ord=2)
            if sum_norm > 0:
                s /= sum_norm

        return s
Example #25
def detrend4d(para):
    """Linearly detrend each in-mask voxel time course of a 4D image.
    
    Parameters
    ----------
    para : 
        nested sequence; para[0][0] is the intensity-normalised image file,
        para[0][1] is the mask file
    
    Contributions
    -------------
    Date : 
    Author : 
    Reviewer :

    """

    intnorm_file = para[0][0]
    mask_file = para[0][1]
    imgseries = load(intnorm_file)
    mask = load(mask_file)
    voxel_timecourses = imgseries.get_data()
    m = np.nonzero(mask.get_data())
    for a in range(len(m[1])):
        voxel_timecourses[m[0][a],m[1][a],m[2][a]] = detrend(voxel_timecourses[m[0][a],m[1][a],m[2][a]], type='linear') 
    imgseries.data = voxel_timecourses
    outfile = para[0][0] + '_dt.nii.gz'
    imgseries.to_filename(outfile)
    return outfile
Example #26
def smooth(X, tr=1.5, ub=0.10, lb=0.001):
    """Smooth columns in X.
    
    Parameters:
    -----------
    X - a 2d array with features in cols
    tr - the repetition time or sampling (in seconds)
    ub - upper bound of the band pass (Hz/2*tr)
    lb - lower bound of the band pass (Hz/2*tr)

    Note:
    ----
    Smoothing is a linear detrend followed by a bandpass filter from
    0.0625-0.15 Hz
    """

    # Linear detrend
    Xf = signal.detrend(X, axis=0, type="linear", bp=0)

    # Band pass
    ts = nt.TimeSeries(Xf.transpose(), sampling_interval=tr)
    Xf = nt.analysis.FilterAnalyzer(ts, ub=ub, lb=lb).fir.data
    ## ub and lb selected after some experimentation
    ## with the simulated accumulator data
    ## ub=0.10, lb=0.001).fir.data

    Xf = Xf.transpose()
    ## TimeSeries assumes last axis is time, and we need
    ## the first axis to be time.

    return Xf
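A hypothetical call, assuming nitime is imported as nt and scipy.signal as signal in the module, as the function body requires:

import numpy as np
X = np.cumsum(np.random.randn(200, 10), axis=0)   # 200 TRs x 10 features with slow drift
Xf = smooth(X, tr=1.5, ub=0.10, lb=0.001)          # detrend, then band-pass each column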
Example #27
    def test_dft_3d_dask(self, dask):
        """Test the discrete Fourier transform on 3D dask array data"""
        N=16
        da = xr.DataArray(np.random.rand(N,N,N), dims=['time','x','y'],
                          coords={'time':range(N),'x':range(N),
                                  'y':range(N)}
                         )
        if dask:
            da = da.chunk({'time': 1})
            daft = xrft.dft(da, dim=['x','y'], shift=False)
            npt.assert_almost_equal(daft.values,
                                   np.fft.fftn(da.chunk({'time': 1}).values,
                                              axes=[1,2])
                                   )
            da = da.chunk({'x': 1})
            with pytest.raises(ValueError):
                xrft.dft(da, dim=['x'])

            da = da.chunk({'time':N})
            daft = xrft.dft(da, dim=['time'],
                            shift=False, detrend='linear')
            da_prime = sps.detrend(da, axis=0)
            npt.assert_almost_equal(daft.values,
                                   np.fft.fftn(da_prime, axes=[0])
                                   )
Example #28
    def calc_compcor_components(data, nComponents, wm_sigs, csf_sigs):
        import scipy.signal as signal

        wmcsf_sigs = np.vstack((wm_sigs, csf_sigs)).astype("float32")

        # filter out any voxels whose variance equals 0
        print "Removing zero variance components"
        wmcsf_sigs = wmcsf_sigs[wmcsf_sigs.std(1) != 0, :]

        if wmcsf_sigs.shape.count(0):
            print "No wm or csf signals left after removing those with zero variance"
            raise IndexError

        print "Detrending and centering data"
        Y = signal.detrend(wmcsf_sigs, axis=1, type="linear").T
        Yc = Y - np.tile(Y.mean(0), (Y.shape[0], 1))
        Yc = Yc / np.tile(np.array(Y.std(0)).reshape(1, Y.shape[1]), (Y.shape[0], 1))

        print Yc.dtype

        print "Calculating SVD decomposition of Y*Y'"
        # U, S, Vh = np.linalg.svd(Yc)
        # U, S, Vh = scipy.linalg.svd(Yc)
        U, S, Vh = scipy.sparse.linalg.svds(Yc)
        return U[:, :nComponents]
Example #29
    def __call__(self, tvec, acc, gyro, mag):

        # Get the inclination measurements
        horRef = np.cross(self.sagittalDir, self.vertRefDir)
        (tauK, yinc) = inclination(acc, self.vertRefDir, horRef,
                                   self.g, self.gThreshold)

        # Integrate the projected zero-mean gyro data
        w = detrend(np.dot(gyro, self.sagittalDir), type='constant')
        wint = cumtrapz(w, tvec)
        wint = np.insert(wint, 0, wint[0])
        # translate the angle (add constant offset) to best match inclination
        # measurements
        # phi[tauK] = wint[tauK] + offset = yinc
        # 1*offset = yinc - wint[tauK]
        # offset = 1' * (yinc - wint[tauK])/len(tauK)
        phi = wint + np.mean(yinc - wint[tauK])

        self.phi = phi
        self.yinc = np.column_stack( (tvec[tauK], yinc))
        self.tvec = tvec
        self.gyrodta = gyro
        self.accdta = acc

        # Return a Quaternion array
        return [quat.QuaternionFromAxis(self.sagittalDir, phi_) for phi_ in self.phi]
Example #30
    def applyMeth(self, x, fMeth):
        """Apply the methods
        x : array signal
        fMeth : list of methods
        -> 3D array of the transformed signal
        """
        npts, ntrial = x.shape
        nFce = len(fMeth)
        xf = n.zeros((nFce, npts, ntrial))

        # Detrend the signal :
        if self.dtrd:
            x = detrend(x, axis=0)

        # Apply methods :
        for k in range(0, nFce):  # For each frequency in the tuple
            xf[k, ...] = fMeth[k](x)

        # Correction for the wavelet (due to the wavelet width):
        if (self.method == 'wavelet') and (self.wltCorr is not None):
            w = 3*self.wltWidth
            xf[:, 0:w, :] = xf[:, w+1:2*w+1, :]
            xf[:, npts-w:npts, :] = xf[:, npts-2*w-1:npts-w-1, :]

        return xf
Example #31
def main(parser):

    # print(bcolors.HEADER + "*** Conduction Velocity Analyzer ***" + bcolors.ENDC) # on windows cmd colors don't work
    print('\n***************************************')
    print('*** Conduction Velocity Analyzer ******')
    print('\n***************************************')

    # parse arguments
    args = parser.parse_args()

    txt_path = args.input_filepath[0]

    # extract filename and directory path
    filename = os.path.basename(txt_path)
    folderpath = os.path.dirname(txt_path)

    # extract experiment parameters from filename and print info to console
    burst, param = extract_info(filename, _print=True)

    # create excel file
    wb, sheet, out_path, n_col_sheet_block = prepare_excel_wb(
        folderpath, filename, burst, param)

    # read OM tracks values
    values = np.loadtxt(txt_path, dtype=float, usecols=(0, 1, 2))
    param[pKeys.exp_duration] = values.shape[0] / param[pKeys.frame_rate]
    print("- Duration of record : {} ms".format(param[pKeys.exp_duration]))

    # split timing, roi1 (apex) and roi2 (base) values
    full_ms, full_apex, full_base = values[:, 0], values[:, 1], values[:, 2]
    ''' ===== [ START ANALYSIS ] ===== - for each burst '''
    for (i_burst, b) in enumerate(burst):

        print('\n\n********* Analyzing Burst at {}Hz *********'.format(
            b[bKeys.freq_stim]))

        # extract ramp
        ms = full_ms[b[bKeys.start_ms]:b[bKeys.stop_ms]]
        apex = full_apex[b[bKeys.start_ms]:b[bKeys.stop_ms]]
        base = full_base[b[bKeys.start_ms]:b[bKeys.stop_ms]]
        # plot_signals(apex, base, title="Ramp at {}Hz".format(b[keys.freq_stim]))

        # detrend signals
        apex_flat = sign.detrend(apex, type='linear')
        base_flat = sign.detrend(base, type='linear')

        # plot original and flattened
        # plot_signals(apex, base, title="Original Tracks")
        # plot_signals(apex_flat, base_flat, title="Detrended Tracks")

        # if selected, apply median filter and plot results
        if param[pKeys.rank] != 0:
            apex_filt = sign.medfilt(apex_flat, kernel_size=param[pKeys.rank])
            base_filt = sign.medfilt(base_flat, kernel_size=param[pKeys.rank])
            # plot_signals(apex_filt, base_filt, title="Filtered Tracks (rank = {})".format(param[keys.rank]))

        # ENSURE TO USE THE RIGHT SIGNAL (filtered or only flattened)
        (apex_sgn,
         base_sgn) = (apex_filt,
                      base_filt) if param[pKeys.rank] != 0 else (apex_flat,
                                                                 base_flat)
        ''' ===== [ FIND PEAKS, PERIODS and SELECT AP TO ANALYZE ] ===== '''

        # find peaks - returns the (x, y) of the peaks
        # -> pass the inverted signal (because find_peaks looks for maxima)
        # require the peaks to be at least 2/3 of the AP period apart
        # and keep only the x (time): [0] -> take only the peak positions
        a_peaks = sign.find_peaks(-apex_sgn,
                                  distance=(2 / 3) * b[bKeys.AP_duration])[0]
        b_peaks = sign.find_peaks(-base_sgn,
                                  distance=(2 / 3) * b[bKeys.AP_duration])[0]

        # plot the signals with their peaks and period durations
        plot_signals_with_peaks([apex_sgn, base_sgn], [a_peaks, b_peaks],
                                ['Apex', 'Base'],
                                "Ramp at {}Hz".format(b[bKeys.freq_stim]),
                                _enum_peaks=True,
                                _plot_intervals=True,
                                frame_rate=param[pKeys.frame_rate])

        # estimate the duration of each AP from the difference of consecutive peaks in the apex
        print("- Control of stimulation frequency in the apex signal... ")
        AP_periods = np.diff(a_peaks) / param[pKeys.frame_rate]
        freq_stim_estimated = 1 / np.mean(AP_periods / 1000)
        print("-- Stimulation Frequency obtained: {0:0.1f}Hz".format(
            freq_stim_estimated))

        # the user can select which action potentials to use to estimate the mean conduction velocity
        selection = input(
            "\n***AP selection to estimate mean conduction velocity.\n"
            "  Insert AP indexes separated by spaces. Example:\n"
            "  0 2 3 5 7 8 9 [<- then press 'Enter'].\n"
            "  Please, enter your selection here and then press 'Enter': \n")
        # extract selected indexes
        ap_selected_idx = selection.split(' ')
        print('- AP selected for Conduction Velocity Estimation: ')
        for l in ap_selected_idx:
            print(l, end='°, ')
        ''' ===== [ ANALYZE EACH ACTION POTENTIAL TO FIND THE APEX-BASE DELAY ] ===== '''
        # USE INTERPOLATION AND CROSS-CORRELATION TO ESTIMATE THE DELAY
        # from the selected AP potentials
        # Idea: for each peak, take the right neighbourhood to select the action potential,
        # interpolate, compute the delay from the cross-correlation peak, and then average all the delays

        cv_list = list()  # list of conduction velocity extracted for each AP

        # for each AP selected
        for (i_spike, spike_num) in enumerate(
                np.asarray(ap_selected_idx).astype(int)):

            # calculate the extremes of the selected action potential signal
            t1, t2 = int(b[bKeys.AP_duration] * spike_num), int(
                b[bKeys.AP_duration] * (spike_num + 1))
            ms_sel = ms[t1:t2]  # time
            base_sel = base_sgn[t1:t2]  # base
            apex_sel = apex_sgn[t1:t2]  # apex

            # resample the two signals with a 0.2 ms step (as in LabVIEW)
            dt = 0.2

            # build interpolation functions for the two traces
            f_apex = interpolate.interp1d(ms_sel, apex_sel)
            f_base = interpolate.interp1d(ms_sel, base_sel)

            # create the new time axis
            ms_res = np.arange(ms_sel[0], ms_sel[-1], dt)

            # resample signals using interpolation functions calculated above
            apex_res = f_apex(ms_res)
            base_res = f_base(ms_res)

            # estimate delay by cross-correlation max values
            delay_ms = lag_finder_in_ms(apex_res, base_res, 1000 / dt)

            # estimate and save conduction velocity
            cv = param[pKeys.ROI_distance_mm] / delay_ms
            cv_list.append(cv)

            # write spike_num and cv
            sheet.write(i_spike + 7, (i_burst * (n_col_sheet_block + 1)) + 1,
                        "{}".format(int(spike_num)))
            sheet.write(i_spike + 7, (i_burst * (n_col_sheet_block + 1)) + 2,
                        "{0:0.3f}".format(cv))

        # estimate mean and std. error
        avg = np.mean(np.asarray(cv_list))
        sem = stats.sem(np.asarray(cv_list))

        # write mean and std. error into excel file
        sheet.write(17, (i_burst * (n_col_sheet_block + 1)) + 2,
                    "{0:0.3f}".format(avg))
        sheet.write(18, (i_burst * (n_col_sheet_block + 1)) + 2,
                    "{0:0.3f}".format(sem))

        # print results and average
        print("*** RESULTS:")
        print("- Conduction velocities in m/s:")
        for cv in cv_list:
            print("-- {0:0.3f} m/s".format(cv))

        print("\n- Average Conduction velocity: ", end='')
        print("-- {0:0.3f} +- {1:0.3f} m/s".format(avg, sem))

    # save excel file
    wb.save(out_path)
    print('\nOutput saved in:')
    print(out_path)
    print(
        ' --------- Conduction Velocity Analyzer: Process finished. ---------\n'
    )
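The helper lag_finder_in_ms is not shown in this listing; a minimal cross-correlation delay estimate in milliseconds (an assumed sketch; the sign convention depends on which trace leads) might look like this:

import numpy as np

def lag_in_ms_sketch(a, b, fs_hz):
    a = a - a.mean()
    b = b - b.mean()
    xcorr = np.correlate(a, b, mode='full')
    lag_samples = np.argmax(xcorr) - (len(b) - 1)   # offset of the correlation peak
    return 1000.0 * lag_samples / fs_hz             # convert samples to milliseconds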
Example #32
def _nuisanceRegression(subj,
                        run,
                        inputdata,
                        outputdir,
                        model='24pXaCompCorXVolterra',
                        spikeReg=False,
                        zscore=False,
                        nproc=8):
    """
    This function runs nuisance regression on the Glasser Parcels (360) on a single subjects run
    Will only regress out noise parameters given the model choice (see below for model options)

    Input parameters:
        subj    : subject number as a string
        run     : task run
        outputdir: Directory for GLM output, as an h5 file (each run will be contained within each h5)
        model   : model choices for linear regression. Models include:
                    1. 24pXaCompCorXVolterra [default]
                        Variant from Ciric et al. 2017. 
                        Includes (64 regressors total):
                            - Movement parameters (6 directions; x, y, z displacement, and 3 rotations) and their derivatives, and their quadratics (24 regressors)
                            - aCompCor (5 white matter and 5 ventricle components) and their derivatives, and their quadratics (40 regressors)
                    2. 18p (the legacy default)
                        Includes (18 regressors total):
                            - Movement parameters (6 directions) and their derivatives (12 regressors)
                            - Global signal and its derivative (2 regressors)
                            - White matter signal and its derivative (2 regressors)
                            - Ventricles signal and its derivative (2 regressors)
                    3. 16pNoGSR (the legacy default, without GSR)
                        Includes (16 regressors total):
                            - Movement parameters (6 directions) and their derivatives (12 regressors)
                            - White matter signal and its derivative (2 regressors)
                            - Ventricles signal and its derivative (2 regressors)
                    4. 12pXaCompCor (Typical motion regression, but using CompCor (noGSR))
                        Includes (32 regressors total):
                            - Movement parameters (6 directions) and their derivatives (12 regressors)
                            - aCompCor (5 white matter and 5 ventricle components) and their derivatives (no quadratics; 20 regressors)
                    5. 36p (State-of-the-art, according to Ciric et al. 2017)
                        Includes (36 regressors total - same as legacy, but with quadratics):
                            - Movement parameters (6 directions) and their derivatives and quadratics (24 regressors)
                            - Global signal and its derivative and both quadratics (4 regressors)
                            - White matter signal and its derivative and both quadratics (4 regressors)
                            - Ventricles signal and its derivative (4 regressors)
        spikeReg : spike regression (Satterthwaite et al. 2013) [True/False]
                        Note, inclusion of this will add additional set of regressors, which is custom for each subject/run
        zscore   : Normalize data (across time) prior to fitting regression
        nproc = number of processes to use via multiprocessing
    """

    data = inputdata

    tMask = np.ones((data.shape[1], ))
    tMask[:framesToSkip] = 0

    # Skip frames
    data = data[:, framesToSkip:]

    # Demean each run
    data = signal.detrend(data, axis=1, type='constant')
    # Detrend each run
    data = signal.detrend(data, axis=1, type='linear')
    tMask = np.asarray(tMask, dtype=bool)

    nROIs = data.shape[0]

    # Load nuisance regressors for this data
    h5f = h5py.File(nuis_reg_dir + subj + '_nuisanceRegressors.h5', 'r')
    if model == '24pXaCompCorXVolterra':
        # Motion parameters + derivatives
        motion_parameters = h5f[run]['motionParams'][:].copy()
        motion_parameters_deriv = h5f[run]['motionParams_deriv'][:].copy()
        # WM aCompCor + derivatives
        aCompCor_WM = h5f[run]['aCompCor_WM'][:].copy()
        aCompCor_WM_deriv = h5f[run]['aCompCor_WM_deriv'][:].copy()
        # Ventricles aCompCor + derivatives
        aCompCor_ventricles = h5f[run]['aCompCor_ventricles'][:].copy()
        aCompCor_ventricles_deriv = h5f[run][
            'aCompCor_ventricles_deriv'][:].copy()
        # Create nuisance regressors design matrix
        nuisanceRegressors = np.hstack(
            (motion_parameters, motion_parameters_deriv, aCompCor_WM,
             aCompCor_WM_deriv, aCompCor_ventricles,
             aCompCor_ventricles_deriv))
        quadraticRegressors = nuisanceRegressors**2
        nuisanceRegressors = np.hstack(
            (nuisanceRegressors, quadraticRegressors))

    elif model == '18p':
        # Motion parameters + derivatives
        motion_parameters = h5f[run]['motionParams'][:].copy()
        motion_parameters_deriv = h5f[run]['motionParams_deriv'][:].copy()
        # Global signal + derivatives
        global_signal = h5f[run]['global_signal'][:].copy()
        global_signal_deriv = h5f[run]['global_signal_deriv'][:].copy()
        # white matter signal + derivatives
        wm_signal = h5f[run]['wm_signal'][:].copy()
        wm_signal_deriv = h5f[run]['wm_signal_deriv'][:].copy()
        # ventricle signal + derivatives
        ventricle_signal = h5f[run]['ventricle_signal'][:].copy()
        ventricle_signal_deriv = h5f[run]['ventricle_signal_deriv'][:].copy()
        # Create nuisance regressors design matrix
        tmp = np.vstack(
            (global_signal, global_signal_deriv, wm_signal, wm_signal_deriv,
             ventricle_signal, ventricle_signal_deriv
             )).T  # Need to vstack, since these are 1d arrays
        nuisanceRegressors = np.hstack(
            (motion_parameters, motion_parameters_deriv, tmp))

    elif model == '16pNoGSR':
        # Motion parameters + derivatives
        motion_parameters = h5f[run]['motionParams'][:].copy()
        motion_parameters_deriv = h5f[run]['motionParams_deriv'][:].copy()
        # white matter signal + derivatives
        wm_signal = h5f[run]['wm_signal'][:].copy()
        wm_signal_deriv = h5f[run]['wm_signal_deriv'][:].copy()
        # ventricle signal + derivatives
        ventricle_signal = h5f[run]['ventricle_signal'][:].copy()
        ventricle_signal_deriv = h5f[run]['ventricle_signal_deriv'][:].copy()
        # Create nuisance regressors design matrix
        tmp = np.vstack((wm_signal, wm_signal_deriv, ventricle_signal,
                         ventricle_signal_deriv
                         )).T  # Need to vstack, since these are 1d arrays
        nuisanceRegressors = np.hstack(
            (motion_parameters, motion_parameters_deriv, tmp))

    elif model == '12pXaCompCor':
        # Motion parameters + derivatives
        motion_parameters = h5f[run]['motionParams'][:].copy()
        motion_parameters_deriv = h5f[run]['motionParams_deriv'][:].copy()
        # WM aCompCor + derivatives
        aCompCor_WM = h5f[run]['aCompCor_WM'][:].copy()
        aCompCor_WM_deriv = h5f[run]['aCompCor_WM_deriv'][:].copy()
        # Ventricles aCompCor + derivatives
        aCompCor_ventricles = h5f[run]['aCompCor_ventricles'][:].copy()
        aCompCor_ventricles_deriv = h5f[run][
            'aCompCor_ventricles_deriv'][:].copy()
        # Create nuisance regressors design matrix
        nuisanceRegressors = np.hstack(
            (motion_parameters, motion_parameters_deriv, aCompCor_WM,
             aCompCor_WM_deriv, aCompCor_ventricles,
             aCompCor_ventricles_deriv))

    elif model == '36p':
        # Motion parameters + derivatives
        motion_parameters = h5f[run]['motionParams'][:].copy()
        motion_parameters_deriv = h5f[run]['motionParams_deriv'][:].copy()
        # Global signal + derivatives
        global_signal = h5f[run]['global_signal'][:].copy()
        global_signal_deriv = h5f[run]['global_signal_deriv'][:].copy()
        # white matter signal + derivatives
        wm_signal = h5f[run]['wm_signal'][:].copy()
        wm_signal_deriv = h5f[run]['wm_signal_deriv'][:].copy()
        # ventricle signal + derivatives
        ventricle_signal = h5f[run]['ventricle_signal'][:].copy()
        ventricle_signal_deriv = h5f[run]['ventricle_signal_deriv'][:].copy()
        # Create nuisance regressors design matrix
        tmp = np.vstack(
            (global_signal, global_signal_deriv, wm_signal, wm_signal_deriv,
             ventricle_signal, ventricle_signal_deriv
             )).T  # Need to vstack, since these are 1d arrays
        nuisanceRegressors = np.hstack(
            (motion_parameters, motion_parameters_deriv, tmp))
        quadraticRegressors = nuisanceRegressors**2
        nuisanceRegressors = np.hstack(
            (nuisanceRegressors, quadraticRegressors))

    if spikeReg:
        # Obtain motion spikes
        try:
            motion_spikes = h5f[run]['motionSpikes'][:].copy()
            nuisanceRegressors = np.hstack((nuisanceRegressors, motion_spikes))
        except:
            print 'Spike regression option was chosen... but no motion spikes for subj', subj, '| run', run, '!'
        # Update the model name - to keep track of different model types for output naming
        model = model + '_spikeReg'

    if zscore:
        model = model + '_zscore'

    h5f.close()
    # Skip first 5 frames of nuisanceRegressors, too
    nuisanceRegressors = nuisanceRegressors[framesToSkip:, :].copy()

    betas, resid = regression.regression(data.T,
                                         nuisanceRegressors,
                                         constant=True)

    betas = betas.T  # Exclude nuisance regressors
    residual_ts = resid.T

    if zscore:
        residual_ts = stats.zscore(residual_ts, axis=1)

    outname1 = run + '/nuisanceReg_resid_' + model
    outname2 = run + '/nuisanceReg_betas_' + model

    outputfilename = outputdir + subj + '_glmOutput_data.h5'
    h5f = h5py.File(outputfilename, 'a')
    try:
        h5f.create_dataset(outname1, data=residual_ts)
        h5f.create_dataset(outname2, data=betas)
    except:
        del h5f[outname1], h5f[outname2]
        h5f.create_dataset(outname1, data=residual_ts)
        h5f.create_dataset(outname2, data=betas)
    h5f.close()
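A minimal sketch of the demean/detrend-then-regress pattern used above, with numpy's least-squares solver standing in for the project's regression.regression helper (all names here are assumptions):

import numpy as np
from scipy import signal

def nuisance_regress_sketch(data, regressors):
    """data: (nROIs, nTRs); regressors: (nTRs, nRegressors). Returns betas and residuals."""
    data = signal.detrend(data, axis=1, type='constant')            # demean each run
    data = signal.detrend(data, axis=1, type='linear')              # remove the linear trend
    X = np.hstack((regressors, np.ones((regressors.shape[0], 1))))  # add a constant column
    betas, _, _, _ = np.linalg.lstsq(X, data.T, rcond=None)
    resid = data.T - X.dot(betas)
    return betas.T, resid.T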
Example #33
 def linear_detrend(self):
     """Remove linear trend from `values`"""
     self.values = detrend(self.values, type="linear")
     self._setattr("proc_linear_detrend")
     return self
Example #34
def _calc_norm_affine(affines, use_differences, brain_pts=None):
    """Calculates the maximum overall displacement of the midpoints
    of the faces of a cube due to translation and rotation.

    Parameters
    ----------
    affines : list of [4 x 4] affine matrices
    use_differences : boolean
    brain_pts : [4 x n_points] of coordinates

    Returns
    -------

    norm : at each time point
    displacement : euclidean distance (mm) of displacement at each coordinate

    """

    if brain_pts is None:
        respos = np.diag([70, 70, 75])
        resneg = np.diag([-70, -110, -45])
        all_pts = np.vstack((np.hstack((respos, resneg)), np.ones((1, 6))))
        displacement = None
    else:
        all_pts = brain_pts
    n_pts = all_pts.size - all_pts.shape[1]
    newpos = np.zeros((len(affines), n_pts))
    if brain_pts is not None:
        displacement = np.zeros((len(affines), int(n_pts / 3)))
    for i, affine in enumerate(affines):
        newpos[i, :] = np.dot(affine, all_pts)[0:3, :].ravel()
        if brain_pts is not None:
            displacement[i, :] = np.sqrt(
                np.sum(
                    np.power(
                        np.reshape(newpos[i, :], (3, all_pts.shape[1]))
                        - all_pts[0:3, :],
                        2,
                    ),
                    axis=0,
                )
            )
    # np.savez('displacement.npz', newpos=newpos, pts=all_pts)
    normdata = np.zeros(len(affines))
    if use_differences:
        newpos = np.concatenate(
            (np.zeros((1, n_pts)), np.diff(newpos, n=1, axis=0)), axis=0
        )
        for i in range(newpos.shape[0]):
            normdata[i] = np.max(
                np.sqrt(
                    np.sum(
                        np.reshape(
                            np.power(np.abs(newpos[i, :]), 2), (3, all_pts.shape[1])
                        ),
                        axis=0,
                    )
                )
            )
    else:
        from scipy.signal import detrend

        newpos = np.abs(detrend(newpos, axis=0, type="constant"))
        normdata = np.sqrt(np.mean(np.power(newpos, 2), axis=1))
    return normdata, displacement
 def __demean_filter(self):
     # Detrend the buffered data (removes its mean / slow trend)
     filtered = sig.detrend(self.__data_buffer)
     self.__data_buffer = filtered
     return
Example #36
def update():
    global specmax_curr, specmin_curr, specmax_hist, specmin_hist, fft_prev, fft_hist, redfreq, redwidth, bluefreq, bluewidth, counter, history

    # get last data
    last_index = ft_input.getHeader().nSamples
    begsample = (last_index - window)
    endsample = (last_index - 1)
    data = ft_input.getData([begsample, endsample])

    if debug > 0:
        print("reading from sample %d to %d" % (begsample, endsample))

    # demean and detrend data before filtering to reduce edge artefacts and center timecourse
    data = detrend(data, axis=0)

    # taper data
    taper = np.hanning(len(data))
    data = data * taper[:, np.newaxis]

    # shift data to next sample
    history = np.roll(history, 1, axis=2)

    for ichan in range(numchannel):
        channr = int(chanarray[ichan])

        # estimate FFT at current moment, apply some temporal smoothing
        fft_temp = abs(fft(data[:, channr]))
        fft_curr[ichan] = fft_temp * lrate + fft_prev[ichan] * (1 - lrate)
        fft_prev[ichan] = fft_curr[ichan]

        # update FFT history with current estimate
        history[ichan, :, numhistory - 1] = fft_temp
        fft_hist = np.nanmean(history, axis=2)

        # user-selected frequency band
        arguments_freqrange = patch.getfloat('arguments',
                                             'freqrange',
                                             multiple=True)
        freqrange = np.greater(freqaxis,
                               arguments_freqrange[0]) & np.less_equal(
                                   freqaxis, arguments_freqrange[1])

        # update panels
        spect_curr[ichan].setData(freqaxis[freqrange],
                                  fft_curr[ichan][freqrange])
        spect_hist[ichan].setData(freqaxis[freqrange],
                                  fft_hist[ichan][freqrange])

        # adapt the vertical scale to the running mean of min/max
        specmax_curr[ichan] = float(specmax_curr[ichan]) * (
            1 - lrate) + lrate * max(fft_curr[ichan][freqrange])
        specmin_curr[ichan] = float(specmin_curr[ichan]) * (
            1 - lrate) + lrate * min(fft_curr[ichan][freqrange])
        specmax_hist[ichan] = float(specmax_hist[ichan]) * (
            1 - lrate) + lrate * max(fft_hist[ichan][freqrange])
        specmin_hist[ichan] = float(specmin_hist[ichan]) * (
            1 - lrate) + lrate * min(fft_hist[ichan][freqrange])

        freqplot_curr[ichan].setXRange(arguments_freqrange[0],
                                       arguments_freqrange[1])
        freqplot_hist[ichan].setXRange(arguments_freqrange[0],
                                       arguments_freqrange[1])
        freqplot_curr[ichan].setYRange(specmin_curr[ichan],
                                       specmax_curr[ichan])
        freqplot_hist[ichan].setYRange(specmin_hist[ichan],
                                       specmax_hist[ichan])

        # update plotted lines
        redfreq = patch.getfloat('input',
                                 'redfreq',
                                 default=10. / arguments_freqrange[1])
        redfreq = EEGsynth.rescale(redfreq, slope=scale_red,
                                   offset=offset_red) * arguments_freqrange[1]
        redwidth = patch.getfloat('input',
                                  'redwidth',
                                  default=1. / arguments_freqrange[1])
        redwidth = EEGsynth.rescale(redwidth,
                                    slope=scale_red,
                                    offset=offset_red) * arguments_freqrange[1]
        bluefreq = patch.getfloat('input',
                                  'bluefreq',
                                  default=20. / arguments_freqrange[1])
        bluefreq = EEGsynth.rescale(
            bluefreq, slope=scale_blue,
            offset=offset_blue) * arguments_freqrange[1]
        bluewidth = patch.getfloat('input',
                                   'bluewidth',
                                   default=4. / arguments_freqrange[1])
        bluewidth = EEGsynth.rescale(
            bluewidth, slope=scale_blue,
            offset=offset_blue) * arguments_freqrange[1]

        if showred:
            redleft_curr[ichan].setData(
                x=[redfreq - redwidth, redfreq - redwidth],
                y=[specmin_curr[ichan], specmax_curr[ichan]])
            redright_curr[ichan].setData(
                x=[redfreq + redwidth, redfreq + redwidth],
                y=[specmin_curr[ichan], specmax_curr[ichan]])
        if showblue:
            blueleft_curr[ichan].setData(
                x=[bluefreq - bluewidth, bluefreq - bluewidth],
                y=[specmin_curr[ichan], specmax_curr[ichan]])
            blueright_curr[ichan].setData(
                x=[bluefreq + bluewidth, bluefreq + bluewidth],
                y=[specmin_curr[ichan], specmax_curr[ichan]])
        if showred:
            redleft_hist[ichan].setData(
                x=[redfreq - redwidth, redfreq - redwidth],
                y=[specmin_hist[ichan], specmax_hist[ichan]])
            redright_hist[ichan].setData(
                x=[redfreq + redwidth, redfreq + redwidth],
                y=[specmin_hist[ichan], specmax_hist[ichan]])
        if showblue:
            blueleft_hist[ichan].setData(
                x=[bluefreq - bluewidth, bluefreq - bluewidth],
                y=[specmin_hist[ichan], specmax_hist[ichan]])
            blueright_hist[ichan].setData(
                x=[bluefreq + bluewidth, bluefreq + bluewidth],
                y=[specmin_hist[ichan], specmax_hist[ichan]])

    # update labels at plotted lines
    if showred:
        text_redleft.setText('%0.1f' % (redfreq - redwidth))
        text_redleft.setPos(redfreq - redwidth, specmax_curr[0])
        text_redright.setText('%0.1f' % (redfreq + redwidth))
        text_redright.setPos(redfreq + redwidth, specmax_curr[0])
    else:
        text_redleft.setText("")
        text_redright.setText("")
    if showblue:
        text_blueleft.setText('%0.1f' % (bluefreq - bluewidth))
        text_blueleft.setPos(bluefreq - bluewidth, specmax_curr[0])
        text_blueright.setText('%0.1f' % (bluefreq + bluewidth))
        text_blueright.setPos(bluefreq + bluewidth, specmax_curr[0])
    else:
        text_blueleft.setText("")
        text_blueright.setText("")

    if showred:
        text_redleft_hist.setText('%0.1f' % (redfreq - redwidth))
        text_redleft_hist.setPos(redfreq - redwidth, specmax_hist[0])
        text_redright_hist.setText('%0.1f' % (redfreq + redwidth))
        text_redright_hist.setPos(redfreq + redwidth, specmax_hist[0])
    else:
        text_redleft_hist.setText("")
        text_redright_hist.setText("")
    if showblue:
        text_blueleft_hist.setText('%0.1f' % (bluefreq - bluewidth))
        text_blueleft_hist.setPos(bluefreq - bluewidth, specmax_hist[0])
        text_blueright_hist.setText('%0.1f' % (bluefreq + bluewidth))
        text_blueright_hist.setPos(bluefreq + bluewidth, specmax_hist[0])
    else:
        text_blueleft_hist.setText("")
        text_blueright_hist.setText("")

    key = "%s.%s.%s" % (prefix, 'redband', 'low')
    patch.setvalue(key, redfreq - redwidth)
    key = "%s.%s.%s" % (prefix, 'redband', 'high')
    patch.setvalue(key, redfreq + redwidth)
    key = "%s.%s.%s" % (prefix, 'blueband', 'low')
    patch.setvalue(key, bluefreq - bluewidth)
    key = "%s.%s.%s" % (prefix, 'blueband', 'high')
    patch.setvalue(key, bluefreq + bluewidth)
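# The spectra above are smoothed with a leaky integrator controlled by "lrate".
# A minimal sketch (illustrative values) of that update rule on a scalar:
lrate = 0.1
estimate = 0.0
for sample in (4.0, 5.0, 4.5, 6.0):
    # exponential moving average: new sample weighted by lrate, history by (1 - lrate)
    estimate = sample * lrate + estimate * (1.0 - lrate)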
Example #37
def compute_wavelets_tremor(lats, lons, radius_tremor, wavelet, J):
    """
    """
    # Read tremor files (A. Wech)
    # One file per year; 2009 starts on 2009-08-06 and 2021 ends on 2021-04-29
    bounds = [('2009-08-06T00 00 00', '2009-12-31T23 59 59')] \
        + [('%d-01-01T00 00 00' % year, '%d-12-31T23 59 59' % year) \
           for year in range(2010, 2021)] \
        + [('2021-01-01T00 00 00', '2021-04-29T23 59 59')]
    frames = []
    for (tbeg, tend) in bounds:
        df = pd.read_csv('../data/tremor/tremor_events-%s-%s.csv' % (tbeg, tend))
        df['time '] = pd.to_datetime(df['time '], format='%Y-%m-%d %H:%M:%S')
        frames.append(df)
    data = pd.concat(frames)
    data.reset_index(drop=True, inplace=True)

    # To convert lat/lon into kilometers
    a = 6378.136
    e = 0.006694470

    # Time vector
    time = pickle.load(open('tmp/time.pkl', 'rb'))

    # Loop on latitude and longitude
    for index, (lat, lon) in enumerate(zip(lats, lons)):

        # Keep only tremor in a given radius
        dx = (pi / 180.0) * a * cos(lat * pi / 180.0) / sqrt(1.0 - e * e * \
            sin(lat * pi / 180.0) * sin(lat * pi / 180.0))
        dy = (3.6 * pi / 648.0) * a * (1.0 - e * e) / ((1.0 - e * e * \
            sin(lat * pi / 180.0) * sin(lat * pi / 180.0)) ** 1.5)
        x = dx * (data['lon'] - lon)
        y = dy * (data['lat'] - lat)
        distance = np.sqrt(np.power(x, 2.0) + np.power(y, 2.0))
        data['distance'] = distance
        tremor = data.loc[data['distance'] <= radius_tremor].copy()
        tremor.reset_index(drop=True, inplace=True)
 
        # Convert tremor time
        nt = len(tremor)
        time_tremor = np.zeros(nt)
        for i in range(0, nt):
            year = tremor['time '].loc[i].year
            month = tremor['time '].loc[i].month
            day = tremor['time '].loc[i].day
            hour = tremor['time '].loc[i].hour
            minute = tremor['time '].loc[i].minute
            second = tremor['time '].loc[i].second
            time_tremor[i] = date.ymdhms2day(year, month, day, hour, minute, second)

        # Interpolate
        tremor = np.interp(time, np.sort(time_tremor), (1.0 / nt) * np.arange(0, len(time_tremor)))

        # Start figure
        params = {'xtick.labelsize':24,
                  'ytick.labelsize':24}
        pylab.rcParams.update(params)   
        fig = plt.figure(1, figsize=(60, 5 * (J + 3)))

        # First method: Detrending
        tremor_detrend = detrend(tremor)

        # MODWT
        (W, V) = pyramid(tremor_detrend, wavelet, J)
        (D, S) = get_DS(tremor_detrend, W, wavelet, J)

        # Save wavelets
#        pickle.dump([time, tremor_detrend, W, V, D, S], \
#            open('tmp/tremor_' + str(index) + '.pkl', 'wb'))
        
        maxD = max([np.max(Dj) for Dj in D])
        minD = min([np.min(Dj) for Dj in D])

        # Plot data
        plt.subplot2grid((J + 2, 4), (J + 1, 0))
        plt.plot(time, tremor_detrend, 'k', label='Data')
        plt.legend(loc=3, fontsize=14)
        # Plot details
        for j in range(0, J):
            plt.subplot2grid((J + 2, 4), (J - j, 0))
            plt.plot(time, D[j], 'k', label='D' + str(j + 1))
            plt.ylim(minD, maxD)
            plt.legend(loc=3, fontsize=14)
        # Plot smooth
        plt.subplot2grid((J + 2, 4), (0, 0))
        plt.plot(time, S[J], 'k', label='S' + str(J))
        plt.ylim(minD, maxD)
        plt.legend(loc=3, fontsize=14)
        plt.title('Detrended data', fontsize=24)

        # Second method: Moving average (5 days)
        ma_filter = np.repeat(1, 5) / 5
        tremor_averaged_5 = tremor[:-4] - np.convolve(tremor, ma_filter, 'valid')

        # MODWT
        (W, V) = pyramid(tremor_averaged_5, wavelet, J)
        (D, S) = get_DS(tremor_averaged_5, W, wavelet, J)
        
        maxD = max([np.max(Dj) for Dj in D])
        minD = min([np.min(Dj) for Dj in D])

        # Plot data
        plt.subplot2grid((J + 2, 4), (J + 1, 1))
        plt.plot(time[:-4], tremor_averaged_5, 'k', label='Data')
        plt.legend(loc=3, fontsize=14)
        # Plot details
        for j in range(0, J):
            plt.subplot2grid((J + 2, 4), (J - j, 1))
            plt.plot(time[:-4], D[j], 'k', label='D' + str(j + 1))
            plt.ylim(minD, maxD)
            plt.legend(loc=3, fontsize=14)
        # Plot smooth
        plt.subplot2grid((J + 2, 4), (0, 1))
        plt.plot(time[:-4], S[J], 'k', label='S' + str(J))
        plt.ylim(minD, maxD)
        plt.legend(loc=3, fontsize=14)
        plt.title('Moving average (5 days)', fontsize=24)

        # Second method: Moving average (15 days)
        ma_filter = np.repeat(1, 15) / 15
        tremor_averaged_15 = tremor[:-14] - np.convolve(tremor, ma_filter, 'valid')

        # MODWT
        (W, V) = pyramid(tremor_averaged_15, wavelet, J)
        (D, S) = get_DS(tremor_averaged_15, W, wavelet, J)
        
        maxD = max([np.max(Dj) for Dj in D])
        minD = min([np.min(Dj) for Dj in D])

        # Plot data
        plt.subplot2grid((J + 2, 4), (J + 1, 2))
        plt.plot(time[:-14], tremor_averaged_15, 'k', label='Data')
        plt.legend(loc=3, fontsize=14)
        # Plot details
        for j in range(0, J):
            plt.subplot2grid((J + 2, 4), (J - j, 2))
            plt.plot(time[:-14], D[j], 'k', label='D' + str(j + 1))
            plt.ylim(minD, maxD)
            plt.legend(loc=3, fontsize=14)
        # Plot smooth
        plt.subplot2grid((J + 2, 4), (0, 2))
        plt.plot(time[:-14], S[J], 'k', label='S' + str(J))
        plt.ylim(minD, maxD)
        plt.legend(loc=3, fontsize=14)
        plt.title('Moving average (15 days)', fontsize=24)

        # Third method: Low-pass filter
        param1, param2 = butter(4, 0.1, btype='low')
        tremor_filtered = tremor - lfilter(param1, param2, tremor)

        # MODWT
        (W, V) = pyramid(tremor_filtered, wavelet, J)
        (D, S) = get_DS(tremor_filtered, W, wavelet, J)
        
        maxD = max([np.max(Dj) for Dj in D])
        minD = min([np.min(Dj) for Dj in D])

        # Plot data
        plt.subplot2grid((J + 2, 4), (J + 1, 3))
        plt.plot(time, tremor_filtered, 'k', label='Data')
        plt.legend(loc=3, fontsize=14)
        # Plot details
        for j in range(0, J):
            plt.subplot2grid((J + 2, 4), (J - j, 3))
            plt.plot(time, D[j], 'k', label='D' + str(j + 1))
            plt.ylim(minD, maxD)
            plt.legend(loc=3, fontsize=14)
        # Plot smooth
        plt.subplot2grid((J + 2, 4), (0, 3))
        plt.plot(time, S[J], 'k', label='S' + str(J))
        plt.ylim(minD, maxD)
        plt.legend(loc=3, fontsize=14)
        plt.title('Low-pass filter', fontsize=24)

        # Save figure
        plt.savefig('tremor_' + str(index) + '.pdf', format='pdf')
        plt.close(1)

if __name__ == '__main__':

    station_file = '../data/PANGA/stations.txt'
    tremor_file = '../data/tremor/mbbp_cat_d_forHeidi'
    radius_GPS = 50
    radius_tremor = 50
    direction = 'lon'
    dataset = 'cleaned'
    wavelet = 'LA8'
    J = 8
    slowness = np.arange(-0.1, 0.105, 0.005)
    lats = [47.20000, 47.30000, 47.40000, 47.50000, 47.60000, 47.70000, \
        47.80000, 47.90000, 48.00000, 48.10000, 48.20000, 48.30000, 48.40000, \
        48.50000, 48.60000, 48.70000]
    lons = [-122.74294, -122.73912, -122.75036, -122.77612, -122.81591, \
        -122.86920, -122.93549, -123.01425, -123.10498, -123.20716, \
        -123.32028, -123.44381, -123.57726, -123.72011, -123.87183, \
        -124.03193]
    tmin_GPS = 2017.28
    tmax_GPS = 2018.28
    tmin_tremor = 2017.75
    tmax_tremor = 2017.81
    lonmin = -125.4
    lonmax = -121.4
    latmin = 46.3
    latmax = 49.6
    j = 4

    compute_wavelets_tremor(lats, lons, radius_tremor, wavelet, J)
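# A compact sketch (synthetic signal, assumed parameters) contrasting the three
# trend-removal strategies compared above: linear detrend, moving-average
# removal, and the residual of a Butterworth low-pass filter.
import numpy as np
from scipy.signal import detrend, butter, lfilter

t = np.linspace(0.0, 10.0, 1000)
x = 0.3 * t + np.sin(2.0 * np.pi * 1.5 * t) + 0.1 * np.random.randn(t.size)

x_detrended = detrend(x)                          # remove best-fit line
ma = np.repeat(1.0, 15) / 15.0
x_ma = x[:-14] - np.convolve(x, ma, 'valid')      # remove 15-point moving average
b, a = butter(4, 0.1, btype='low')
x_highpass = x - lfilter(b, a, x)                 # remove slow (low-pass) component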
Example #38
DS_tempg = xr.open_dataset("sst_global.nc")
da_tempg = DS_tempg.temperature_anomaly

#local climate
DS_cli = xr.open_dataset("era5_sst_global_2.nc")
DS_cli = DS_cli.sel(time=slice('1981-01-01', '2012-12-31'),
                    longitude=slice(-150, 0),
                    latitude=slice(60, -60))
#US-shape
da_cli_us = mask_shape_border(DS_cli, 'gadm36_USA_0.shp').t2m
da_cli_us_mean = (da_cli_us.to_dataframe().groupby(['time'
                                                    ]).mean()).to_xarray().t2m
da_cli_us_mean = da_cli_us_mean - 273.15
#detrend
mean_temp = da_cli_us_mean.mean()
da_cli_us_det = xr.DataArray(signal.detrend(da_cli_us_mean, axis=0),
                             dims=da_cli_us_mean.dims,
                             coords=da_cli_us_mean.coords,
                             attrs=da_cli_us_mean.attrs) + mean_temp
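# The same pattern, wrapped as a small reusable helper (a sketch; assumes xr,
# signal and the DataArray are available as above): detrend along one axis
# while keeping dims, coords, attrs and the original mean level.
def detrend_keep_mean(da, axis=0):
    mean_level = da.mean()
    detrended = signal.detrend(da, axis=axis)
    return xr.DataArray(detrended, dims=da.dims, coords=da.coords,
                        attrs=da.attrs) + mean_level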
# Climate for each month
monthDict = {
    1: 'Jan',
    2: 'Feb',
    3: 'Mar',
    4: 'Apr',
    5: 'May',
    6: 'Jun',
    7: 'Jul',
    8: 'Aug',
    9: 'Sep',
    10: 'Oct',
Example #39
from datetime import date

import numpy as np
import matplotlib.pyplot as plt
from matplotlib.dates import DateFormatter, DayLocator, MonthLocator
# quotes_historical_yahoo lived in the legacy matplotlib.finance module,
# which has since been removed from matplotlib
from matplotlib.finance import quotes_historical_yahoo
from scipy import signal
from scipy import fftpack
from scipy import optimize


today = date.today()
start = (today.year - 1, today.month, today.day)

quotes = quotes_historical_yahoo("QQQ", start, today)
quotes = np.array(quotes)

dates = quotes.T[0]
qqq = quotes.T[4]


y = signal.detrend(qqq)


alldays = DayLocator()              
months = MonthLocator()
month_formatter = DateFormatter("%b %Y")

fig = plt.figure()
ax = fig.add_subplot(211)

ax.xaxis.set_minor_locator(alldays)
ax.xaxis.set_major_locator(months)
ax.xaxis.set_major_formatter(month_formatter)

amps = np.abs(fftpack.fftshift(fftpack.rfft(y)))
amps[amps < amps.max()] = 0
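# A plausible continuation (not part of the truncated snippet above): invert
# the thresholded amplitude spectrum to obtain a crudely filtered series. The
# phase was discarded by np.abs(), so the reconstruction is only illustrative.
filtered = fftpack.irfft(fftpack.ifftshift(amps))
ax2 = fig.add_subplot(212)
ax2.plot(dates, filtered)
plt.show()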
Example #40
def update():
    global specmax, specmin, specmax_hist, specmin_hist, FFT_old, FFT_hist, redfreq, redwidth, bluefreq, bluewidth, counter, history

    # get last data
    last_index = ft_input.getHeader().nSamples
    begsample = (last_index - window)
    endsample = (last_index - 1)
    data = ft_input.getData([begsample, endsample])
    print "reading from sample %d to %d" % (begsample, endsample)

    # demean and detrend data before filtering to reduce edge artefacts and center timecourse
    # data = data - np.sum(data, axis=0)/float(len(data))
    data = detrend(data, axis=0)

    # Notch filter - DOES NOT WORK
    # data = notch_filter(data, 10, hdr_input.fSample, 30)

    # taper data
    taper = np.hanning(len(data))
    data = data * taper[:, np.newaxis]

    # shift data to next sample
    history = np.roll(history, 1, axis=2)

    for ichan in range(numchannel):

        channr = int(chanarray[ichan])

        # current FFT with smoothing
        FFT_temp = abs(fft(data[:, int(chanarray[ichan])]))
        FFT[ichan] = FFT_temp * lrate + FFT_old[ichan] * (1 - lrate)
        FFT_old[ichan] = FFT[ichan]

        # update history with current FFT
        history[ichan, :, numhistory - 1] = FFT_temp
        FFT_hist = np.nanmean(history, axis=2)

        # user-selected frequency band
        arguments_freqrange = patch.getstring('arguments',
                                              'freqrange').split("-")
        arguments_freqrange = [float(s) for s in arguments_freqrange]
        freqrange = np.greater(freqaxis,
                               arguments_freqrange[0]) & np.less_equal(
                                   freqaxis, arguments_freqrange[1])

        # update panels
        spect[ichan].setData(freqaxis[freqrange], FFT[ichan][freqrange])
        spect_hist[ichan].setData(freqaxis[freqrange],
                                  FFT_hist[ichan][freqrange])

        # adapt the vertical scale to the running mean of max
        specmax[ichan] = float(specmax[ichan]) * (1 - lrate) + lrate * max(
            FFT[ichan][freqrange])
        specmin[ichan] = float(specmin[ichan]) * (1 - lrate) + lrate * min(
            FFT[ichan][freqrange])
        specmax_hist[ichan] = float(specmax_hist[ichan]) * (
            1 - lrate) + lrate * max(FFT_hist[ichan][freqrange])
        specmin_hist[ichan] = float(specmin_hist[ichan]) * (
            1 - lrate) + lrate * min(FFT_hist[ichan][freqrange])

        freqplot[ichan].setYRange(specmin[ichan], specmax[ichan])
        freqplot_hist[ichan].setYRange(specmin_hist[ichan],
                                       specmax_hist[ichan])

        # update plotted lines
        redfreq = patch.getfloat('input',
                                 'redfreq',
                                 default=10. / arguments_freqrange[1])
        redfreq = EEGsynth.rescale(redfreq, slope=scalered,
                                   offset=offsetred) * arguments_freqrange[1]
        redwidth = patch.getfloat('input',
                                  'redwidth',
                                  default=1. / arguments_freqrange[1])
        redwidth = EEGsynth.rescale(redwidth, slope=scalered,
                                    offset=offsetred) * arguments_freqrange[1]
        bluefreq = patch.getfloat('input',
                                  'bluefreq',
                                  default=20. / arguments_freqrange[1])
        bluefreq = EEGsynth.rescale(bluefreq,
                                    slope=scaleblue,
                                    offset=offsetblue) * arguments_freqrange[1]
        bluewidth = patch.getfloat('input',
                                   'bluewidth',
                                   default=4. / arguments_freqrange[1])
        bluewidth = EEGsynth.rescale(
            bluewidth, slope=scaleblue,
            offset=offsetblue) * arguments_freqrange[1]

        redleft[ichan].setData(x=[redfreq - redwidth, redfreq - redwidth],
                               y=[specmin[ichan], specmax[ichan]])
        redright[ichan].setData(x=[redfreq + redwidth, redfreq + redwidth],
                                y=[specmin[ichan], specmax[ichan]])
        blueleft[ichan].setData(x=[bluefreq - bluewidth, bluefreq - bluewidth],
                                y=[specmin[ichan], specmax[ichan]])
        blueright[ichan].setData(
            x=[bluefreq + bluewidth, bluefreq + bluewidth],
            y=[specmin[ichan], specmax[ichan]])
        redleft_hist[ichan].setData(
            x=[redfreq - redwidth, redfreq - redwidth],
            y=[specmin_hist[ichan], specmax_hist[ichan]])
        redright_hist[ichan].setData(
            x=[redfreq + redwidth, redfreq + redwidth],
            y=[specmin_hist[ichan], specmax_hist[ichan]])
        blueleft_hist[ichan].setData(
            x=[bluefreq - bluewidth, bluefreq - bluewidth],
            y=[specmin_hist[ichan], specmax_hist[ichan]])
        blueright_hist[ichan].setData(
            x=[bluefreq + bluewidth, bluefreq + bluewidth],
            y=[specmin_hist[ichan], specmax_hist[ichan]])

    # update labels at plotted lines
    text_redleft.setText('%0.1f' % (redfreq - redwidth))
    text_redleft.setPos(redfreq - redwidth, specmax[0])
    text_redright.setText('%0.1f' % (redfreq + redwidth))
    text_redright.setPos(redfreq + redwidth, specmax[0])
    text_blueleft.setText('%0.1f' % (bluefreq - bluewidth))
    text_blueleft.setPos(bluefreq - bluewidth, specmax[0])
    text_blueright.setText('%0.1f' % (bluefreq + bluewidth))
    text_blueright.setPos(bluefreq + bluewidth, specmax[0])

    text_redleft_hist.setText('%0.1f' % (redfreq - redwidth))
    text_redleft_hist.setPos(redfreq - redwidth, specmax_hist[0])
    text_redright_hist.setText('%0.1f' % (redfreq + redwidth))
    text_redright_hist.setPos(redfreq + redwidth, specmax_hist[0])
    text_blueleft_hist.setText('%0.1f' % (bluefreq - bluewidth))
    text_blueleft_hist.setPos(bluefreq - bluewidth, specmax_hist[0])
    text_blueright_hist.setText('%0.1f' % (bluefreq + bluewidth))
    text_blueright_hist.setPos(bluefreq + bluewidth, specmax_hist[0])

    key = "%s.%s.%s" % (patch.getstring('output', 'prefix'), 'redband', 'low')
    r.set(key, redfreq - redwidth)
    key = "%s.%s.%s" % (patch.getstring('output', 'prefix'), 'redband', 'high')
    r.set(key, redfreq + redwidth)
    key = "%s.%s.%s" % (patch.getstring('output', 'prefix'), 'blueband', 'low')
    r.set(key, bluefreq - bluewidth)
    key = "%s.%s.%s" % (patch.getstring('output',
                                        'prefix'), 'blueband', 'high')
    r.set(key, bluefreq + bluewidth)
Example #41
    def get_results(self):
        """Execute API calls to the timeseries data and tweet data we need for analysis. Perform analysis
        as we go because we often need results for next steps."""
        ######################
        # (1) Get the timeline
        ######################
        logging.info("retrieving timeline counts")
        results_timeseries = Results(self.user,
                                     self.password,
                                     self.stream_url,
                                     self.options.paged,
                                     self.options.output_file_path,
                                     pt_filter=self.options.filter,
                                     max_results=int(self.options.max),
                                     start=self.options.start,
                                     end=self.options.end,
                                     count_bucket=self.options.count_bucket,
                                     show_query=self.options.query)
        # sort by date
        res_timeseries = sorted(results_timeseries.get_time_series(),
                                key=itemgetter(0))
        # if we only have one activity, probably don't do all of this
        if len(res_timeseries) <= 1:
            raise ValueError(
                "You've only pulled {} Tweets. time series analysis isn't what you want."
                .format(len(res_timeseries)))
        # calculate total time interval span
        time_min_date = min(res_timeseries, key=itemgetter(2))[2]
        time_max_date = max(res_timeseries, key=itemgetter(2))[2]
        time_min = float(calendar.timegm(time_min_date.timetuple()))
        time_max = float(calendar.timegm(time_max_date.timetuple()))
        time_span = time_max - time_min
        logging.debug("time_min = {}, time_max = {}, time_span = {}".format(
            time_min, time_max, time_span))
        # create a simple object to hold our data
        ts = TimeSeries()
        ts.dates = []
        ts.x = []
        ts.counts = []
        # load and format data
        for i in res_timeseries:
            ts.dates.append(i[2])
            ts.counts.append(float(i[1]))
            # create an independent variable in interval [0.0,1.0]
            ts.x.append((calendar.timegm(
                datetime.datetime.strptime(i[0], DATE_FMT).timetuple()) -
                         time_min) / time_span)
        logging.info("read {} time items from search API".format(len(
            ts.dates)))
        if len(ts.dates) < 35:
            logging.warn(
                "peak detection with with fewer than ~35 points is unreliable!"
            )
        logging.debug('dates: ' + ','.join(map(str, ts.dates[:10])) + "...")
        logging.debug('counts: ' + ','.join(map(str, ts.counts[:10])) + "...")
        logging.debug('indep var: ' + ','.join(map(str, ts.x[:10])) + "...")
        ######################
        # (1.1) Get a second timeline?
        ######################
        if self.options.second_filter is not None:
            logging.info("retrieving second timeline counts")
            results_timeseries = Results(
                self.user,
                self.password,
                self.stream_url,
                self.options.paged,
                self.options.output_file_path,
                pt_filter=self.options.second_filter,
                max_results=int(self.options.max),
                start=self.options.start,
                end=self.options.end,
                count_bucket=self.options.count_bucket,
                show_query=self.options.query)
            # sort by date
            second_res_timeseries = sorted(
                results_timeseries.get_time_series(), key=itemgetter(0))
            if len(second_res_timeseries) != len(res_timeseries):
                logging.error("time series of different sizes not allowed")
            else:
                ts.second_counts = []
                # load and format data
                for i in second_res_timeseries:
                    ts.second_counts.append(float(i[1]))
                logging.info("read {} time items from search API".format(
                    len(ts.second_counts)))
                logging.debug('second counts: ' +
                              ','.join(map(str, ts.second_counts[:10])) +
                              "...")
        ######################
        # (2) Detrend and remove prominent period
        ######################
        logging.info("detrending timeline counts")
        no_trend = signal.detrend(np.array(ts.counts))
        # determine period of data
        df = (ts.dates[1] - ts.dates[0]).total_seconds()
        if df == 86400:
            # day counts, average over week
            n_buckets = 7
            n_avgs = {i: [] for i in range(n_buckets)}
            for t, c in zip(ts.dates, no_trend):
                n_avgs[t.weekday()].append(c)
        elif df == 3600:
            # hour counts, average over day
            n_buckets = 24
            n_avgs = {i: [] for i in range(n_buckets)}
            for t, c in zip(ts.dates, no_trend):
                n_avgs[t.hour].append(c)
        elif df == 60:
            # minute counts; average over day
            n_buckets = 24 * 60
            n_avgs = {i: [] for i in range(n_buckets)}
            for t, c in zip(ts.dates, no_trend):
                n_avgs[t.minute].append(c)
        else:
            sys.stderr.write("Weird interval problem! Exiting.\n")
            logging.error("Weird interval problem! Exiting.\n")
            sys.exit()
        logging.info("averaging over periods of {} buckets".format(n_buckets))
        # remove upper outliers from averages
        df_avg_all = {i: np.average(n_avgs[i]) for i in range(n_buckets)}
        logging.debug("bucket averages: {}".format(','.join(
            map(str, [df_avg_all[i] for i in df_avg_all]))))
        n_avgs_remove_outliers = {
            i: [
                j for j in n_avgs[i]
                if abs(j - df_avg_all[i]) / df_avg_all[i] < (1. + OUTLIER_FRAC)
            ]
            for i in range(n_buckets)
        }
        df_avg = {
            i: np.average(n_avgs_remove_outliers[i])
            for i in range(n_buckets)
        }
        logging.debug("bucket averages w/o outliers: {}".format(','.join(
            map(str, [df_avg[i] for i in df_avg]))))

        # flatten cycle, using the same bucketing as above (weekday, hour or minute)
        bucket_of = {7: (lambda t: t.weekday()),
                     24: (lambda t: t.hour)}.get(n_buckets, lambda t: t.minute)
        ts.counts_no_cycle_trend = np.array([
            no_trend[i] - df_avg[bucket_of(ts.dates[i])]
            for i in range(len(ts.counts))
        ])
        logging.debug('no trend: ' +
                      ','.join(map(str, ts.counts_no_cycle_trend[:10])) +
                      "...")

        ######################
        # (3) Moving average
        ######################
        ts.moving = np.convolve(ts.counts,
                                np.ones((N_MOVING, )) / N_MOVING,
                                mode='valid')
        logging.debug('moving ({}): '.format(N_MOVING) +
                      ','.join(map(str, ts.moving[:10])) + "...")

        ######################
        # (4) Peak detection
        ######################
        peakind = signal.find_peaks_cwt(ts.counts_no_cycle_trend,
                                        np.arange(MIN_PEAK_WIDTH,
                                                  MAX_PEAK_WIDTH),
                                        min_snr=MIN_SNR)
        n_peaks = min(MAX_N_PEAKS, len(peakind))
        logging.debug('peaks ({}): '.format(n_peaks) +
                      ','.join(map(str, peakind)))
        logging.debug('peaks ({}): '.format(n_peaks) +
                      ','.join(map(str, [ts.dates[i] for i in peakind])))

        # top peaks determined by peak volume, better way?
        # peak detector algorithm:
        #      * middle of peak (of unknown width)
        #      * finds peaks up to MAX_PEAK_WIDTH wide
        #
        #   algorithm for getting peak start, peak and end parameters:
        #      find max, find fwhm,
        #      find start, step past peak, keep track of volume and peak height,
        #      stop at end of period or when timeseries turns upward

        peaks = []
        for i in peakind:
            # find the first max in the possible window
            i_start = max(0, i - SEARCH_PEAK_WIDTH)
            i_finish = min(len(ts.counts) - 1, i + SEARCH_PEAK_WIDTH)
            p_max = max(ts.counts[i_start:i_finish])
            h_max = p_max / 2.
            # i_max not center
            i_max = i_start + ts.counts[i_start:i_finish].index(p_max)
            i_start, i_finish = i_max, i_max
            # start at peak, and go back and forward to find start and end
            while i_start >= 1:
                if (ts.counts[i_start - 1] <= h_max
                        or ts.counts[i_start - 1] >= ts.counts[i_start]
                        or i_start - 1 <= 0):
                    break
                i_start -= 1
            while i_finish < len(ts.counts) - 1:
                if (ts.counts[i_finish + 1] <= h_max
                        or ts.counts[i_finish + 1] >= ts.counts[i_finish]
                        or i_finish + 1 >= len(ts.counts)):
                    break
                i_finish += 1
            # i is center of peak so balance window
            delta_i = max(1, i - i_start)
            if i_finish - i > delta_i:
                delta_i = i_finish - i
            # final est of start and finish
            i_finish = min(len(ts.counts) - 1, i + delta_i)
            i_start = max(0, i - delta_i)
            p_volume = sum(ts.counts[i_start:i_finish])
            peaks.append([
                i, p_volume,
                (i, i_start, i_max, i_finish, h_max, p_max, p_volume,
                 ts.dates[i_start], ts.dates[i_max], ts.dates[i_finish])
            ])
        # top n_peaks by volume
        top_peaks = sorted(peaks, key=itemgetter(1))[-n_peaks:]
        # re-sort peaks by date
        ts.top_peaks = sorted(top_peaks, key=itemgetter(0))
        logging.debug('top peaks ({}): '.format(len(ts.top_peaks)) +
                      ','.join(map(str, ts.top_peaks[:4])) + "...")

        ######################
        # (5) high/low frequency
        ######################
        ts.cycle, ts.trend = sm.tsa.filters.hpfilter(np.array(ts.counts))
        logging.debug('cycle: ' + ','.join(map(str, ts.cycle[:10])) + "...")
        logging.debug('trend: ' + ','.join(map(str, ts.trend[:10])) + "...")

        ######################
        # (6) n-grams for top peaks
        ######################
        ts.topics = []
        if self.options.get_topics:
            logging.info("retrieving tweets for peak topics")
            for a in ts.top_peaks:
                # start at peak
                ds = datetime.datetime.strftime(a[2][8], DATE_FMT2)
                # estimate how long to get TWEET_SAMPLE tweets
                # a[2][5] is the max tweet count in the peak period
                if a[2][5] > 0:
                    est_periods = float(TWEET_SAMPLE) / a[2][5]
                else:
                    logging.warn(
                        "peak with zero max tweets ({}), setting est_periods to 1"
                        .format(a))
                    est_periods = 1
                # df comes from above, in seconds
                # time resolution is hours
                est_time = max(int(est_periods * df), 60)
                logging.debug("est_periods={}, est_time={}".format(
                    est_periods, est_time))
                #
                if a[2][8] + datetime.timedelta(seconds=est_time) < a[2][9]:
                    de = datetime.datetime.strftime(
                        a[2][8] + datetime.timedelta(seconds=est_time),
                        DATE_FMT2)
                elif a[2][8] < a[2][9]:
                    de = datetime.datetime.strftime(a[2][9], DATE_FMT2)
                else:
                    de = datetime.datetime.strftime(
                        a[2][8] + datetime.timedelta(seconds=60), DATE_FMT2)
                logging.info(
                    "retreive data for peak index={} in date range [{},{}]".
                    format(a[0], ds, de))
                res = Results(self.user,
                              self.password,
                              self.stream_url,
                              self.options.paged,
                              self.options.output_file_path,
                              pt_filter=self.options.filter,
                              max_results=int(self.options.max),
                              start=ds,
                              end=de,
                              count_bucket=None,
                              show_query=self.options.query,
                              hard_max=TWEET_SAMPLE)
                logging.info("retrieved {} records".format(len(res)))
                n_grams_counts = list(
                    res.get_top_grams(n=self.token_list_size))
                ts.topics.append(n_grams_counts)
                logging.debug('n_grams for peak index={}: '.format(a[0]) +
                              ','.join(
                                  map(str, [
                                      i[4].encode("utf-8", "ignore")
                                      for i in n_grams_counts
                                  ][:10])) + "...")
        return ts
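# Minimal sketch (synthetic hourly counts) of step (2) above: detrend the
# series, average the residual per hour-of-day bucket, and subtract those
# bucket averages to flatten the daily cycle.
import numpy as np
from scipy import signal

hours = np.arange(24 * 14)                    # two weeks of hourly buckets
counts = 50.0 + 0.1 * hours \
    + 10.0 * np.sin(2.0 * np.pi * hours / 24.0) \
    + np.random.randn(hours.size)
no_trend = signal.detrend(counts)
bucket_avg = {h: no_trend[hours % 24 == h].mean() for h in range(24)}
no_cycle = np.array([no_trend[i] - bucket_avg[i % 24] for i in range(hours.size)])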
Example #42
def absolute_sdm(obs_cube, mod_cube, sce_cubes, *args, **kwargs):
    """
    apply absolute scaled distribution mapping to all scenario cubes
    assuming a normal distributed parameter

    Args:

    * obs_cube (:class:`iris.cube.Cube`):
        the observational data

    * mod_cube (:class:`iris.cube.Cube`):
        the model data at the reference period

    * sce_cubes (:class:`iris.cube.CubeList`):
        the scenario data that shall be corrected

    Kwargs:

    * cdf_threshold (float):
        limit of the cdf-values (default: .99999)
    """
    from scipy.stats import norm
    from scipy.signal import detrend

    cdf_threshold = kwargs.get('cdf_threshold', .99999)

    obs_cube_mask = np.ma.getmask(obs_cube.data)
    cell_iterator = np.nditer(obs_cube.data[0], flags=['multi_index'])
    while not cell_iterator.finished:
        index_list = list(cell_iterator.multi_index)
        cell_iterator.iternext()

        index_list.insert(0, 0)
        index = tuple(index_list)
        if obs_cube_mask.any() and obs_cube_mask[index]:
            continue

        index_list[0] = slice(0, None, 1)
        index = tuple(index_list)

        # consider only cells with valid observational data
        obs_data = obs_cube.data[index]
        mod_data = mod_cube.data[index]

        obs_len = len(obs_data)
        mod_len = len(mod_data)

        obs_mean = obs_data.mean()
        mod_mean = mod_data.mean()

        # detrend the data
        obs_detrended = detrend(obs_data)
        mod_detrended = detrend(mod_data)

        obs_norm = norm.fit(obs_detrended)
        mod_norm = norm.fit(mod_detrended)

        obs_cdf = norm.cdf(np.sort(obs_detrended), *obs_norm)
        mod_cdf = norm.cdf(np.sort(mod_detrended), *mod_norm)
        obs_cdf = np.maximum(np.minimum(obs_cdf, cdf_threshold),
                             1 - cdf_threshold)
        mod_cdf = np.maximum(np.minimum(mod_cdf, cdf_threshold),
                             1 - cdf_threshold)

        for sce_cube in sce_cubes:
            sce_data = sce_cube[index].data

            sce_len = len(sce_data)
            sce_mean = sce_data.mean()

            sce_detrended = detrend(sce_data)
            sce_diff = sce_data - sce_detrended
            sce_argsort = np.argsort(sce_detrended)

            sce_norm = norm.fit(sce_detrended)
            sce_cdf = norm.cdf(np.sort(sce_detrended), *sce_norm)
            sce_cdf = np.maximum(np.minimum(sce_cdf, cdf_threshold),
                                 1 - cdf_threshold)

            # interpolate cdf-values for obs and mod to the length of the
            # scenario
            obs_cdf_intpol = np.interp(np.linspace(1, obs_len, sce_len),
                                       np.linspace(1, obs_len, obs_len),
                                       obs_cdf)
            mod_cdf_intpol = np.interp(np.linspace(1, mod_len, sce_len),
                                       np.linspace(1, mod_len, mod_len),
                                       mod_cdf)

            # adapt the observation cdfs
            # split the tails of the cdfs around the center
            obs_cdf_shift = obs_cdf_intpol - .5
            mod_cdf_shift = mod_cdf_intpol - .5
            sce_cdf_shift = sce_cdf - .5
            obs_inverse = 1. / (.5 - np.abs(obs_cdf_shift))
            mod_inverse = 1. / (.5 - np.abs(mod_cdf_shift))
            sce_inverse = 1. / (.5 - np.abs(sce_cdf_shift))
            adapted_cdf = np.sign(obs_cdf_shift) * (
                1. - 1. / (obs_inverse * sce_inverse / mod_inverse))
            adapted_cdf[adapted_cdf < 0] += 1.
            adapted_cdf = np.maximum(np.minimum(adapted_cdf, cdf_threshold),
                                     1 - cdf_threshold)

            xvals = norm.ppf(np.sort(adapted_cdf), *obs_norm) \
                + obs_norm[-1] / mod_norm[-1] \
                * (norm.ppf(sce_cdf, *sce_norm) - norm.ppf(sce_cdf, *mod_norm))
            xvals -= xvals.mean()
            xvals += obs_mean + (sce_mean - mod_mean)

            correction = np.zeros(sce_len)
            correction[sce_argsort] = xvals
            correction += sce_diff - sce_mean
            sce_cube.data[index] = correction
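# Hedged usage sketch (synthetic cubes, assumed shapes and names): correct a
# scenario cube in place against observations and a reference-period model run.
import numpy as np
import iris

shape = (360, 2, 3)                                    # (time, lat, lon), assumed
obs = iris.cube.Cube(np.random.normal(10.0, 2.0, shape), long_name='tas')
mod = iris.cube.Cube(np.random.normal(11.0, 2.5, shape), long_name='tas')
sce_list = iris.cube.CubeList(
    [iris.cube.Cube(np.random.normal(12.0, 2.5, shape), long_name='tas')])
absolute_sdm(obs, mod, sce_list)                       # sce_list data is overwritten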
Example #43
#Load the silizon data
fname = 'D:/Data/default_fname_%3d.fits' % si_num
si_data = np.squeeze(fits.getdata(fname))
fname = 'D:/Data/default_fname_%3d.fits' % atm_num
atm_data = np.squeeze(fits.getdata(fname))

c = 2.99792458e11  #[mm/s]
F_max = c / (4 * dx * np.cos(12.2 * np.pi / 180) *
             np.cos(5.78 / 2. * np.pi / 180) * 1e12)  #[THz]
Fs = F_max / 2 * np.linspace(0, 1, 1598 // 2)

trans = []

for i in range(40):
    si = detrend(si_data[:, i])

    si_ind = np.argmax(np.abs(si))
    si_nsteps = np.min((si_ind, len(si) - si_ind))
    si = np.roll(si[si_ind - si_nsteps:si_ind + si_nsteps], si_nsteps)
    si_pos = np.roll(pos[si_ind - si_nsteps:si_ind + si_nsteps], si_nsteps)

    si_fft = np.fft.fft(si)
    si_phi = np.angle(si_fft)
    si_amplitude = np.abs(si_fft)**2
    si_fft_corr = si_fft.real * np.cos(si_phi) + si_fft.imag * np.sin(si_phi)
    si_psd = np.abs(si_fft_corr[:len(si_fft) // 2])**2

    #Load the atmosphere data

    atm = detrend(np.average(fits.getdata(fname), axis=(0, 2))) * scale_atm
Example #44
    def _detect_outliers_core(self, imgfile, motionfile, runidx, cwd=None):
        """
        Core routine for detecting outliers
        """
        from scipy import signal

        if not cwd:
            cwd = os.getcwd()

        # read in functional image
        if isinstance(imgfile, (str, bytes)):
            nim = load(imgfile, mmap=NUMPY_MMAP)
        elif isinstance(imgfile, list):
            if len(imgfile) == 1:
                nim = load(imgfile[0], mmap=NUMPY_MMAP)
            else:
                images = [load(f, mmap=NUMPY_MMAP) for f in imgfile]
                nim = funcs.concat_images(images)

        # compute global intensity signal
        (x, y, z, timepoints) = nim.shape

        data = nim.get_data()
        affine = nim.affine
        g = np.zeros((timepoints, 1))
        masktype = self.inputs.mask_type
        if masktype == "spm_global":  # spm_global like calculation
            iflogger.debug("art: using spm global")
            intersect_mask = self.inputs.intersect_mask
            if intersect_mask:
                mask = np.ones((x, y, z), dtype=bool)
                for t0 in range(timepoints):
                    vol = data[:, :, :, t0]
                    # Use an SPM like approach
                    mask_tmp = vol > (np.nanmean(vol) / self.inputs.global_threshold)
                    mask = mask * mask_tmp
                for t0 in range(timepoints):
                    vol = data[:, :, :, t0]
                    g[t0] = np.nanmean(vol[mask])
                if len(find_indices(mask)) < (np.prod((x, y, z)) / 10):
                    intersect_mask = False
                    g = np.zeros((timepoints, 1))
            if not intersect_mask:
                iflogger.info("not intersect_mask is True")
                mask = np.zeros((x, y, z, timepoints))
                for t0 in range(timepoints):
                    vol = data[:, :, :, t0]
                    mask_tmp = vol > (np.nanmean(vol) / self.inputs.global_threshold)
                    mask[:, :, :, t0] = mask_tmp
                    g[t0] = np.nansum(vol * mask_tmp) / np.nansum(mask_tmp)
        elif masktype == "file":  # uses a mask image to determine intensity
            maskimg = load(self.inputs.mask_file, mmap=NUMPY_MMAP)
            mask = maskimg.get_data()
            affine = maskimg.affine
            mask = mask > 0.5
            for t0 in range(timepoints):
                vol = data[:, :, :, t0]
                g[t0] = np.nanmean(vol[mask])
        elif masktype == "thresh":  # uses a fixed signal threshold
            for t0 in range(timepoints):
                vol = data[:, :, :, t0]
                mask = vol > self.inputs.mask_threshold
                g[t0] = np.nanmean(vol[mask])
        else:
            mask = np.ones((x, y, z))
            g = np.nanmean(data[mask > 0, :], 1)

        # compute normalized intensity values
        gz = signal.detrend(g, axis=0)  # detrend the signal
        if self.inputs.use_differences[1]:
            gz = np.concatenate((np.zeros((1, 1)), np.diff(gz, n=1, axis=0)), axis=0)
        gz = (gz - np.mean(gz)) / np.std(gz)  # normalize the detrended signal
        iidx = find_indices(abs(gz) > self.inputs.zintensity_threshold)

        # read in motion parameters
        mc_in = np.loadtxt(motionfile)
        mc = deepcopy(mc_in)

        (
            artifactfile,
            intensityfile,
            statsfile,
            normfile,
            plotfile,
            displacementfile,
            maskfile,
        ) = self._get_output_filenames(imgfile, cwd)
        mask_img = Nifti1Image(mask.astype(np.uint8), affine)
        mask_img.to_filename(maskfile)

        if self.inputs.use_norm:
            brain_pts = None
            if self.inputs.bound_by_brainmask:
                voxel_coords = np.nonzero(mask)
                coords = np.vstack(
                    (voxel_coords[0], np.vstack((voxel_coords[1], voxel_coords[2])))
                ).T
                brain_pts = np.dot(
                    affine, np.hstack((coords, np.ones((coords.shape[0], 1)))).T
                )
            # calculate the norm of the motion parameters
            normval, displacement = _calc_norm(
                mc,
                self.inputs.use_differences[0],
                self.inputs.parameter_source,
                brain_pts=brain_pts,
            )
            tidx = find_indices(normval > self.inputs.norm_threshold)
            ridx = find_indices(normval < 0)
            if displacement is not None:
                dmap = np.zeros((x, y, z, timepoints), dtype=float)
                for i in range(timepoints):
                    dmap[
                        voxel_coords[0], voxel_coords[1], voxel_coords[2], i
                    ] = displacement[i, :]
                dimg = Nifti1Image(dmap, affine)
                dimg.to_filename(displacementfile)
        else:
            if self.inputs.use_differences[0]:
                mc = np.concatenate(
                    (np.zeros((1, 6)), np.diff(mc_in, n=1, axis=0)), axis=0
                )
            traval = mc[:, 0:3]  # translation parameters (mm)
            rotval = mc[:, 3:6]  # rotation parameters (rad)
            tidx = find_indices(
                np.sum(abs(traval) > self.inputs.translation_threshold, 1) > 0
            )
            ridx = find_indices(
                np.sum(abs(rotval) > self.inputs.rotation_threshold, 1) > 0
            )

        outliers = np.unique(np.union1d(iidx, np.union1d(tidx, ridx)))

        # write output to outputfile
        np.savetxt(artifactfile, outliers, fmt=b"%d", delimiter=" ")
        np.savetxt(intensityfile, g, fmt=b"%.2f", delimiter=" ")
        if self.inputs.use_norm:
            np.savetxt(normfile, normval, fmt=b"%.4f", delimiter=" ")

        if isdefined(self.inputs.save_plot) and self.inputs.save_plot:
            import matplotlib

            matplotlib.use(config.get("execution", "matplotlib_backend"))
            import matplotlib.pyplot as plt

            fig = plt.figure()
            if isdefined(self.inputs.use_norm) and self.inputs.use_norm:
                plt.subplot(211)
            else:
                plt.subplot(311)
            self._plot_outliers_with_wave(gz, iidx, "Intensity")
            if isdefined(self.inputs.use_norm) and self.inputs.use_norm:
                plt.subplot(212)
                self._plot_outliers_with_wave(
                    normval, np.union1d(tidx, ridx), "Norm (mm)"
                )
            else:
                diff = ""
                if self.inputs.use_differences[0]:
                    diff = "diff"
                plt.subplot(312)
                self._plot_outliers_with_wave(traval, tidx, "Translation (mm)" + diff)
                plt.subplot(313)
                self._plot_outliers_with_wave(rotval, ridx, "Rotation (rad)" + diff)
            plt.savefig(plotfile)
            plt.close(fig)

        motion_outliers = np.union1d(tidx, ridx)
        stats = [
            {"motion_file": motionfile, "functional_file": imgfile},
            {
                "common_outliers": len(np.intersect1d(iidx, motion_outliers)),
                "intensity_outliers": len(np.setdiff1d(iidx, motion_outliers)),
                "motion_outliers": len(np.setdiff1d(motion_outliers, iidx)),
            },
            {
                "motion": [
                    {"using differences": self.inputs.use_differences[0]},
                    {
                        "mean": np.mean(mc_in, axis=0).tolist(),
                        "min": np.min(mc_in, axis=0).tolist(),
                        "max": np.max(mc_in, axis=0).tolist(),
                        "std": np.std(mc_in, axis=0).tolist(),
                    },
                ]
            },
            {
                "intensity": [
                    {"using differences": self.inputs.use_differences[1]},
                    {
                        "mean": np.mean(gz, axis=0).tolist(),
                        "min": np.min(gz, axis=0).tolist(),
                        "max": np.max(gz, axis=0).tolist(),
                        "std": np.std(gz, axis=0).tolist(),
                    },
                ]
            },
        ]
        if self.inputs.use_norm:
            stats.insert(
                3,
                {
                    "motion_norm": {
                        "mean": np.mean(normval, axis=0).tolist(),
                        "min": np.min(normval, axis=0).tolist(),
                        "max": np.max(normval, axis=0).tolist(),
                        "std": np.std(normval, axis=0).tolist(),
                    }
                },
            )
        save_json(statsfile, stats)
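# Minimal sketch (synthetic data, assumed threshold) of the intensity-outlier
# rule used above: detrend the global-intensity trace, z-score it, and flag
# timepoints whose absolute z-value exceeds zintensity_threshold.
import numpy as np
from scipy import signal

g = 100.0 + 0.05 * np.arange(200) + np.random.randn(200)
g[57] += 8.0                                    # inject an artificial spike
gz = signal.detrend(g, axis=0)
gz = (gz - np.mean(gz)) / np.std(gz)
outliers = np.nonzero(np.abs(gz) > 3.0)[0]      # zintensity_threshold assumed = 3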
Example #45
def calculate_complex_cross_coherence(time_series, epoch_length,
                                      segment_length, segment_shift,
                                      window_function, average_segments,
                                      subtract_epoch_average, zeropad,
                                      detrend_ts, max_freq, npat):
    """
    # type: (TimeSeries, float, float, float, str, bool, bool, int, bool, float, float)  -> ComplexCoherenceSpectrum
    Calculate the FFT, Cross Coherence and Complex Coherence of time_series
    broken into (possibly) epochs and segments of length `epoch_length` and
    `segment_length` respectively, filtered by `window_function`.

    Parameters
    ----------

    time_series : TimeSeries
    The timeseries for which the CrossCoherence and ComplexCoherence is to be computed.

    epoch_length : float
    In general for lengthy EEG recordings (~30 min), the timeseries are divided into equally
    sized segments (~ 20-40s). These contain the  event that is to be characterized by means of the
    cross coherence. Additionally each epoch block will be further divided into segments to  which
    the FFT will be applied.

    segment_length : float
    The segment length determines the frequency resolution of the resulting power spectra --
    longer windows produce finer frequency resolution.

    segment_shift : float
    Time length by which neighboring segments are shifted. e.g.
    `segment shift` = `segment_length` / 2 means 50% overlapping segments.

    window_function : str
    Windowing functions can be applied before the FFT is performed.

    average_segments : bool
    Flag. If `True`, compute the mean Cross Spectrum across  segments.

    subtract_epoch_average: bool
    Flag. If `True` and if the number of epochs is > 1, you can optionally subtract the
    mean across epochs before computing the complex coherence.

    zeropad : int
    Adds `n` zeros at the end of each segment and at the end of window_function. It is not yet functional.

    detrend_ts : bool
    Flag. If `True` removes linear trend along the time dimension before applying FFT.

    max_freq : float
    Maximum frequency points (e.g. 32., 64., 128.) represented in the output. Default is segment_length / 2 + 1.

    npat : float
    This attribute appears to be related to an input projection matrix... Which is not yet implemented.
    """
    # self.time_series.trait["data"].log_debug(owner=cls_attr_name)
    tpts = time_series.data.shape[0]
    time_series_length = tpts * time_series.sample_period

    if len(time_series.data.shape) > 2:
        time_series_data = numpy.squeeze(
            (time_series.data.mean(axis=-1)).mean(axis=1))

    # Divide time-series into epochs, no overlapping
    if epoch_length > 0.0:
        nepochs = int(numpy.floor(time_series_length / epoch_length))
        epoch_tpts = int(epoch_length / time_series.sample_period)
        time_series_length = epoch_length
        tpts = epoch_tpts
    else:
        epoch_length = time_series_length
        nepochs = int(numpy.ceil(time_series_length / epoch_length))

    # Segment time-series, overlapping if necessary
    nseg = int(numpy.floor(time_series_length / segment_length))
    if nseg > 1:
        seg_tpts = int(segment_length / time_series.sample_period)
        seg_shift_tpts = int(segment_shift / time_series.sample_period)
        nseg = int(numpy.floor((tpts - seg_tpts) / seg_shift_tpts) + 1)
    else:
        segment_length = time_series_length
        seg_tpts = time_series_data.shape[0]

    # Frequency
    nfreq = int(
        numpy.min([max_freq,
                   numpy.floor((seg_tpts + zeropad) / 2.0) + 1]))

    resulted_shape, av_result_shape = complex_coherence_result_shape(
        time_series.data.shape, max_freq, epoch_length, segment_length,
        segment_shift, time_series.sample_period, zeropad, average_segments)
    cs = numpy.zeros(resulted_shape, dtype=numpy.complex128)
    av = numpy.matrix(numpy.zeros(av_result_shape, dtype=numpy.complex128))
    coh = numpy.zeros(resulted_shape, dtype=numpy.complex128)

    # Apply windowing function
    if window_function is not None:
        if window_function not in SUPPORTED_WINDOWING_FUNCTIONS:
            log.error("Windowing function is: %s" % window_function)
            log.error("Must be in: %s" % str(SUPPORTED_WINDOWING_FUNCTIONS))

        window_func = eval("".join(("numpy.", window_function)))
        win = window_func(seg_tpts)
        window_mask = (numpy.kron(numpy.ones((time_series_data.shape[1], 1)),
                                  win)).T

    nave = 0

    for j in numpy.arange(nepochs):
        data = time_series_data[j * epoch_tpts:(j + 1) * epoch_tpts, :]

        for i in numpy.arange(nseg):  # average over all segments;
            ts = data[i * seg_shift_tpts:i * seg_shift_tpts + seg_tpts, :]

            if detrend_ts:
                ts = sp_signal.detrend(ts, axis=0)

            datalocfft = numpy.fft.fft(ts * window_mask, axis=0)
            datalocfft = numpy.matrix(datalocfft)

            for f in numpy.arange(nfreq):  # for all frequencies
                if npat == 1:
                    if not average_segments:
                        cs[:, :, f, i] += numpy.conjugate(
                            datalocfft[f, :].conj().T * datalocfft[f, :])
                        av[:, f,
                           i] += numpy.conjugate(datalocfft[f, :].conj().T)
                    else:
                        cs[:, :, f] += numpy.conjugate(
                            datalocfft[f, :].conj().T * datalocfft[f, :])
                        av[:, f] += numpy.conjugate(datalocfft[f, :].conj().T)
                else:
                    if not average_segments:
                        cs[:, :, f, j, i] = numpy.conjugate(
                            datalocfft[f, :].conj().T * datalocfft[f, :])
                        av[:, f, j,
                           i] = numpy.conjugate(datalocfft[f, :].conj().T)
                    else:
                        cs[:, :, f, j] += numpy.conjugate(
                            datalocfft[f, :].conj().T * datalocfft[f, :])
                        av[:, f,
                           j] += numpy.conjugate(datalocfft[f, :].conj().T)
            del datalocfft

        nave += 1.0

    # End of FORs
    if not average_segments:
        cs = cs / nave
        av = av / nave
    else:
        nave = nave * nseg
        cs = cs / nave
        av = av / nave

    # Subtract average
    for f in numpy.arange(nfreq):
        if subtract_epoch_average:
            if npat == 1:
                if not average_segments:
                    for i in numpy.arange(nseg):
                        cs[:, :, f,
                           i] = cs[:, :, f,
                                   i] - av[:, f, i] * av[:, f, i].conj().T
                else:
                    cs[:, :, f] = cs[:, :, f] - av[:, f] * av[:, f].conj().T
            else:
                if not average_segments:
                    for i in numpy.arange(nseg):
                        for j in numpy.arange(nepochs):
                            cs[:, :, f, j,
                               i] = cs[:, :, f, j,
                                       i] - av[:, f, j, i] * av[:, f, j,
                                                                i].conj().T

                else:
                    for j in numpy.arange(nepochs):
                        cs[:, :, f,
                           j] = cs[:, :, f,
                                   j] - av[:, f, j] * av[:, f, j].conj().T

    # Compute Complex Coherence
    ndim = len(cs.shape)
    if ndim == 3:
        for i in numpy.arange(cs.shape[2]):
            temp = numpy.matrix(cs[:, :, i])
            coh[:, :, i] = cs[:, :, i] / numpy.sqrt(
                temp.diagonal().conj().T * temp.diagonal())

    elif ndim == 4:
        for i in numpy.arange(cs.shape[2]):
            for j in numpy.arange(cs.shape[3]):
                temp = numpy.matrix(numpy.squeeze(cs[:, :, i, j]))
                coh[:, :, i, j] = temp / numpy.sqrt(
                    temp.diagonal().conj().T * temp.diagonal())

    log.debug("result")
    log.debug(narray_describe(cs))
    spectra = spectral.ComplexCoherenceSpectrum(
        source=time_series,
        array_data=coh,
        cross_spectrum=cs,
        epoch_length=epoch_length,
        segment_length=segment_length,
        windowing_function=window_function)
    return spectra
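
The function above follows the standard Welch-style recipe: slice each epoch into overlapping segments, optionally detrend each segment, taper it, FFT it, and average the outer products of the spectra over segments before normalising the cross-spectrum to coherence. A minimal, hypothetical sketch of that pattern for a plain (time, channels) array, assuming only numpy and scipy.signal:

import numpy as np
from scipy import signal as sp_signal


def segment_coherence(ts, seg_len, seg_shift, detrend_segments=True):
    """Welch-style magnitude-squared coherence for a (time, channels) array."""
    n_time, n_ch = ts.shape
    win = np.hanning(seg_len)
    cs = np.zeros((n_ch, n_ch, seg_len // 2 + 1), dtype=np.complex128)
    n_seg = 0
    for start in range(0, n_time - seg_len + 1, seg_shift):
        seg = ts[start:start + seg_len, :]
        if detrend_segments:
            seg = sp_signal.detrend(seg, axis=0)        # remove the linear trend per segment
        spec = np.fft.rfft(seg * win[:, None], axis=0)  # (n_freq, n_ch)
        cs += np.einsum('fi,fj->ijf', spec, np.conj(spec))  # accumulate cross-spectra
        n_seg += 1
    cs /= n_seg
    auto = np.einsum('iif->if', cs).real                # auto-spectra on the diagonal
    return np.abs(cs) ** 2 / (auto[:, None, :] * auto[None, :, :])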
Example #46
lnm_cntrl = np.array(lnm)[::-1]
lnm_per = np.array(x_lnm)[::-1]
hr = int(time_diff / 3600.)
mn = int((time_diff - hr * 3600) / 60.)
sec = time_diff - hr * 3600 - mn * 60
print("Loaded", hr, "hr", mn, "min", sec, "sec of waveform data.")
print("It will analyze", total_win - 1, "windows of", time_win, "sec length")
ax = plt.gca()
for i in range(total_win - 1):
    sys.stdout.write("Processing window %d\r" % i)
    sys.stdout.flush()
    amp = []
    data_slice = 0
    data_slice = station[0].slice(t_start + i * time_shift, t_start +
                                  (i * time_shift + time_win)).copy()
    data_slice = sg.detrend(data_slice.data)
    num = np.size(data_slice)
    if num > 0:
        n_total = np.power(2, int(np.log2(num)) + 1)  # zero-pad to the next power of two
        taper = np.hanning(np.size(data_slice))
        data_slice = (data_slice - np.mean(data_slice)) * taper
        amp = np.fft.rfft(data_slice, n_total)  # data_slice is already demeaned and tapered
        num = n_total
        amp = amp[0:int(num / 2) + 1] * np.conjugate(amp[0:int(num / 2) + 1])
        norm = 2.0 * station[0].stats.delta / float(n_total)
        amp = np.abs(amp[0:int(num / 2) + 1]) * norm

        frq = np.fft.rfftfreq(n_total, station[0].stats.delta)
        frq = frq[0:int(num / 2) + 1]
        frq[1:] = 1. / frq[1:]
        frq[0] = time_win
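
The per-window processing above (detrend, demean, Hann taper, zero-padded FFT, one-sided power normalisation) can be collected into a small helper. A hedged sketch, assuming a 1-D numpy array and the sample interval delta in seconds; conversion of the frequency axis to period is left to the caller:

import numpy as np
from scipy import signal as sg


def window_psd(x, delta):
    """One-sided power spectrum of a single detrended, Hann-tapered window."""
    x = sg.detrend(np.asarray(x, dtype=float))
    n = x.size
    n_fft = 2 ** (int(np.log2(n)) + 1)          # zero-pad to the next power of two
    xw = (x - x.mean()) * np.hanning(n)
    spec = np.fft.rfft(xw, n_fft)
    psd = np.abs(spec) ** 2 * 2.0 * delta / n_fft
    freq = np.fft.rfftfreq(n_fft, delta)
    return freq, psd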
Example #47
    def run(self, tf, dt, c1, c2):
        """ Run a simulation """
        np.random.seed(62)
        n, m, k = self.n, self.m, self.k

        # Total simulation time
        simTime = int(tf / dt)

        # Returns the three synaptic connections kernels
        W12, W21, W22, delays = self.build_kernels()

        # Compute delays by dividing distances by axonal velocity
        delays12 = np.floor(delays[0] / c2)
        delays21 = np.floor(delays[1] / c1)
        delays22 = np.floor(delays[2] / c2)
        maxDelay = int(
            max(delays12[0].max(), delays21[0].max(), delays22[0].max()))

        # Set the initial conditions and the history
        self.initial_conditions(simTime)

        # Initialize the cortical and striatal inputs
        Cx = 0.5
        Str = 0.4

        # Presynaptic activities
        pre12, pre21, pre22 = np.empty((m, )), np.empty((m, )), np.empty((m, ))

        # Simulation
        for i in range(maxDelay, simTime):
            # Take into account the history of rate for each neuron according
            # to its axonal delay
            for idxi, ii in enumerate(range(m)):
                mysum = 0.0
                for jj in range(k, n):
                    mysum += (W12[ii, jj] *
                              self.X2[i - delays12[ii, jj], jj]) * self.dx
                pre12[idxi] = mysum

            for idxi, ii in enumerate(range(k, n)):
                mysum = 0.0
                for jj in range(0, m):
                    mysum += (W21[ii, jj] *
                              self.X1[i - delays21[ii, jj], jj]) * self.dx
                pre21[idxi] = mysum

            for idxi, ii in enumerate(range(k, n)):
                mysum = 0.0
                for jj in range(k, n):
                    mysum += (W22[ii, jj] *
                              self.X2[i - delays22[ii, jj], jj]) * self.dx
                pre22[idxi] = mysum

            # Forward Euler step
            self.X1[i, :m] = (
                self.X1[i - 1, :m] +
                (-self.X1[i - 1, :m] + self.S1(-pre12 + Cx)) * dt / self.tau1)
            self.X2[i, k:] = (
                self.X2[i - 1, k:] +
                (-self.X2[i - 1, k:] + self.S2(pre21 - pre22 - Str)) * dt /
                self.tau2)
        dx = 1.0 / float(m)
        fr = self.X1.sum(axis=1) * dx / 1.0

        signal = detrend(fr)
        windowed = signal * blackmanharris(len(signal))
        f = rfft(windowed)
        i = np.argmax(np.abs(f))
        # true_i = parabolic(np.log(np.abs(f)), i)[0]
        return i
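
The closing lines of run() estimate the dominant oscillation frequency of the mean firing rate: detrend, apply a Blackman-Harris window, FFT, and take the bin with the largest magnitude. A standalone sketch of that step, assuming the sampling interval dt is known so the bin index can be converted to a frequency:

import numpy as np
from scipy.signal import detrend
from scipy.signal.windows import blackmanharris


def dominant_frequency(x, dt):
    """Frequency of the strongest spectral peak in x, in cycles per unit of dt."""
    x = detrend(np.asarray(x, dtype=float))
    spec = np.fft.rfft(x * blackmanharris(len(x)))
    k = int(np.argmax(np.abs(spec)))             # index of the largest bin
    return np.fft.rfftfreq(len(x), dt)[k]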
Example #48
			utils.draw_face_roi(face_box,frame)
			t = np.arange(timestamps[0],timestamps[-1],1/fs)
			mean_colors_resampled = np.zeros((3,t.shape[0]))
			
			for color in [B,G,R]:
				resampled = np.interp(t,timestamps,np.array(mean_colors)[:,color])
				mean_colors_resampled[color] = resampled

	# Perform chrominance method
	if mean_colors_resampled.shape[1] > window:

		col_c = np.zeros((3,window))
        
		for col in [B,G,R]:
			col_stride 	= mean_colors_resampled[col,-window:]# select last samples
			y_ACDC 		= signal.detrend(col_stride/np.mean(col_stride))
			col_c[col] 	= y_ACDC * skin_vec[col]
            
		X_chrom     = col_c[R]-col_c[G]
		Y_chrom     = col_c[R] + col_c[G] - 2* col_c[B]
		Xf          = utils.bandpass_filter(X_chrom) 
		Yf          = utils.bandpass_filter(Y_chrom)
		Nx          = np.std(Xf)
		Ny          = np.std(Yf)
		alpha_CHROM = Nx/Ny
        
		x_stride   				= Xf - alpha_CHROM*Yf
		amplitude 				= np.abs( np.fft.fft(x_stride,window)[:int(window/2+1)])
		normalized_amplitude 	= amplitude/amplitude.max() #  Normalized Amplitude
		
		frequencies = np.linspace(0,fs/2,int(window/2) + 1) * 60
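
For reference, the chrominance step above normalises each colour trace by its temporal mean, detrends it, weights it by a fixed skin-tone vector, and combines the channels into two signals whose bandpassed standard deviations set the mixing coefficient. A simplified, hypothetical sketch that omits the bandpass filter; the skin_vec values and the (B, G, R) channel order are assumptions carried over from the snippet:

import numpy as np
from scipy import signal

B, G, R = 0, 1, 2
skin_vec = [0.3841, 0.5121, 0.7682]          # assumed per-channel skin-tone weights (B, G, R)


def chrominance_signal(rgb_window):
    """Combine detrended, skin-weighted colour traces into a single pulse signal."""
    c = np.zeros_like(rgb_window, dtype=float)
    for col in (B, G, R):
        trace = rgb_window[col]
        c[col] = signal.detrend(trace / trace.mean()) * skin_vec[col]
    x = c[R] - c[G]
    y = c[R] + c[G] - 2.0 * c[B]
    alpha = np.std(x) / np.std(y)            # the full method computes this on bandpassed signals
    return x - alpha * y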
Example #49
        # Define the bed geometry
        h = bed_pick[line][1]
        bed_raw = elev - h
        bed = medfilt(bed_raw, 401)

        # RMS bed roughness
        ED1 = np.empty((len(bed), ))
        ED1[:] = np.nan
        for n in range(N, len(bed) - N + 1):
            z = bed[n - N:n + N]
            z = z[np.where(~np.isnan(z))]
            if len(z) <= 1:
                ED1[n] = np.nan
            else:
                z_ = detrend(z)
                total = 0.
                for i in range(len(z)):
                    total += (z_[i])**2.
                ED1[n] = np.sqrt((1 / (len(z) - 1.)) * total)

        # Find the power reduction by Kirchoff theory
        g = 4 * np.pi * ED1 / lam
        b = (i0((g**2.) / 2.))**2.
        pn = np.exp(-(g**2.)) * b

        if line == 's5_pole_7_051':
            hold = 1. - pn[2723:3233]
            pn[2723:3233] = 1. - .1 * hold

        power = bed_pick[line][2]
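
The inner loop above computes the textbook RMS roughness of a sliding window: detrend the window, then take the root of the bias-corrected mean square. The per-sample accumulation can be written directly with numpy; a sketch, assuming a 1-D bed elevation array with NaNs marking data gaps:

import numpy as np
from scipy.signal import detrend


def rms_roughness(bed, half_width):
    """RMS deviation of a detrended sliding window centred on each sample."""
    bed = np.asarray(bed, dtype=float)
    out = np.full(bed.shape, np.nan)
    for n in range(half_width, len(bed) - half_width):
        z = bed[n - half_width:n + half_width]
        z = z[~np.isnan(z)]
        if len(z) > 1:
            out[n] = np.sqrt(np.sum(detrend(z) ** 2) / (len(z) - 1.0))
    return out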
Example #50
tt1 = np.where(year_data == 1983)[0][0]
tt2 = np.where(year_data == 2012)[0][-1]
nYears = 2012 - 1983 + 1

# Generate predictor matrix for climate modes
if sw_nomodes:
    #
    # ENSO (MEI index)
    #
    MEI = np.genfromtxt('../../data/modes/mei.combined.1871_2016')
    t_MEI, tmp, tmp, year_MEI, month_MEI, day_MEI, tmp = ecj.timevector(
        [1871, 1, 1], [2016, 12, 31])
    year_MEI = year_MEI[day_MEI == 15]
    MEI = MEI[year_MEI >= yearStart]
    MEI = MEI - MEI.mean()
    MEI = signal.detrend(MEI)
    # Predictor matrix with lags
    Nl = 12  # Number of lead-lags
    which = []
    X = np.zeros((len(MEI), 3 + 2 * Nl + 2))
    X[:, 0] += 1  # Mean
    X[:, 1] = np.arange(len(MEI)) - np.arange(len(MEI)).mean()  # Linear trend
    which.append('Mean')
    which.append('Trend')
    # Mode leads SST
    cnt = 1
    for k in range(1, Nl + 1):
        cnt += 1
        X[:-k, cnt] = MEI[k:]
        which.append('MEI')
    # 0-lag
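
The block above assembles a regression design matrix from the demeaned, detrended MEI index: a constant column, a centred linear-trend column, and one column per lead (and, further down, per lag) of the index. A compact sketch of that construction, assuming a 1-D index series and n_lags leads and lags:

import numpy as np
from scipy import signal


def lagged_design_matrix(index, n_lags):
    """Constant + centred trend + lead/lag copies of a detrended climate index."""
    x = signal.detrend(np.asarray(index, dtype=float) - np.mean(index))
    n = len(x)
    cols = [np.ones(n), np.arange(n) - np.arange(n).mean()]   # mean and centred trend
    for k in range(1, n_lags + 1):                            # index leads the response by k
        lead = np.zeros(n)
        lead[:-k] = x[k:]
        cols.append(lead)
    cols.append(x)                                            # zero-lag column
    for k in range(1, n_lags + 1):                            # index lags the response by k
        lag = np.zeros(n)
        lag[k:] = x[:-k]
        cols.append(lag)
    return np.column_stack(cols)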
Example #51
def my_form_post():
    shop_Id = request.form['shop_id']
    week_No = request.form['week_no']
    ShopID = int(shop_Id)
    WeekNo=int(week_No)


    # %matplotlib inline

    #plt.rcParams['figure.figsize'] = (20, 10)
    plt.style.use('ggplot')

    data_f = pd.read_csv('weekly sales and labour cost for all shops 2013 to 20177.csv')
    data = pd.read_csv('weekly sales and labour cost for all shops 2013 to 20177.csv',index_col='start_date', parse_dates=True)
    # shopID = input("Enter your shop id")
    shopID1 = ShopID
    # if shopID1<min(data.shop_id) or shopID1>max(data.shop_id):
    # print("Enter correct shop id number")
    # return select_model()
    WeekNo1=WeekNo

    data2 = data[['sales_id', 'shop_id', 'week_no', 'sales_amount', 'item_sold', 'transactions', 'total_tax', 'sales_status']]
    df1 = data2[data2.shop_id == shopID1]  # input №1
    df2 = df1[df1.sales_status != 0]
    df2.week_no.isnull().values.any()
    nulldetect = df1.week_no.isnull()
    nulldetect[nulldetect == True].index
    df2.week_no.loc[nulldetect == True] = 54
    df2['week_no'] = df2.week_no - 2
    if len(df2.week_no) > 51:

        dff = df2[['sales_amount']]
        data3 = dff.reset_index()
        data4 = data3

        data5 = data4.rename(columns={'start_date': 'ds', 'sales_amount': 'y'})
        data5.set_index('ds')
        data5 = data5.replace([np.inf, -np.inf], np.nan).fillna(0)
        # y.plot()
        data5['y'] = np.log(data5['y'])
        data5.set_index('ds')
        model = Prophet()
        model.fit(data5)
        future = model.make_future_dataframe(periods=52, freq='w')
        forecast = model.predict(future)
        data5.set_index('ds', inplace=True)
        forecast.set_index('ds', inplace=True)
        viz_df = dff.join(forecast[['yhat', 'yhat_lower', 'yhat_upper']], how='outer')
        viz_df['yhat_rescaled'] = np.exp(viz_df['yhat'])
        dff.index = pd.to_datetime(dff.index)  # make sure our index as a datetime object
        connect_date = dff.index[-2]  # select the 2nd to last date
        mask = (forecast.index > connect_date)
        predict_df = forecast.loc[mask]
        viz_df = dff.join(predict_df[['yhat', 'yhat_lower', 'yhat_upper']], how='outer')
        viz_df['yhat_scaled'] = np.exp(viz_df['yhat'])
        ii = len(dff.sales_amount) - 1
        viz_df.yhat_scaled[ii:]
        predicted_future_sales = pd.DataFrame(viz_df.yhat_scaled[ii:])
        predicted_future_sales1 = predicted_future_sales.rename(columns={'yhat_scaled': 'future_sales'})
        predicted_future_sales2 = predicted_future_sales1.reset_index()
        week_no = predicted_future_sales2['index'].dt.week
        future_sales = predicted_future_sales2['future_sales']
        future_sales1 = round(future_sales, 2)
        start_date = predicted_future_sales2['index']
        predict_data = {'future_sales': future_sales1, 'week_no': week_no, 'start_date': start_date}
        predict_data1 = pd.DataFrame(predict_data)

        predict_data2 = predict_data1.set_index('start_date')
        frames = [df2.sales_amount, predict_data2.future_sales]
        join = pd.concat(frames)
        detrend_sdata = signal.detrend(join)
        trend = join - detrend_sdata

        p2 = predict_data1.set_index('start_date')
        r = []
        for jj in pd.DataFrame(df2.index.year.values).drop_duplicates().index.values:
            sale_year = df2.sales_amount[str(int(pd.DataFrame(df2.index.year).drop_duplicates().loc[jj]))].mean()
            r.append(sale_year)
        years = pd.DataFrame(df2.index.year).drop_duplicates().start_date.values
        holday = []
        for t in years[0:len(years) - 1]:
            h = df2.sales_amount[df2.week_no >= 50][str(t)].mean() + df2.sales_amount[df2.week_no <= 3][str(int(t) + 1)].mean()
            holday.append(h / 2)
        year_last = p2.future_sales[p2.week_no >= 50][str(years[-1])].mean() + p2.future_sales[p2.week_no <= 3].mean()  # 2018
        holday.append(year_last / 2)
        N = len(r)
        Holiday_Means = holday
        All_Year_Means = r
        ind = np.arange(N)

        avg_sale=round(df2.sales_amount.mean(),2)
        maxSale=round(max(df2.sales_amount), 2)
        minSale=round(min(df2.sales_amount), 2)
        itemTrans=round((df2.item_sold / df2.transactions).mean(), 2)
        priceofitem=round((df2.sales_amount/df2.item_sold).mean(),2)
        avgitems=round(df2.item_sold.mean(), 2)
        best = pd.DataFrame(df2[['week_no', 'sales_amount']][(df2.sales_amount <= max(df2.sales_amount))
                                                             & (df2.sales_amount >= max(df2.sales_amount) - 2000)])
        worst = pd.DataFrame(df2[['week_no', 'sales_amount']][(df2.sales_amount >= min(df2.sales_amount))
                                                              & (df2.sales_amount <= min(df2.sales_amount) + 1500)])
        best1 = best.rename(columns={'week_no': 'Best Weeks', 'sales_amount': 'Sales'})
        worst1 = worst.rename(columns={'week_no': 'Worst Weeks', 'sales_amount': 'Sales'})
        result = pd.concat([best1, worst1], axis=1)
        # result.fillna('  ')
        # result['Date']=result.index.values
        result.insert(0, 'Date', result.index.values)
        result = result.fillna('-')


        fig, ax1 = plt.subplots(figsize=(7, 4))
        ax1.plot(viz_df.sales_amount)
        ax1.plot(viz_df.yhat_scaled,color='green')
        ax1.plot(join.index, trend, color='blue', alpha=0.5, label='Trend')
        #ax1.plot(join.index, trend, color='blue', alpha=0.5, label='Trend')
        #ax1.fill_between(viz_df.index, np.exp(viz_df['yhat_upper']), np.exp(viz_df['yhat_lower']), alpha=0.5,
                         #color='darkgray')
        ax1.set_title('Sales (Orange) vs Sales Forecast (Green) for shop ' + str(shopID1))
        ax1.set_ylabel('Sales amount')
        ax1.set_xlabel('Dates')

        L = ax1.legend()  # get the legend
        L.get_texts()[0].set_text('Actual Sales')  # change the legend text for 1st plot
        L.get_texts()[1].set_text('Forecasted Sales')  # change the legend text for 2nd plot
        graph = mpld3.fig_to_html(fig)

        fig, ax2 = plt.subplots(figsize=(7, 4))
        bar_width = 0.4
        opacity = 0.8
        bar1 = ax2.bar(ind, Holiday_Means, bar_width, opacity, label='Holidays')
        bar2 = ax2.bar(ind + bar_width, All_Year_Means, bar_width, opacity, label='Avg sales per year')
        ticks = pd.DataFrame(df2.index.year).drop_duplicates().start_date.values

        ax2.set_ylabel('Sales_amount')
        ax2.set_title('Holiday weekly sales (Xmas & NY) vs Average weekly sales per year (shop #%s)' % shopID1)
        plt.xticks(ind+0.25,ticks)
        ax2.legend()
        graph1 = mpld3.fig_to_html(fig)
        fig.clf()



        f_sale=str(float(predict_data1.future_sales[predict_data1.week_no == WeekNo1].values[0]))
        f_date=str(predict_data1.start_date[predict_data1.week_no == WeekNo1].values[0])[0:10]
        n_week = WeekNo1
        id_shop = shopID1
        sale_mean=avg_sale
        max_sale=maxSale
        min_sale=minSale
        item_trans=itemTrans
        price_item=priceofitem
        avg_wk_itemsold=avgitems
        tab_week=result.to_html(index = False)

    else:

        a = df2[['sales_amount', 'shop_id', 'week_no', 'transactions', 'item_sold']]
        y = a.iloc[:, 0]
        x = a.iloc[:, 3:5]
        # print (df2)
        from sklearn import linear_model
        regr2 = linear_model.LinearRegression()
        X1 = x
        y1 = y
        regr2.fit(X1, y1)
        y_predictions = regr2.predict(X1)
        y_predictions1 = pd.DataFrame(y_predictions)
        d = {'actual sales': y, 'predicted sales': y_predictions1}
        d1 = np.array(d)

        dates = pd.date_range(y.index[-1], periods=52, freq='W-MON')
        dates1 = pd.DataFrame(dates)

        mean_week_item = []
        for i in dates.week:
            mean_item_sold = a.item_sold[a.week_no == i].mean()
            mean_week_item.append(mean_item_sold)
        mean_week_item1 = pd.DataFrame(mean_week_item)

        trans_week_item = []
        for i1 in dates.week:
            mean_trans_sold = a.transactions[a.week_no == i1].mean()
            trans_week_item.append(mean_trans_sold)

        sales_week = []
        for ii1 in dates.week:
            mean_sales_sold = a.sales_amount[a.week_no == ii1].mean()
            sales_week.append(mean_sales_sold)

        dd = {'date': dates, 'weeks_no': dates.week, 'sales': sales_week, 'mean_item': mean_week_item,
              'mean_trans': trans_week_item}
        dd1 = pd.DataFrame(dd)

        dff1 = data_f[data_f.sales_status != 0]
        nulldetect = dff1.week_no.isnull()
        dff1.week_no.loc[nulldetect == True] = 54
        dff1['week_no'] = dff1.week_no - 2
        X_Cluster = dff1[['shop_id', 'sales_amount']]
        from sklearn.cluster import KMeans
        kmeans_model = KMeans(n_clusters=3, random_state=8).fit(X_Cluster)
        y_hat = kmeans_model.labels_  # clusters
        cen = kmeans_model.cluster_centers_
        y_hat1 = pd.DataFrame(y_hat)
        group_low_sales = X_Cluster[y_hat == 0]
        group_middle_sales = X_Cluster[y_hat == 2]
        group_high_sales = X_Cluster[y_hat == 1]

        fff = []
        for j in X_Cluster.shop_id:
            dfdf = X_Cluster.sales_amount[X_Cluster.shop_id == j].mean()
            fff.append(dfdf)
        f3 = pd.DataFrame(X_Cluster.shop_id.drop_duplicates())
        f4 = pd.DataFrame(fff)
        f5 = f4.drop_duplicates()
        f3['salle'] = f5.values

        # from sklearn.cluster import KMeans
        Xx2 = f3[['shop_id', 'salle']]
        kmeans_model2 = KMeans(n_clusters=3, random_state=8).fit(Xx2)
        y_hat2 = kmeans_model2.labels_  # clusters
        cen2 = kmeans_model2.cluster_centers_

        group_middle_sales2 = Xx2[y_hat2 == 0]
        group_high_sales2 = Xx2[y_hat2 == 2]
        group_low_sales2 = Xx2[y_hat2 == 1]

        nullweeks = dd1.weeks_no[dd1.mean_trans.isnull() == True]

        if (group_low_sales2.shop_id.values == shopID1).any() == True:
            cx = int(group_low_sales.sales_amount[group_low_sales.shop_id == shopID1].values.mean())
            trt = group_low_sales[group_low_sales.sales_amount > cx - 3000]
            trt2 = trt[trt.sales_amount < cx + 3000]
            valid_cls = dff1[['sales_amount', 'item_sold', 'transactions', 'week_no']].loc[trt2.index.values]
            #print("Cluster of shop %s is low sales" % shopID1)
        elif (group_middle_sales2.shop_id.values == shopID1).any() == True:
            # valid_cls=dff1[['sales_amount','item_sold','transactions','week_no']].loc[group_middle_sales.shop_id.index.values]
            cx = int(group_middle_sales.sales_amount[group_middle_sales.shop_id == shopID1].values.mean())
            trt = group_middle_sales[group_middle_sales.sales_amount > cx - 3000]
            trt2 = trt[trt.sales_amount < cx + 3000]
            valid_cls = dff1[['sales_amount', 'item_sold', 'transactions', 'week_no']].loc[trt2.index.values]
            #print("Cluster of shop %s is average sales" % shopID1)
        elif (group_high_sales2.shop_id.values == shopID1).any() == True:
            # valid_cls=dff1[['sales_amount','item_sold','transactions','week_no']].loc[group_high_sales.shop_id.index.values]
            cx = int(group_high_sales.sales_amount[group_high_sales.shop_id == shopID1].values.mean())
            trt = group_high_sales[group_high_sales.sales_amount > cx - 4000]
            trt2 = trt[trt.sales_amount < cx + 4000]
            valid_cls = dff1[['sales_amount', 'item_sold', 'transactions', 'week_no']].loc[trt2.index.values]
            #print("Cluster of shop %s is high sales" % shopID1)
        drr = valid_cls
        #print('Avg sales per week for whole period ',
        avg_sale=round(df2.sales_amount.mean(), 2)  # avg sales per week for whole period
        # avg_items_week=round(df2.item_sold[df2.week_no==17].mean(),2)# avg items for input week
        #print('Avg items sold per week for whole period ',
        #round(df2.item_sold.mean(), 2) # avg items per week for whole period
        # avg_trans_week=round(df2.transactions[df2.week_no==17].mean(),2)# avg trans for input week
        #print('Avg trans per week for whole period ',
        #round(df2.transactions.mean(), 2) # avg trans per week for whole period
        # avg_item_per_trans=round((df2.item_sold[df2.week_no==17]/df2.transactions[df2.week_no==17]).mean(),2)#items per transactions w
        itemTrans=round((df2.item_sold / df2.transactions).mean(), 2)
        # max_w=round(max(df2.sales_amount[df2.week_no==17]),2)
        # min_w=round(min(df2.sales_amount[df2.week_no==17]),2)
        maxSale=round(max(df2.sales_amount), 2)
        minSale=round(min(df2.sales_amount), 2)
        priceofitem = round((df2.sales_amount / df2.item_sold).mean(), 2)
        avgitems=round(df2.item_sold.mean(), 2)
        best = pd.DataFrame(df2[['week_no', 'sales_amount']][(df2.sales_amount <= max(df2.sales_amount))
                                                             & (df2.sales_amount >= max(df2.sales_amount) - 2000)])
        worst = pd.DataFrame(df2[['week_no', 'sales_amount']][(df2.sales_amount >= min(df2.sales_amount))
                                                              & (df2.sales_amount <= min(df2.sales_amount) + 1500)])
        best1 = best.rename(columns={'week_no': 'Best Weeks', 'sales_amount': 'Sales'})
        worst1 = worst.rename(columns={'week_no': 'Worst Weeks', 'sales_amount': 'Sales'})
        result = pd.concat([best1, worst1], axis=1)
        # result.fillna('  ')
        # result['Date']=result.index.values
        result.insert(0, 'Date', result.index.values)
        result = result.fillna('-')
        # worst=df2.week_no[df2.sales_amount>min(df2.sales_amount)]
        #df2[['week_no', 'sales_amount']][(df2.sales_amount >= min(df2.sales_amount)) & (df2.sales_amount <= min(df2.sales_amount) + 1500)])
        #df2[['week_no', 'sales_amount']][(df2.sales_amount <= max(df2.sales_amount)) & (df2.sales_amount >= max(df2.sales_amount) - 3000)])
        #print('Price of trans ', round((df2.sales_amount / df2.transactions).mean(), 2))
        #print('Price of item ', round((df2.sales_amount / df2.item_sold).mean(), 2))
        itt = []
        trr = []
        sale = []
        for i3 in nullweeks:
            item = drr.item_sold[drr.week_no == i3].mean()
            trans = drr.transactions[drr.week_no == i3].mean()
            salee = drr.sales_amount[drr.week_no == i3].mean()
            itt.append(item)
            trr.append(trans)
            sale.append(salee)
        df_insert = {'sales_amountt': sale, 'ittem': itt, 'trans': trr, 'weeks_no': nullweeks}
        df_insert1 = pd.DataFrame(df_insert)
        forecastdf = dd1.fillna({'mean_item': df_insert1.ittem, 'mean_trans': df_insert1.trans, 'sales': df_insert1.sales_amountt})
        regr3 = linear_model.LinearRegression()
        X = forecastdf[['mean_item', 'mean_trans']]
        Y = forecastdf.sales
        regr3.fit(X, Y)
        y_predictionss = regr3.predict(X)
        y_predictionss1 = pd.DataFrame(y_predictionss)
        forecastdf['future_sales1'] = y_predictionss1.values
        f1 = forecastdf.set_index('date')
        frames1 = [df2.sales_amount, f1.future_sales1]
        join1 = pd.concat(frames1)
        detrend_sdata1 = signal.detrend(join1)
        trend1 = join1 - detrend_sdata1


        r1 = []
        for jj1 in pd.DataFrame(df2.index.year.values).drop_duplicates().index.values:
            sale_year1 = df2.sales_amount[str(int(pd.DataFrame(df2.index.year).drop_duplicates().loc[jj1]))].mean()
            r1.append(sale_year1)
        years1 = pd.DataFrame(df2.index.year).drop_duplicates().start_date.values
        holday1 = []
        for t1 in years1[0:len(years1) - 1]:
            h1 = df2.sales_amount[df2.week_no >= 50][str(t1)].mean() + df2.sales_amount[df2.week_no <= 3][str(int(t1) + 1)].mean()
            holday1.append(h1 / 2)
        year_last1 = f1.future_sales1[f1.weeks_no >= 50][str(years1[-1])].mean() + f1.future_sales1[f1.weeks_no <= 3].mean()  # 2018
        holday1.append(year_last1 / 2)
        N1 = len(r1)
        Holiday_Means1 = holday1
        All_Year_Means1 = r1
        ind1 = np.arange(N1)

        f_sale = int(forecastdf.future_sales1[forecastdf.weeks_no == WeekNo1].values)
        f_date =str(forecastdf.date[forecastdf.weeks_no==WeekNo1].values[0])[0:10]
        n_week = WeekNo1
        id_shop = shopID1
        sale_mean = avg_sale
        max_sale = maxSale
        min_sale = minSale
        item_trans = itemTrans
        price_item=priceofitem
        avg_wk_itemsold = avgitems
        tab_week=result.to_html(index = False)
        # print(y.index)
        fig3, ax3 = plt.subplots(figsize=(7,4))
        # dates = pd.date_range(y.index[0], periods=104, freq='W-MON',format='%Y-%m-%d')
        # plt.plot(y.index,y,color='blue',label="actual sales")
        ax3.plot(y.index, a.sales_amount, color='red', label="actual sales")
        ax3.plot(dates, y_predictionss1, color='green', label="forecasted sales")
        ax3.plot(join1.index, trend1, color='blue', alpha=0.5, label='Trend')
        ax3.set_title('Comparison actual and predicted sales for whole period of shop ' + str(shopID1) + '\n')
        ax3.set_xlabel('Weeks')
        ax3.set_ylabel('Sales amount')
        ax3.legend()
        graph = mpld3.fig_to_html(fig3)
        fig3.clf()


        fig4, ax4 = plt.subplots(figsize=(7,4))
        bar_width1 = 0.4
        opacity1 = 0.8
        ax4.bar(ind1, Holiday_Means1, bar_width1, opacity1, label='Holidays')
        ax4.bar(ind1 + bar_width1, All_Year_Means1, bar_width1, opacity1, label='Avg sales per year')

        ax4.set_ylabel('Sales_amount')
        ax4.set_title('Holiday weekly sales (Xmas & NY) vs Average weekly sales per year (shop #%s)' % shopID1)
        plt.xticks(ind1 + 0.25, (pd.DataFrame(df2.index.year).drop_duplicates().start_date.values))
        ax4.legend()
        graph1 = mpld3.fig_to_html(fig4)



    return render_template('index.html',graph1=graph1,graph=graph,data=tab_week,value9=f_date,value8=avg_wk_itemsold,value7=price_item,value6=itemTrans,
                           value5=min_sale,value4=max_sale,value3=sale_mean,
                           value2=id_shop,value1=n_week,value=f_sale)
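
Both branches above recover the long-term trend of the joined actual-plus-forecast series the same way: signal.detrend returns the series with its least-squares linear fit removed, so subtracting the detrended values from the originals leaves exactly that linear trend. A minimal sketch of the idiom, with linear_trend as a hypothetical helper name:

import numpy as np
from scipy import signal


def linear_trend(series):
    """Return the least-squares linear trend that scipy.signal.detrend removes."""
    values = np.asarray(series, dtype=float)
    return values - signal.detrend(values)

# e.g. trend = linear_trend(join) can then be plotted against join.index, as above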
Example #52
def cmplxIQ_fit(paramsVec, freqs, data=None, eps=None, **kwargs):
    """Return complex S21 resonance model or, if data is specified, a residual.

    Parameters
    ----------
    paramsVec : list-like
        An ``lmfit.Parameters`` object containing (df, f0, qc, qi, gain0, gain1, gain2, pgain0, pgain1, pgain2)
    freqs : list-like
        A list of frequency points at which the model is calculated
    data : list-like (optional)
        A list of complex data in the form I + Q where I and Q are both lists of data and
        ``len(I) == len(Q) == len(freqs)``. If data is not passed, then the return value is the model
        calculated at each frequency point.
    eps : list-like (optional)
        A list of errors, one for each point in data.
    kwargs : dict (optional)
        Currently no keyword arguments are accepted.

    Returns
    -------
    model or (model-data) : ``numpy.array``
        If data is specified, the return is the residuals. If not, the return is the model
        values calculated at the frequency points. The returned array is in the form
        ``I + Q`` or ``residualI + residualQ``.

    """
    #Check if the paramsVec looks like a lmfit params object. If so, unpack to list
    if hasattr(paramsVec, 'valuesdict'):
        paramsDict = paramsVec.valuesdict()
        paramsVec = [value for value in paramsDict.values()]

    #intrinsic resonator parameters
    df = paramsVec[0]  #frequency shift due to mismatched impedances
    f0 = paramsVec[1]  #resonant frequency
    qc = paramsVec[2]  #coupling Q
    qi = paramsVec[3]  #internal Q

    #0th, 1st, and 2nd terms in a taylor series to handle magnitude gain different than 1
    gain0 = paramsVec[4]
    gain1 = paramsVec[5]
    gain2 = paramsVec[6]

    #0th and 1st terms in a taylor series to handle phase gain different than 1
    pgain0 = paramsVec[7]
    pgain1 = paramsVec[8]

    #Need to add an if statement here to keep compatibility with older version
    #Will be deprecated in the future
    if len(paramsVec) == 12:
        pgain2 = paramsVec[9]

        #Voltage offset at mixer output. Not needed for VNA
        Ioffset = paramsVec[10]
        Qoffset = paramsVec[11]
    else:
        print(
            "Warning: new model also fits for quadratic phase. Setting pgain2 = 0."
        )
        print(
            "If using Resonator.do_lmfit() pass kwarg: pgain2_vary = False for legacy behavior."
        )
        pgain2 = 0

        #Voltage offset at mixer output. Not needed for VNA
        Ioffset = paramsVec[9]
        Qoffset = paramsVec[10]

    #Make everything referenced to the shifted, unitless, reduced frequency
    fs = f0 + df
    ff = (freqs - fs) / fs

    #Except for the gain, which should reference the file midpoint
    #This is important because the baseline coefs shouldn't drift
    #around with changes in f0 due to power or temperature

    #Of course, this philosophy goes out the window if different sweeps have
    #different ranges.
    fm = freqs[int(np.round((len(freqs) - 1) / 2.0))]
    ffm = (freqs - fm) / fm

    #Calculate the total Q_0
    q0 = 1. / (1. / qi + 1. / qc)

    #Calculate magnitude and phase gain
    gain = gain0 + gain1 * ffm + 0.5 * gain2 * ffm**2
    pgain = np.exp(1j * (pgain0 + pgain1 * ffm + 0.5 * pgain2 * ffm**2))

    #Allow for voltage offset of I and Q
    offset = Ioffset + 1j * Qoffset

    #Calculate model from params at each point in freqs
    modelCmplx = -gain * pgain * (1. / qi + 1j * 2.0 *
                                  (ff + df / fs)) / (1. / q0 +
                                                     1j * 2.0 * ff) + offset

    #Package complex data in 1D vector form
    modelI = np.real(modelCmplx)
    modelQ = np.imag(modelCmplx)
    model = np.concatenate((modelI, modelQ), axis=0)

    #Calculate eps from stdev of first 10 pts of data if not supplied
    if eps is None and data is not None:
        dataI, dataQ = np.split(data, 2)
        epsI = np.std(sps.detrend(dataI[0:10]))
        epsQ = np.std(sps.detrend(dataQ[0:10]))
        eps = np.concatenate((np.full_like(dataI,
                                           epsI), np.full_like(dataQ, epsQ)))

    #Return model or residual
    if data is None:
        return model
    else:
        return (model - data) / eps
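
The eps fallback at the end is a simple noise estimate: the first few samples of each quadrature are assumed to sit on the smooth baseline, so the standard deviation of their detrended values approximates the per-point measurement error. A sketch of that estimator on its own, with the number of baseline points as a hypothetical parameter:

import numpy as np
import scipy.signal as sps


def estimate_eps(dataI, dataQ, n_points=10):
    """Per-point error estimate from the detrended leading samples of I and Q."""
    epsI = np.std(sps.detrend(dataI[:n_points]))   # detrend so a sloping baseline
    epsQ = np.std(sps.detrend(dataQ[:n_points]))   # does not inflate the noise estimate
    return np.concatenate((np.full_like(dataI, epsI), np.full_like(dataQ, epsQ)))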
Example #53
 human = int(os.path.basename(file)
             [:7])  ## get a person's number from file name
 for i in range(
         int(N_Origin / N)
 ):  # 256 frames are divided by four, and the process is done every 64 frames
     for loop in range(
             HANDDATA_Num):  ## four times (L1,L2,R1,R2)
         div_data = data[loop][
             i * N:(i + 1) *
             N, :].T  ## div_data = [L1's first 64 frames, L2's first 64 frames, R1's first 64 frames, … R2's last 64 frames]
         for sensor_num in range(PARAMETER_Num):
             # if sensor_num < From or sensor_num > To: ## if you want to use only partial feature, you can exclude here freely.
             #     continue
             # if loop == 0 and i == 0: print(sensor_num) #display sensor number
             data_window = signal.detrend(
                 div_data[sensor_num], type="constant"
             ) * window  ## remove trend(constant) & apply window function
             F_window = np.abs(
                 fft(data_window) / (N / 2)
             ) * 1 / (
                 sum(window) / N
             )  ## do FFT & get the absolute value (adjust the amplitude value)
             x.append(
                 F_window[:int(N / 2)]
             )  ## keep only the low-frequency half of the spectrum
 x = np.array(x).flatten()
 X.append(
     x
 )  ## add x(frequency data) to list. the data consists of 32 frequency components * four (L1,L2,R1,R2) * four (divide number) * X features = 512*X dimensions
 y.append(int(type % 2))  ## add answer to list
 human_all.append(int(human))  ## make human list
Example #54
 try:
     dt_E = np.mean(np.diff(this_seis[0].times()))
     dt_N = np.mean(np.diff(this_seis[1].times()))
     dt_Z = np.mean(np.diff(this_seis[2].times()))
 except:
     continue
 if not (dt_E == dt_N == dt_Z and len(this_seis[0].data) == len(
         this_seis[1].data) == len(this_seis[2].data)):
     continue
 dt = dt_E
 # -------- Detrend -----------------#
 if change_E:
     this_seis[0].data = this_seis[0].data * -1
 if change_N:
     this_seis[1].data = this_seis[1].data * -1
 this_seis[0].data = detrend(this_seis[0].data, type='linear')
 this_seis[0].data = detrend(this_seis[0].data, type='constant')
 this_seis[1].data = detrend(this_seis[1].data, type='linear')
 this_seis[1].data = detrend(this_seis[1].data, type='constant')
 this_seis[2].data = detrend(this_seis[2].data, type='linear')
 this_seis[2].data = detrend(this_seis[2].data, type='constant')
 # ------- Rotate ENZ to RTZ ---------#
 if comp == 1 or comp == 2 or comp == 3:
     E = this_seis[2].data
     N = this_seis[1].data
     Z = this_seis[0].data
 else:
     E = this_seis[0].data
     N = this_seis[1].data
     Z = this_seis[2].data
 (this_seis[0].data, this_seis[1].data,
Example #55
def dCleaner(pm_data, trend):
    '''Cleaning data for phasemeter glitching. Also optional detrend of data. '''
    for i in range(len(pm_data)):
        if pm_data[i] > 5e8: pm_data[i] = 10003.798e5 - pm_data[i]
    if trend == True: pm_data = signal.detrend(pm_data, type='linear')
    return pm_data
Example #56
#Q_ek_f_mean = Q_ek_f.mean(dim='time')
#
#Q_ek = Q_ek.where(np.abs(lats) > 0)

# Compute monthly anomaly
if anom_flag:
    Q_net_surf, Q_net_surf_clim = st.anom(Q_net_surf)
    thf, thf_clim = st.anom(thf)
    sst, sst_clim = st.anom(sst)
    Q_ek, Q_ek_clim = st.anom(Q_ek)
    #Q_ek_f,Q_ek_f_clim = st.anom(Q_ek_f)

# Remove linear trend
if detr:
    sst = sst.fillna(0.)
    sst = xr.DataArray(signal.detrend(sst, axis=0),
                       dims=sst.dims,
                       coords=sst.coords)

    # h = h.fillna(0.)
    # h = xr.DataArray(signal.detrend(h, axis=0), dims=h.dims, coords=h.coords)

    thf = thf.fillna(0.)
    thf = xr.DataArray(signal.detrend(thf, axis=0),
                       dims=thf.dims,
                       coords=thf.coords)

    Q_net_surf = Q_net_surf.fillna(0.)
    Q_net_surf = xr.DataArray(signal.detrend(Q_net_surf, axis=0),
                              dims=Q_net_surf.dims,
                              coords=Q_net_surf.coords)
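
Because scipy.signal.detrend works on bare arrays and rejects NaNs, the pattern above fills missing values, detrends along the time axis, and rewraps the result with the original dims and coords. That can be factored into a small helper; a sketch assuming an xarray DataArray whose leading dimension is time:

import xarray as xr
from scipy import signal


def detrend_dataarray(da, axis=0, fill_value=0.0):
    """Linearly detrend a DataArray along one axis, keeping its dims and coords."""
    filled = da.fillna(fill_value)                       # detrend cannot handle NaNs
    values = signal.detrend(filled, axis=axis, type='linear')
    return xr.DataArray(values, dims=da.dims, coords=da.coords)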
Example #57
                    ### Area 1.9x2.5 grid cell [58466.1 = (278.30) * (210.083)]
                    valyr[ti, i, j] = 58466.1 * np.cos(np.radians(lat2[i, j]))
        ext[ti] = np.nansum(valyr[ti, :, :]) / 1e6

    ### Reshape array for [year,month]
    ext = np.reshape(ext, (ext.shape[0] // 12, 12))
    return ext


### Calculate functions
lat, lon, sic = readSIEData()
lon2, lat2 = np.meshgrid(lon, lat)
ext = calcExtent(sic, lat2)

### Detrend data
extdt = sss.detrend(ext, axis=0, type='linear')

### Calculate zscores
extdtzz = sts.zscore(extdt, axis=0)
extzz = sts.zscore(ext, axis=0)

### Calculate standard deviation
extstd = np.std(ext, axis=0)

### Calculate OND sea ice index
extond = np.nanmean(ext[:, -3:], axis=1)
extondzz = sts.zscore(extond, axis=0)
iceslice05_ond = np.where(extondzz <= -0.5)[0]
yearslice05_ond = years[iceslice05_ond]
iceslice1_ond = np.where(extondzz <= -1.)[0]
yearslice1_ond = years[iceslice1_ond]
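
The index above is built by detrending the monthly extent along the year axis, standardising it, and flagging the years whose standardised October-December mean falls below a threshold. A condensed sketch of that selection; here the OND mean is taken from the detrended extent, whereas the script above standardises the raw OND mean:

import numpy as np
from scipy import signal, stats


def low_ice_years(ext, years, threshold=-0.5):
    """Years whose detrended, standardised Oct-Dec mean extent is anomalously low."""
    ext_dt = signal.detrend(ext, axis=0, type='linear')   # remove the long-term trend
    ond = np.nanmean(ext_dt[:, -3:], axis=1)              # Oct-Nov-Dec mean per year
    return years[stats.zscore(ond) <= threshold]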
Example #58
def _createPhysiologicalNuisanceRegressors(inputname,
                                           subj,
                                           run,
                                           globalmask,
                                           wmmask,
                                           ventriclesmask,
                                           aCompCor=5):
    """
    inputname   -   4D input time series to obtain nuisance regressors
    run      -   fMRI run
    globalmask  -   whole brain mask to extract global time series
    wmmask      -   white matter mask (functional) to extract white matter time series
    ventriclesmask- ventricles mask (functional) to extract ventricle time series
    aCompCor    -   Create PC component time series of white matter and ventricle time series, using first n PCs
    """

    # Load raw fMRI data (in volume space)
    print('Loading raw fMRI data')
    fMRI4d = nib.load(inputname).get_data()

    ##########################################################
    ## Nuisance time series (Global signal, WM, and Ventricles)
    print('Obtaining standard global, wm, and ventricle signals and their derivatives')
    # Global signal
    globalMask = nib.load(globalmask).get_data()
    globalMask = np.asarray(globalMask, dtype=bool)
    globaldata = fMRI4d[globalMask].copy()
    globaldata = signal.detrend(globaldata, axis=1, type='constant')
    globaldata = signal.detrend(globaldata, axis=1, type='linear')
    global_signal1d = np.mean(globaldata, axis=0)
    # White matter signal
    wmMask = nib.load(wmmask).get_data()
    wmMask = np.asarray(wmMask, dtype=bool)
    wmdata = fMRI4d[wmMask].copy()
    wmdata = signal.detrend(wmdata, axis=1, type='constant')
    wmdata = signal.detrend(wmdata, axis=1, type='linear')
    wm_signal1d = np.mean(wmdata, axis=0)
    # Ventricle signal
    ventricleMask = nib.load(ventriclesmask).get_data()
    ventricleMask = np.asarray(ventricleMask, dtype=bool)
    ventricledata = fMRI4d[ventricleMask].copy()
    ventricledata = signal.detrend(ventricledata, axis=1, type='constant')
    ventricledata = signal.detrend(ventricledata, axis=1, type='linear')
    ventricle_signal1d = np.mean(ventricledata, axis=0)

    del fMRI4d

    ## Create derivative time series (with backward differentiation, consistent with 1d_tool.py -derivative option)
    # Global signal derivative
    global_signal1d_deriv = np.zeros(global_signal1d.shape)
    global_signal1d_deriv[1:] = global_signal1d[1:] - global_signal1d[:-1]
    # White matter signal derivative
    wm_signal1d_deriv = np.zeros(wm_signal1d.shape)
    wm_signal1d_deriv[1:] = wm_signal1d[1:] - wm_signal1d[:-1]
    # Ventricle signal derivative
    ventricle_signal1d_deriv = np.zeros(ventricle_signal1d.shape)
    ventricle_signal1d_deriv[
        1:] = ventricle_signal1d[1:] - ventricle_signal1d[:-1]

    ## Write to h5py
    # Create h5py output
    h5f = h5py.File(nuis_reg_dir + subj + '_nuisanceRegressors.h5', 'a')
    try:
        h5f.create_dataset(run + '/global_signal', data=global_signal1d)
        h5f.create_dataset(run + '/global_signal_deriv',
                           data=global_signal1d_deriv)
        h5f.create_dataset(run + '/wm_signal', data=wm_signal1d)
        h5f.create_dataset(run + '/wm_signal_deriv', data=wm_signal1d_deriv)
        h5f.create_dataset(run + '/ventricle_signal', data=ventricle_signal1d)
        h5f.create_dataset(run + '/ventricle_signal_deriv',
                           data=ventricle_signal1d_deriv)
    except:
        del h5f[run +
                '/global_signal'], h5f[run + '/global_signal_deriv'], h5f[
                    run + '/wm_signal'], h5f[run + '/wm_signal_deriv'], h5f[
                        run +
                        '/ventricle_signal'], h5f[run +
                                                  '/ventricle_signal_deriv']
        h5f.create_dataset(run + '/global_signal', data=global_signal1d)
        h5f.create_dataset(run + '/global_signal_deriv',
                           data=global_signal1d_deriv)
        h5f.create_dataset(run + '/wm_signal', data=wm_signal1d)
        h5f.create_dataset(run + '/wm_signal_deriv', data=wm_signal1d_deriv)
        h5f.create_dataset(run + '/ventricle_signal', data=ventricle_signal1d)
        h5f.create_dataset(run + '/ventricle_signal_deriv',
                           data=ventricle_signal1d_deriv)

    ##########################################################
    ## Obtain aCompCor regressors using first 5 components of WM and Ventricles (No GSR!)
    ncomponents = 5
    nTRs = len(global_signal1d)
    print('Obtaining aCompCor regressors and their derivatives')
    # WM time series
    wmstart = time.time()
    # Obtain covariance matrix, and obtain first 5 PCs of WM time series
    tmpcov = np.corrcoef(wmdata.T)
    eigenvalues, topPCs = scipy.sparse.linalg.eigs(tmpcov,
                                                   k=ncomponents,
                                                   which='LM')
    # Now using the top n PCs
    aCompCor_WM = topPCs
    #    wmend = time.time() - wmstart
    #    print 'WM aCompCor took', wmend, 'seconds'

    # Ventricle time series
    ventstart = time.time()
    # Obtain covariance matrix, and obtain first 5 PCs of ventricle time series
    tmpcov = np.corrcoef(ventricledata.T)
    eigenvalues, topPCs = scipy.sparse.linalg.eigs(tmpcov,
                                                   k=ncomponents,
                                                   which='LM')
    # Now using the top n PCs
    aCompCor_ventricles = topPCs
    #    ventricletime = time.time() - ventstart
    #    print 'Ventricle aCompCor took', ventricletime, 'seconds'

    # White matter signal derivative using backwards differentiation
    aCompCor_WM_deriv = np.zeros(aCompCor_WM.shape)
    aCompCor_WM_deriv[1:, :] = np.real(aCompCor_WM[1:, :]) - np.real(
        aCompCor_WM[:-1, :])
    # Ventricle signal derivative
    aCompCor_ventricles_deriv = np.zeros(aCompCor_ventricles.shape)
    aCompCor_ventricles_deriv[1:, :] = np.real(
        aCompCor_ventricles[1:, :]) - np.real(aCompCor_ventricles[:-1, :])

    ## Write to h5py
    try:
        h5f.create_dataset(run + '/aCompCor_WM', data=aCompCor_WM)
        h5f.create_dataset(run + '/aCompCor_WM_deriv', data=aCompCor_WM_deriv)
        h5f.create_dataset(run + '/aCompCor_ventricles',
                           data=aCompCor_ventricles)
        h5f.create_dataset(run + '/aCompCor_ventricles_deriv',
                           data=aCompCor_ventricles_deriv)
    except:
        del h5f[run + '/aCompCor_WM'], h5f[run + '/aCompCor_WM_deriv'], h5f[
            run + '/aCompCor_ventricles'], h5f[run +
                                               '/aCompCor_ventricles_deriv']
        h5f.create_dataset(run + '/aCompCor_WM', data=aCompCor_WM)
        h5f.create_dataset(run + '/aCompCor_WM_deriv', data=aCompCor_WM_deriv)
        h5f.create_dataset(run + '/aCompCor_ventricles',
                           data=aCompCor_ventricles)
        h5f.create_dataset(run + '/aCompCor_ventricles_deriv',
                           data=aCompCor_ventricles_deriv)

    ##########################################################
    ## Load motion parameters, and calculate motion spike regressors

    h5f.close()
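
The nuisance extraction above repeats the same two-step detrend, constant then linear along the time axis, for every tissue mask before averaging over voxels and taking a backward-difference derivative. A small helper capturing that pattern, assuming a 4-D fMRI array and a 3-D boolean mask:

import numpy as np
from scipy import signal


def mean_masked_timeseries(fmri4d, mask):
    """Mean time series of masked voxels after constant + linear detrending."""
    data = fmri4d[np.asarray(mask, dtype=bool)]            # (voxels, time)
    data = signal.detrend(data, axis=1, type='constant')   # remove each voxel's mean
    data = signal.detrend(data, axis=1, type='linear')     # then its linear drift
    ts = data.mean(axis=0)
    deriv = np.zeros_like(ts)
    deriv[1:] = ts[1:] - ts[:-1]                           # backward-difference derivative
    return ts, deriv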
Example #59
lon_s4, lat_s4 = np.meshgrid(hgt_s4.longitude.values, hgt_s4.latitude.values)
lon_erai, lat_erai = np.meshgrid(hgt_erai.longitude.values, hgt_erai.latitude.values)

#seasonal means
season = ['ASO', 'SON', 'OND', 'NDJ', 'DJF']
lmonth = ['Aug', 'Sep', 'Oct', 'Nov', 'Dec']
mm = [7, 8, 9, 10, 11, 0, 1]
#loop over seasons, selec data and performs all composites
for i in np.arange(0, 5):
	#hgt_erai_seas_mean = hgt_erai['z'].resample(time='QS-' + lmonth[i]).mean(dim='time',skipna=True)
	mes = datetime.datetime.strptime(lmonth[i], '%b').month
	#hgt_erai_smean = hgt_erai_seas_mean.sel(time= np.logical_and(hgt_erai_seas_mean['time.month'] == mes, hgt_erai_seas_mean['time.year']!=2002))
	hgt_erai_seas_mean = hgt_erai.sel(time= np.logical_and(hgt_erai['time.month'] == mes, hgt_erai['time.year']!=2002))
	hgt_erai_smean = hgt_erai_seas_mean.z.values
	hgt_erai_smean = np.nan_to_num(hgt_erai_smean, nan=np.nanmean(hgt_erai_smean))  # fill NaNs with the field mean
	hgt_erai_smean = signal.detrend(hgt_erai_smean, axis=0, type='linear')
	hgt_erai_EN = np.nanmean(hgt_erai_smean[index_ninio_erai_upper.values, :, :], axis=0)
	SS_erai_EN = np.nanvar(hgt_erai_smean[index_ninio_erai_upper.values, :, :],
			       axis=0) / np.sum(index_ninio_erai_upper.values)
	hgt_erai_LN = np.nanmean(hgt_erai_smean[index_ninio_erai_lower.values, :, :], axis=0)
	SS_erai_LN = np.nanvar(hgt_erai_smean[index_ninio_erai_lower.values, :, :],
			       axis=0) / np.sum(index_ninio_erai_lower.values)
	hgt_erai_N = np.nanmean(hgt_erai_smean[index_ninio_erai_normal.values, :, :], axis=0)
	hgt_erai_WSPV = np.nanmean(hgt_erai_smean[index_PV_erai_upper.values, :, :], axis=0)
	SS_erai_WSPV = np.nanvar(hgt_erai_smean[index_PV_erai_upper.values, :, :],
			       axis=0) / np.sum(index_PV_erai_upper.values)
	hgt_erai_SSPV = np.nanmean(hgt_erai_smean[index_PV_erai_lower.values, :, :], axis=0)
	SS_erai_SSPV = np.nanvar(hgt_erai_smean[index_PV_erai_lower.values, :, :],
			       axis=0) / np.sum(index_PV_erai_lower.values)
	hgt_erai_NSPV = np.nanmean(hgt_erai_smean[index_PV_erai_normal.values, :, :], axis=0)
	#hgt_s4_smean = np.nanmean(hgt_s4.z.values[i:i + 3, :, :, :], axis=0)	
Example #60
    def test_detrend(self, random, axis):

        x = random.normal(2, 1, (40, 20))
        x_out = signals.detrend(x, axis=axis)
        x_out_scipy = scipy_signal.detrend(x, axis=axis)
        assert np.array_equal(x_out, x_out_scipy)