Example #1
def check_preamble_properties(preamble, x_preamble):
    x_1st = x_preamble[0:len(x_preamble) // 2]
    x_2nd = x_preamble[-len(x_preamble) // 2:]
    if not np.all(np.abs(x_1st - x_2nd) < 1e-12):
        print(np.abs(x_1st - x_2nd))
        raise ValueError('preamble timeslots do not repeat!')
    from correlation import cross_correlate_naive, auto_correlate_halfs
    from utils import calculate_signal_energy
    x_ampl = np.sqrt(calculate_signal_energy(x_preamble))
    preamble *= x_ampl
    x_preamble *= x_ampl
    x_energy = calculate_signal_energy(x_preamble)
    if np.abs(2. * auto_correlate_halfs(x_preamble) / x_energy - 1.) > 1e-10:
        raise ValueError('auto correlating halfs of preamble fails!')

    print('normalized preamble xcorr val: ', np.correlate(x_preamble, x_preamble) / x_energy)
    print('windowed normalized preamble: ', np.correlate(preamble[-len(x_preamble):], x_preamble) / x_energy)
    fxc = np.correlate(preamble, x_preamble, 'full') / x_energy
    vxc = np.correlate(preamble, x_preamble, 'valid') / x_energy
    nxc = cross_correlate_naive(preamble, x_preamble) / x_energy
    import matplotlib.pyplot as plt

    plt.plot(np.abs(fxc))
    plt.plot(np.abs(vxc))
    plt.plot(np.abs(nxc))
    plt.show()
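The check above relies on an external correlation helper. A minimal sketch of what auto_correlate_halfs plausibly computes (an assumption, not the project's actual implementation) shows why 2 * ac / E should equal 1 when the two half-slots repeat exactly:

import numpy as np

def auto_correlate_halfs(x):
    # inner product of the first half with the conjugate of the second half
    h = len(x) // 2
    return np.sum(x[:h] * np.conj(x[h:]))

x = np.tile(np.exp(2j * np.pi * np.arange(8) / 8), 2)  # two identical halves
print(2. * auto_correlate_halfs(x) / np.sum(np.abs(x) ** 2))  # (1+0j)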
Example #2
def ccovf(x, y, unbiased=True, demean=True):
    ''' crosscovariance for 1D

    Parameters
    ----------
    x, y : arrays
       time series data
    unbiased : boolean
       if True, then the denominator is n-k, otherwise n

    Returns
    -------
    ccovf : array
        cross-covariance function

    Notes
    -----
    This uses np.correlate which does full convolution. For very long time
    series it is recommended to use fft convolution instead.
    '''
    n = len(x)
    if demean:
        xo = x - x.mean()
        yo = y - y.mean()
    else:
        xo = x
        yo = y
    if unbiased:
        xi = np.ones(n)
        d = np.correlate(xi, xi, 'full')
    else:
        d = n
    return (np.correlate(xo, yo, 'full') / d)[n - 1:]
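The docstring's note about long series can be acted on with FFT-based convolution. Here is a minimal sketch of an equivalent FFT version (ccovf_fft is a hypothetical name, not part of the original API):

import numpy as np
from scipy.signal import fftconvolve

def ccovf_fft(x, y, unbiased=True, demean=True):
    # same result as ccovf, but O(n log n): correlation equals convolution
    # with one input reversed
    n = len(x)
    xo = x - x.mean() if demean else x
    yo = y - y.mean() if demean else y
    d = np.arange(n, 0, -1) if unbiased else n  # n-k denominators, k = 0..n-1
    return fftconvolve(xo, yo[::-1], mode='full')[n - 1:] / d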
Example #3
 def _updateBuffer(self, v):
     """
     Keep a buffer of the running data and process it to determine if there is
     a peak. 
     """
     self._rtData.append(v)
     wndwCenter = int(np.floor(self._window / 2.0))
     # pop the end of the buffer
     if len(self._rtData) > self._window:
         self._rtData = self._rtData[1:]
         if self._isPeak:
             lm = self._rtData.findPeaks()
             for l in lm:
                 if l[0] == wndwCenter and l[1] > self._cutoff:
                     if self.doCorr:
                         corrVal = np.correlate(self._rtData.normalize(), self._template)
                         thresh = self.corrThresh[0] - self.corrStdMult * self.corrThresh[1]
                         if corrVal[0] > thresh:
                             self.count += 1
                     else:
                         self.count += 1
         else:
             lm = self._rtData.findValleys()
             for l in lm:
                 if l[0] == wndwCenter and l[1] < self._cutoff:
                     if self.doCorr:
                         corrVal = np.correlate(self._rtData.normalize(), self._template)
                         thresh = self.corrThresh[0] - self.corrStdMult * self.corrThresh[1]
                         if corrVal[0] > thresh:
                             self.count += 1
                     else:
                         self.count += 1
     return self.count
Example #4
    def plot_wavenvelope(self, ax, w_start, w_end):

        """ This function plots the envelope of the recording.

        :param ax: The axis in which you wish to plot.
        :param w_start: Start of the best window.
        :param w_end: End of the best window.
        """
        window_size = int(0.05 * self._sample_rate)  # 0.05 s = a 50 ms envelope window
        w = 1.0 * np.ones(window_size) / window_size
        envelope = (np.sqrt((np.correlate(self._eod ** 2, w, mode='same') -
                    np.correlate(self._eod, w, mode='same') ** 2)).ravel()) * np.sqrt(2.)
        upper_bound = np.max(envelope) + np.percentile(envelope, 1)
        ax.fill_between(self._time[::500], y1=-envelope[::500], y2=envelope[::500], color='purple', alpha=0.5)
        ax.plot((w_start, w_start), (-upper_bound, upper_bound), 'k--', linewidth=2)
        ax.plot((w_end, w_end), (-upper_bound, upper_bound), 'k--', linewidth=2)
        ax.text((w_start + w_end) / 2., upper_bound - np.percentile(envelope, 10), 'Analysis Window',
                rotation='horizontal', horizontalalignment='center', verticalalignment='center', fontsize=14)

        ax.set_ylim(-upper_bound, upper_bound)
        ax.set_xlabel('Time [s]', fontsize=16)
        ax.set_ylabel('Signal Amplitude [au]', fontsize=16)
        ax.tick_params(axis='both', which='major', labelsize=14)

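The envelope above is a running standard deviation obtained from two box-filter correlations, sqrt(E[x^2] - E[x]^2) per window; the trailing sqrt(2) converts the RMS of a roughly sinusoidal EOD into its peak amplitude. A self-contained sketch of the same idea:

import numpy as np

def running_std(x, window):
    # sliding-window sqrt(E[x^2] - E[x]^2) via two box-filter correlations
    w = np.ones(window) / window
    mean = np.correlate(x, w, mode='same')
    mean_sq = np.correlate(x ** 2, w, mode='same')
    return np.sqrt(np.maximum(mean_sq - mean ** 2, 0.0))  # clip rounding negatives

t = np.linspace(0., 1., 2000)
x = np.sin(2. * np.pi * 100. * t) * np.exp(-3. * t)
envelope = running_std(x, 100) * np.sqrt(2.)  # tracks the decaying amplitude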
Example #5
def get_best_time_window(data, samplerate, fundamental_frequency, eod_cycles):
    eod_peaks1, eod_peak_idx1, _, _ = peakdet(data)

    max_time = len(data) / samplerate
    time_for_eod_cycles_in_window = eod_cycles / fundamental_frequency

    if time_for_eod_cycles_in_window > max_time * .2:
        time_for_eod_cycles_in_window = max_time * .2
        warnings.warn("You are reqeusting a window that is too long. Using T=%f" % (time_for_eod_cycles_in_window,))

    sample_points_in_window = int(fundamental_frequency * time_for_eod_cycles_in_window)

    tApp = np.arange(len(data)) / samplerate
    w1 = np.ones(sample_points_in_window) / sample_points_in_window

    local_mean = np.correlate(eod_peaks1, w1, mode='valid')
    local_std = np.sqrt(np.correlate(eod_peaks1 ** 2., w1, mode='valid') - local_mean ** 2.)
    COV = local_std / local_mean

    mi = min(COV)
    for ind, j in enumerate(COV):
        if j == mi:
            v = (eod_peak_idx1[ind])

    idx = (tApp >= tApp[v]) & (tApp < tApp[v] + time_for_eod_cycles_in_window)
    tApp = tApp[idx]
    dat_app = data[idx]
    tApp = tApp - tApp[0]

    return tApp, dat_app
Example #6
File: bcv.py Project: sudughonge/bcv
def linearCouplingCoeff2(dataH, dataX, timeH, timeX, transFnXtoH, segStartTime,
			segEndTime, timeShift, samplFreq, logFid, debugLevel):
  # LINEARCOUPLINGCOEFF - calculate the cross correlation coefficient between the
  # gravitational wave channel H and the "projected" instrumental channel X. The
  # noise in the instrumental channel X is projected to the domain of H using a
  # linear coupling function Txh


  rXH = np.asarray([])
  rMaxXH = np.asarray([])
  if((len(dataH)==0) | (len(dataX)==0)):
    logFid.write('Error: One or more data vectors are empty..\n')
    logFid.write('Error: len(dataH) = %d len(dataX) = %d..\n' %(len(dataH), len(dataX)))
  
  elif(len(dataH)!=len(dataX[0])):
    logFid.write('Error: Different lengths. len(dataH) = %d len(dataX) = %d..\n'%(len(dataH), len(dataX[0])))
  else:
    dataH = dataH #- np.mean(dataH)
    dataX = dataX[0] #- np.mean(dataX[0])
    
    segIdxH = np.intersect1d(np.where(timeH>=segStartTime)[0], np.where(timeH<segEndTime)[0])
    dataH = dataH[segIdxH]
    
    segIdxX = np.intersect1d(np.where(timeX + timeShift >= segStartTime)[0], np.where(timeX + timeShift < segEndTime)[0])
    dataX = dataX[segIdxX]
    
    
    
    a = np.correlate(dataH, dataX)/(np.sqrt(np.correlate(dataH, dataH)*np.correlate(dataX, dataX)))
    rXH = np.append(rXH, a)
    rMaxXH = np.append(rMaxXH, a)
  return [rXH, rMaxXH]
Example #7
def determineDelay(source, target, maxdel=2**16, ax=None):
    '''
    Determine the delay between two signals
    (based on correlation extrema)

    Parameters:
    * Signals
      - source
      - target
    * maxdel: maximum delay to look for (in both directions)
    '''
    sample_start = 0
    xd = source[sample_start:sample_start+maxdel]
    yd = target[sample_start:sample_start+maxdel]
    Cxx = np.correlate(xd, xd, 'full')
    Cxy = np.correlate(yd, xd, 'full')
    Pkx = np.argmax(np.abs(Cxx))
    Pky = np.argmax(np.abs(Cxy))
    if ax:
        try:
            ax.plot(Cxx)
        except AttributeError:
            fig, ax = pl.subplots(1)
            ax.plot(Cxx)
        ax.plot(Cxy)
        ax.axvline(Pkx, color='red')
        ax.plot(Pky, Cxy[Pky], 'o')

    delay = Pky-Pkx
    return delay
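A hypothetical usage sketch, recovering a known 40-sample delay between a noise signal and its shifted copy:

import numpy as np

rng = np.random.default_rng(0)
source = rng.standard_normal(4096)
target = np.roll(source, 40)  # target lags source by 40 samples
print(determineDelay(source, target, maxdel=1024))  # expected: 40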
Example #8
def calculate_maxcrosscorrelation(reference_signal, unknown_signal):    
    
    '''
    function:
    ---------
    given a reference signal and an unknown signal, calculate the max cross correlation score. the higher the score,
    the more similar two signals are. 
    
    the max cross correlation score will be used to identify events.
    
    parameters:
    -----------
    @reference_signal: 150 unit numpy array, representing reference signal.
    @unknown_signal: 150 unit numpy array
    
    returns:
    --------
    @score: float between [0,1]; represents similarity between two curves. 
    '''
    
    # https://stackoverflow.com/questions/1289415/what-is-a-good-r-value-when-comparing-2-signals-using-cross-correlation
    x = max(np.correlate(reference_signal, reference_signal, 'full'))
    y = max(np.correlate(unknown_signal, unknown_signal, 'full'))
    z = max(np.correlate(reference_signal, unknown_signal, 'full'))
    score = (z ** 2) / float(x * y)
    
    return score
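A quick sanity check of the score (hypothetical usage): identical signals score exactly 1, while unrelated noise scores near 0:

import numpy as np

rng = np.random.default_rng(1)
a = rng.standard_normal(150)
b = rng.standard_normal(150)
print(calculate_maxcrosscorrelation(a, a))  # 1.0
print(calculate_maxcrosscorrelation(a, b))  # close to 0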
Example #9
def correlation(results={}, bin_size=100, N=1000):
	wait_time=0.
	print('N = ', N)
	nr_datasets = results ['max_ind']
	ind = 0
	for counter in np.arange(nr_datasets):
		dati = results [str(counter)]
		if (len(dati)>2*N+1):
			if (bin_size>1 ):
				b = bin_data (data = dati, bin_size = bin_size)
			else:
				b = dati
							
			t = np.arange(len(b))*bin_size*(20e-6+wait_time*1e-6)	
			mu = np.mean(b)
			sigma = np.std(b)
			corr = np.correlate (b-mu, b-mu, 'full')/(np.correlate(b-mu, b-mu)+0.)
			t_corr = (np.arange (len(corr))-len(corr)/2.)*(wait_time+20.)*1e-6*bin_size

			nn = len(corr)
			corr2 = corr[nn//2 - N:nn//2 + N]
			t_corr2 = t_corr[nn//2 - N:nn//2 + N]
			
			if (ind == 0):
				avg_corr = corr2			
			else:
				avg_corr = avg_corr+corr2
			ind = ind + 1
	
	avg_corr[N] = 0
	avg_corr = avg_corr/max(avg_corr)			
	return t_corr2, avg_corr
Example #10
	def correlateData(self,frameLimit):
		sample = []
		self.fh.seek((self.startFrame)*4128,0)
		steps = frameLimit // 10
		totalTime = datetime.now()
		print('Correlating [          ]', end='')
		print('\b'*12, end='')
		sys.stdout.flush()
		for p in range(frameLimit):
			startTime = datetime.now()
			frame = drx.readFrame(self.fh)		
			
			if frame.parseID()[1] == 1:
				self.realTune1 = self.realTune1 + numpy.correlate(frame.data.iq.real,self.template).tolist()
				self.imagTune1 = self.imagTune1 + numpy.correlate(frame.data.iq.imag,self.template).tolist()
			else:
				self.realTune2 = self.realTune2 + numpy.correlate(frame.data.iq.real,self.template).tolist()
				self.imagTune2 = self.imagTune2 + numpy.correlate(frame.data.iq.imag,self.template).tolist()
			if p%steps == 0:
				print('\b=', end='')
				sys.stdout.flush()
		print('\b] Done')
		self.startFrame += frameLimit	
		#self.fh.close()
		print('Read time: ' + str(datetime.now() - totalTime))
Example #11
def correlationIndividual(data, idx = (0,1), cls = -1, delay = (-100, 100)):
  """Calculate correlations and auto-correlation in time between the various measures"""

  n = len(idx)
  means = np.mean(data[:,:-1], axis = 0)

  nd = delay[1] - delay[0] + 1

  cc = np.zeros((nd,n,n))
  for i in range(n):
    for j in range(n):
        if delay[0] < 0:
          cm = np.correlate(data[:, i] - means[i], data[-delay[0]:, j] - means[j])
        else:
          cm = [0]

        if delay[1] > 0:
          cp = np.correlate(data[:, j] - means[j], data[delay[1]:, i] - means[i])
        else:
          cp = [0]

        ca = np.concatenate((cm[1:], cp[::-1]))

        if delay[0] > 0:
          cc[:,i,j] = ca[delay[0]:]
        elif delay[1] < 0:
          cc[:,i,j] = ca[:-delay[1]]
        else:
          cc[:,i,j] = ca

  return cc
Example #12
def _corr_ax1(input_image):
    """
    Internal helper function that finds the best estimate for the
    location of the vertical mirror plane.  For each row the maximum
    of the correlation with its mirror is found.  The most common value
    is reported back as the location of the mirror plane.

    Parameters
    ----------
    input_image : ndarray
        The input image

    Returns
    -------
    vals : ndarray
        histogram of what pixel has the highest correlation

    bins : ndarray
        Bin edges for the vals histogram
    """
    dim = input_image.shape[1]
    m_ones = np.ones(dim)
    norm_mask = np.correlate(m_ones, m_ones, mode='full')
    # not sure that the /2 is the correct correction
    est_by_row = [np.argmax(np.correlate(v, v[::-1],
                                         mode='full')/norm_mask) / 2
             for v in input_image]
    return np.histogram(est_by_row, bins=np.arange(0, dim + 1))
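A worked check of the per-row trick (assuming a row that is perfectly symmetric): the peak of a row correlated with its reverse lands at twice the mirror position, which is why the result is halved:

import numpy as np

v = np.array([0., 1., 3., 1., 0.])  # symmetric about index 2
peak = np.argmax(np.correlate(v, v[::-1], mode='full'))
print(peak / 2)  # 2.0, the mirror-plane column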
Example #13
 def find(self, target):
     if len(target) == 4:
         #check pattern d
         sum = 0
         for i in range(len(target)):
             sum += np.correlate(target[i], self.pd[i])[0]
         if sum >= self.threshold_expand['pd']:
             return True
         else:
             return False
         
     elif len(target) == 3:
         if len(target[0]) == 4:
             #check pattern c
             sum = 0
             for i in range(len(target)):
                 sum += np.correlate(target[i], self.pc[i])[0]
             if sum >= self.threshold_expand['pc']:
                 return True
             else:
                 return False
         elif len(target[0]) == 3:
             # common cases
             for k in self.threshold:
                 sum = 0
                 pt = k[0]
                 tr = k[1]
                 for i in range(len(target)):
                     sum += np.correlate(target[i], pt[i])[0]
                 if sum >= tr:
                     return True
             return False
Example #14
def chickling_corr(shotno, date=time.strftime("%Y%m%d"), bandwidth=40000):

	fname, data = file_finder(shotno,date)
		
	samplesize = int(np.unwrap(data[0]['phasediff_co2']).size/bandwidth)
	phase_avr_co2 = np.zeros(samplesize)
	phase_avr_hene = np.zeros(samplesize)

	#reshape the array of x points (20M for 1s) into a 2d array each with 40k segments.
	phasediff_co2 = np.reshape(np.unwrap(data[0]['phasediff_co2'][0:(samplesize*bandwidth)]),(samplesize,bandwidth))
	phasediff_hene = np.reshape(np.unwrap(data[0]['phasediff_hene'][0:(samplesize*bandwidth)]),(samplesize,bandwidth))

	#for each horizontal column perform an average
	for i in range(0,samplesize):
		phase_avr_co2[i] = np.mean(phasediff_co2[i])
		phase_avr_hene[i] = np.mean(phasediff_hene[i])

	x = np.linspace(0,1,samplesize)
	plt.figure("2 Channels | Blue = Scene | Orange = Reference | Green = Cross-Correlation | shot " + str(shotno) +  " Date " + str(date))
	plt.xlabel("Time, s")
	plt.ylabel("Phase Difference, Radians")
	plt.plot(x,phase_avr_co2-np.average(phase_avr_co2))
	plt.plot(x,phase_avr_hene-np.average(phase_avr_hene))

	a = (phase_avr_co2 - np.mean(phase_avr_co2)) / (np.std(phase_avr_co2) * len(phase_avr_co2))
	b = (phase_avr_hene - np.mean(phase_avr_hene)) / (np.std(phase_avr_hene))
	yc = np.correlate(a, b, 'full')
	print(np.correlate(a, b, 'valid'))
	xc = np.linspace(0,1,yc.size)
	plt.plot(xc,yc)#,'o',ms=0.4)
Example #15
    def findInserts(self):
        #interpolate to the base phantom slice thickness
        #X=numpy.linspace(0,self.phBase.slicethk*(self.slices*self.slicethk/self.phBase.slicethk),(self.slices*self.slicethk)/self.phBase.slicethk+1)
        X=numpy.arange(0,self.slices*self.slicethk,self.phBase.slicethk)
        Xp=numpy.linspace(0,(self.slices-1)*self.slicethk,self.slices)

        profileResc=numpy.interp(X,Xp,self.profile)
        profileRescMirror=numpy.fliplr([profileResc,numpy.zeros(len(profileResc))])[0,:]

        #find order of acquisition
        fwdcor=numpy.correlate(self.phBase.profile[:,1],profileResc,'full')
        rwdcor=numpy.correlate(self.phBase.profile[:,1],profileRescMirror,'full')

        reverse=False
        if numpy.amax(fwdcor)>=numpy.amax(rwdcor):
            shift=numpy.argmax(fwdcor)
        else:
            reverse=True
            shift=numpy.argmax(rwdcor)

        #align profile and base profile
        #get index of slices
        Xcor=(X/self.phBase.slicethk)-len(X)+1+shift

        #find phantom slice nearest to base inserts
        Inserts=["resolution","sliceThk","uniform","dgp"]
        for insert in Inserts:
            if (Xcor==self.phBase.inserts[insert][0]).any() or (Xcor==self.phBase.inserts[insert][1]).any():
                f=max(self.phBase.inserts[insert][0],Xcor[0])
                s=min(self.phBase.inserts[insert][1],Xcor[len(Xcor)-1])
                self.inserts[insert]=numpy.round(((numpy.array([f,s])+len(X)-1-shift)*float(self.phBase.slicethk))/float(self.slicethk))
                if reverse:
                    self.inserts[insert]=numpy.abs(self.inserts[insert]-self.slices+1)
                    (self.inserts[insert]).sort()
Example #16
def msd_fast(trajs):
    T = trajs.shape[1]
    N = trajs.shape[0]

    trajs2 = trajs ** 2

    msd = np.zeros((T))

    for n in range(N):
        r2 = np.zeros((T))
        rtau2 = np.zeros((T))

        # compute sums over squares of positions for r(t) and r(t+tau)
        for tau in range(T):
            r2[tau] = np.sum(trajs2[n][: (T - tau), 0] + trajs2[n][: (T - tau), 1])
            rtau2[tau] = np.sum(trajs2[n][tau:, 0] + trajs2[n][tau:, 1])

        # compute auto correlation
        corx = np.correlate(trajs[n][:, 0], trajs[n][:, 0], mode="full")[T - 1 :]
        cory = np.correlate(trajs[n][:, 1], trajs[n][:, 1], mode="full")[T - 1 :]
        cor = corx + cory

        msd += (rtau2 - 2 * cor + r2) / np.arange(T, 0, -1)

    msd = msd / N

    return msd
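A hypothetical cross-check against the direct O(T^2) MSD definition (msd_naive is not part of the original code) confirms the correlation-based identity <|r(t+tau) - r(t)|^2> = <r^2(t+tau)> - 2<r(t).r(t+tau)> + <r^2(t)>:

import numpy as np

def msd_naive(trajs):
    # straightforward O(T^2) mean-squared displacement for (N, T, 2) trajectories
    N, T, _ = trajs.shape
    msd = np.zeros(T)
    for tau in range(T):
        d = trajs[:, tau:, :] - trajs[:, : T - tau, :]
        msd[tau] = np.mean(np.sum(d ** 2, axis=2))
    return msd

rng = np.random.default_rng(0)
walk = rng.standard_normal((3, 64, 2)).cumsum(axis=1)  # three 2-D random walks
print(np.allclose(msd_naive(walk), msd_fast(walk)))  # True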
Example #17
def plot_acorr(x, ax=None, title="", xlabel="Shift", ylabel="",
               append_analysis=True):
    """Plot the autocorrelation

    If variance is too small (i.e. for a deterministic process),
    falls back to plotting autocovariance
    """
    x_centered = x - np.mean(x)
    x_var = np.var(x)
    x_len = len(x)
    x_centered_sample = x_centered[:int(x_len//2)]
    if len(np.unique(x.round(decimals=12))) > 1:
        # compute autocorrelation
        x_acorr = np.correlate(x_centered, x_centered_sample, 'valid')/x_var
        analysis_mode = "Autocorrelation"
    else:
        # if process is deterministic, autocorrelation is undefined
        # use the autocovariance instead
        x_acorr = np.correlate(x_centered, x_centered_sample, 'valid') 
        analysis_mode = "Autocovariance"

    if ax is None:
        fig, ax = plt.subplots(nrows=1, figsize=(12,3))
    ax.plot(x_acorr[:100], 'o')
    if append_analysis:
        ax.set_title(title+analysis_mode)
    else:
        ax.set_title(title)
    ax.set_xlabel(xlabel)
    ax.set_ylabel(ylabel)
    limit_ylim(ax)
    return ax
Example #18
def aligndata(baselineremoved, brightest, pulsar):
    nbins = baselineremoved.shape[0]
    nprofiles = baselineremoved.shape[1]
    template = baselineremoved[:,brightest]
    # rotate template to put peak at 1/4
    peakbin = np.argmax(template)
    fixedlag = int(nbins/4)-peakbin
    aligned = np.zeros((nbins,nprofiles))
    newtemplate = np.roll(template, fixedlag)
    template = newtemplate
    plt.plot(newtemplate)
    plt.savefig('./{0}/{0}_brightest.png' .format(pulsar))
    plt.clf()
    for i in range(nprofiles):
        xcorr = np.correlate(template,baselineremoved[:,i],"full")
        lag = np.argmax(xcorr)
        aligned[:,i] = np.roll(baselineremoved[:,i],lag)
    template = np.median(aligned,1)
    # repeat with better template now and shift peak to 1/4 of the profile
    peakbin = np.argmax(template)
    fixedlag = int(nbins/4)-peakbin
    double = np.zeros(2*nbins)
    for i in range(nprofiles):
        double[0:nbins] = baselineremoved[:,i]
        double[nbins:2*nbins] = baselineremoved[:,i]
#        xcorr = np.correlate(template,baselineremoved[:,i],"full")
        xcorr = np.correlate(template,double,"full")
        lag = np.argmax(xcorr) + fixedlag
        aligned[:,i] = np.roll(baselineremoved[:,i],lag)
        newtemplate = np.median(aligned,1)
    return np.array(aligned), np.array(newtemplate)
Example #19
def dbpsk_demod(rx_data, sample_rate, L):
    print "Demodulating Data...@", sample_rate
    time_seq = np.arange(0, len(rx_data), 1, dtype=float) / sample_rate
    two_pi_fc_t = 2 * np.pi * CenterFreq * time_seq
    # Filter out-of-band noise
    rx_inband = np.convolve(rx_data, bp_filt)
    N = len(rx_inband)
    # Downconvert I/Q channels into baseband signals
    rx_bb_i = np.multiply(rx_inband[SamplesPerSymbol // 2 : N - SamplesPerSymbol // 2], np.cos(two_pi_fc_t))
    rx_bb_q = np.multiply(rx_inband[SamplesPerSymbol // 2 : N - SamplesPerSymbol // 2], np.sin(two_pi_fc_t))
    # Filter any high frequency remnants
    audio_bb_i = np.convolve(rx_bb_i, lp_filt)[: L * SamplesPerSymbol * BitsPerChar]
    audio_bb_q = np.convolve(rx_bb_q, lp_filt)[: L * SamplesPerSymbol * BitsPerChar]
    decoded_bits = np.zeros(L * BitsPerChar)
    # Previous Phase and decode bit
    pp = 0
    pb = 0
    detected_bitstream = np.zeros(L * BitsPerChar, dtype=int)
    T = SamplesPerSymbol
    # Matched filter is just a rectangular pulse
    rect_pulse = np.ones(T)
    for demod in np.arange(L * BitsPerChar):
        sym_i = np.correlate(audio_bb_i[demod * T : (demod + 1) * T], rect_pulse, "full")[T]
        sym_q = np.correlate(audio_bb_q[demod * T : (demod + 1) * T], rect_pulse, "full")[T]
        cp = np.arctan(sym_q / sym_i)
        # print "Phase Diff:", cp-pp
        if np.abs(cp - pp) > 0.1:
            detected_bitstream[demod] = pb ^ 1
        else:
            detected_bitstream[demod] = detected_bitstream[demod - 1]
        pb = detected_bitstream[demod]
        pp = cp

    return detected_bitstream
Example #20
def plotSpectrum(data,timeR,Fourier=1,Corr=1,index = array([5,6]),figname='a.pdf',ind=1):

    [ind1,ind2] = index
    k = min([60*5, len(data[:,0])//3])

    y = data[k:,ind1]
    x = data[k:,ind2]
    time = timeR[k:]

    if Fourier:
        figure(1)
        subplot(2,1,1)
        hold(True)
        plot(time,y)
        plot(time,x)
        xlabel('Time')
        ylabel('Amplitude')
        subplot(2,1,2)
        Y = fft(y)
        Y = Y[1:]
        n= len(Y)

        #Crazy math in here
        powerY = abs(Y[arange(0,floor(n/2),dtype='int')])**2
        nyquist = 1./2.
        freq = arange(0,n/2,dtype='float')/(n/2)*nyquist
        period=1./freq
        # a is the number of points per time step (Depends on the interpolation in process - interp)
        a = 1
        plot(period/a,powerY)
        ylabel('Power')
        xlabel('Period (Min/oscillation)')
        #savefig(''.join([figname,".png"]), bbox_inches=0 ,dpi=100)


    if Corr:
        modeC = "same"
        x = (x - mean(x))/std(x)
        y =  (y - mean(y))/std(y)

        timeInt = time[1]-time[0]
        numPoints = len(time)

        fig2 = figure(2)
        fig2.set_size_inches(6*0.9,3.7*0.9*0.906)
        fig2.set_facecolor('white')

        ax2 = fig2.add_subplot(1,1,ind)
        n= correlate(y,y,modeC)
        d= correlate(y,x,modeC)
        lagR = (nonzero(n == max(n))[0] - nonzero(d == max(d))[0])*timeR[1]
        title("CC Act-DS %f Max corr: %f" %(lagR, max(d)/numPoints))
        ax2.plot(linspace(len(x)/2*timeInt,-len(x)/2*timeInt,len(x)),d/numPoints)
        xlim([-180,180])
        #ylim([-0.25,0.75])

        #savefig(''.join([figname,".png"]), bbox_inches=0 ,dpi=100)

    show()
Example #21
def get_scientist_series_from_txt(scientist_dict, dir):
    series_dict = {}
    correlation_list = []
    for scientist in scientist_dict:
        year_dict = {}
        scientist_series = []
        scientist = scientist.rstrip().split('/')[-1]
        filename = os.path.join(dir + '\\' + scientist + '.txt')
        try:
            f = open(filename)
            for line in f:
                time_list = list(map(float, line.split(',')))
                year = int(time_list.pop(0))
                if year > 2004 and year < 2016:
                    year_dict.update({year: len(time_list)})
                    scientist_series += time_list
            f.close()
        except IOError:
            #    print scientist
            continue
        if year_dict!={}:
            start_year = min(list(year_dict.keys()))
            end_year = max(list(year_dict.keys()))
            d = datetime.date(start_year, 1, 1)
            # 0 = Monday, 1=Tuesday, 2=Wednesday... We use Sunday as in Google Trends csv file every week starts from Sunday
            start_point = str(start_year) + '-01-01' 
            end_point = datetime.datetime(int(end_year), 1, 1) + datetime.timedelta(year_dict[end_year]-1)
            #Generate periods
            rng = pd.date_range(start_point, end_point, freq='D')
            #print scientist, start_point, len(rng)
            ts = pd.Series(scientist_series, index=rng)
            param_ts = numpy.array(ts.asfreq('W', method='pad').values, dtype = float)
            gt_series = get_google_trends_series(scientist)
            if gt_series!=[]:
                rng = pd.date_range('2004-01-04', '2016-06-26', freq='W')
                ts = pd.Series(gt_series, index=rng)
                rng = pd.date_range(start_point, end_point, freq='W')
                gtrends_ts = numpy.array(pd.Series(ts, index=rng).values, dtype = float)
                param_ts = running_mean(param_ts, 4)
                gtrends_ts = running_mean(gtrends_ts, 4)
               
# Plotting            
                plt.figure()
                plt.title(scientist)
                plt.plot(param_ts, label = 'edits')
                plt.plot(gtrends_ts, label = 'google_trends')
                plt.legend(loc='upper left')
                
                param_ts = (param_ts - numpy.mean(param_ts)) / (numpy.std(param_ts)* len(param_ts))
                gtrends_ts = (gtrends_ts - numpy.mean(gtrends_ts)) /  (numpy.std(gtrends_ts) )
                print(scientist, numpy.correlate(param_ts, gtrends_ts)[0])
                correlation_list.append(numpy.correlate(param_ts, gtrends_ts)[0])

    
    print('average correlation', numpy.mean(numpy.absolute(correlation_list)))
    print('min correlation', format(numpy.min(numpy.absolute(correlation_list)), 'f'))
    print('max correlation', format(numpy.max(numpy.absolute(correlation_list)), 'f'))
    plt.show()
    return
Example #22
 def adjust_for_phase_and_lag(self):
     # store a list of the l2-norms and the lags for each phase value
     norm_list = np.zeros((self.phases_vec.size,2))
     
     # loop through each phase value
     for pp in range(self.phases_vec.size):
         y = self.cur_signal_up.copy()
         y = y*np.exp(1j*self.phases_vec[pp])
         
         # Adjust for magnitude setting
         if self.sig_type == 'm':
             y = self.sig_mag(y)
         
         # compute autocorrelation
         if self.filter_on:
             cur_corr = np.correlate(y,self.avg_complex_cir,mode='full')
         else:
             cur_corr = np.correlate(y,self.ref_complex_cir,mode='full')
         opt_lag = -self.lag_vec[np.argmax(self.sig_mag(cur_corr)).flatten()[0]]
         norm_list[pp,0] = opt_lag
         
         # Shift the signal to adjust for any lag
         if opt_lag > 0:
             y = np.array(((0.+1j*0.)*np.ones(opt_lag)).tolist() + y[0:-opt_lag].tolist())
         elif opt_lag < 0:
             y = np.array(y[-opt_lag:].tolist() + ((0.+1j*0.)*np.ones(-opt_lag)).tolist())
         
         # Adjust for magnitude setting
         if self.sig_type == 'm':
             y = self.sig_mag(y)
         
         # Compute the l2-norm
         if self.filter_on:
             tmp = y - self.avg_complex_cir
         else:
             tmp = y - self.ref_complex_cir
         
         # Save l2-norm to list
         norm_list[pp,1] = self.sig_mag(tmp).sum()
     
     # Get the index of the smallest l2-norm
     min_idx = np.argmin(norm_list[:,1]).flatten()[0]
     
     # Adjust for phase and lag
     y = self.cur_signal_up.copy()
     y = y*np.exp(1j*self.phases_vec[min_idx])
     opt_lag = int(norm_list[min_idx, 0])  # int needed for np.ones and slicing below
     
     # Shift the signal to adjust for any lag
     if opt_lag > 0:
         self.cur_signal_up = np.array(((0+1j*0)*np.ones(opt_lag)).tolist() + y[0:-opt_lag].tolist())
     elif opt_lag < 0:
         self.cur_signal_up = np.array(y[-opt_lag:].tolist() + ((0+1j*0)*np.ones(-opt_lag)).tolist())
     else:
         self.cur_signal_up = y.copy()
         
     # Adjust for magnitude setting
     if self.sig_type == 'm':
         self.cur_signal_up = self.sig_mag(self.cur_signal_up)
Example #23
File: oned.py Project: zkbt/zachopy
def acf(y):
	'''Calculate the autocorrelation function of an array,
		returning an array of lags and an array with the acf.'''

	a = np.correlate(y,y,'full')
	trimmed = a[len(a) // 2:]
	lag = np.arange(len(trimmed))
	return lag, trimmed/np.correlate(y,y)
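Hypothetical usage: for white noise the normalized acf is 1 at lag 0 and small everywhere else:

import numpy as np

rng = np.random.default_rng(2)
lag, r = acf(rng.standard_normal(1000))
print(r[0])                 # 1.0: zero-lag value normalized by the signal energy
print(np.abs(r[1:]).max())  # small (roughly 0.1 for n = 1000)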
Example #24
def sync_index_correlate(a, b):
    a = a - a.mean()
    b = b - b.mean()
    a = a / a.max()
    b = b / b.max()
    same = np.correlate(a, a)[0]
    other = np.correlate(a, b)[0]
    return other / same
Example #25
File: TP2.py Project: zwang04/EDTS
def correlate(X, Y):
    m = X.size
    n = Y.size
    correction = np.correlate(np.ones((m,)), np.ones((n,)), "full")
    rXY = np.correlate(X, Y, "full")
    rXYc = rXY / correction
    rXYC = rXYc[m - 1:]  # keep non-negative lags
    return rXYC
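A worked check (hypothetical usage): correlating a constant signal with itself gives a triangular raw correlation, and dividing by the overlap count flattens it to 1 at every kept lag:

import numpy as np

print(correlate(np.ones(4), np.ones(4)))  # [1. 1. 1. 1.] for lags 0..3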
Example #26
File: makeFilters.py Project: bmazin/SDR
def makeCausalWiener(template, rawPulse, nTaps=50):
    #template = template[tempOffs:tempOffs+nTaps*2]
    #rawPulse = rawPulse[tempOffs:tempOffs+nTaps*2]
    rawPulse = rawPulse/np.max(np.abs(rawPulse))
    crossCorr = np.correlate(template, rawPulse[0:-nTaps+1])
    autoCorr = np.correlate(rawPulse, rawPulse[0:-nTaps+1])
    firCoeffs = sp.linalg.solve_toeplitz(autoCorr, crossCorr) 
    return firCoeffs/np.max(np.abs(firCoeffs))
Example #27
def max_ccors(X):
    N = np.correlate(np.ones(X.shape[1]),np.ones(X.shape[1]),'full')
    ac = [np.correlate(X[i],X[i])[0] for i in range(X.shape[0])]
    mcc = []
    for i in range(X.shape[0]):
        for j in range(i):
            cc = np.correlate(X[i],X[j],'full')/N*1./np.sqrt(ac[i]*ac[j]+1e-6)
            mcc.append(cc.max())
    return np.array(mcc)
Example #28
def parameter_autocor(sessions, population_fit, param = 'side'):
    ''' Evaluate within and cross subject variability in 
    specified parameter and autocorrelation across sessions.
    '''

    assert len(population_fit['MAP_fits']) == len(sessions), \
        'Population fit does not match number of sessions.'

    param_index = population_fit['param_names'].index(param)
    for i, MAP_fit in enumerate(population_fit['MAP_fits']):
        sessions[i].side_loading = MAP_fit['params_U'][param_index]

    sIDs = list(set([s.subject_ID for s in sessions]))

    p.figure(1)
    p.clf()
    p.subplot2grid((2,2),(0,0), colspan = 2)
    subject_means = [] 
    subject_SDs = []
    cor_len = 20
    subject_autocorrelations = np.zeros([len(sIDs), 2 * cor_len + 1])
    subject_shuffled_autocor = np.zeros([len(sIDs), 2 * cor_len + 1, 1000])
    for i, sID in enumerate(sIDs):
        a_sessions = sorted([s for s in sessions if s.subject_ID == sID],
                            key = lambda s:s.day)
        sl = [s.side_loading for s in a_sessions]
        p.plot(sl)
        subject_means.append(np.mean(sl))
        subject_SDs.append(np.std(sl))
        sl = (np.array(sl) - np.mean(sl)) / np.std(sl)
        autocor = np.correlate(sl, sl, 'full') / len(sl)
        subject_autocorrelations[i,:] = autocor[autocor.size//2 - cor_len:
                                                autocor.size//2 + cor_len + 1]
        for j in range(1000):
            shuffle(sl)
            autocor = np.correlate(sl, sl, 'full') / len(sl)
            subject_shuffled_autocor[i,:,j] = autocor[autocor.size//2 - cor_len:
                                                autocor.size//2 + cor_len + 1]


    mean_shuffled_autocors = np.mean(subject_shuffled_autocor,0)
    mean_shuffled_autocors.sort(1)

    p.xlabel('Day')
    p.ylabel('Subject rotational bias')
    p.subplot2grid((2,2),(1,0))
    p.fill_between(range(-cor_len, cor_len + 1),mean_shuffled_autocors[:,10],
                   mean_shuffled_autocors[:,-10], color = 'k', alpha = 0.2)
    p.plot(range(-cor_len, cor_len + 1),np.mean(subject_autocorrelations,0),'b.-', markersize = 5)
    p.xlabel('Lag (days)')
    p.ylabel('Correlation')
    p.subplot2grid((2,2),(1,1))
    p.bar([0.5,1.5], [np.mean(subject_SDs), np.sqrt(np.var(subject_means))])
    p.xticks([1,2], ['Within subject', 'Cross subject'])
    p.xlim(0.25,2.5)
    p.ylabel('Standard deviation')
Example #29
    def test_correlation(self):
        numpy.set_printoptions(precision=3,suppress=True)
        # Create 2 vectors of scores, zero everywhere except a random position
        N = 10
        x = numpy.zeros(N)
        y = numpy.zeros(N)
        xpeak = numpy.random.randint(0,N)
        ypeak = numpy.random.randint(0,N)
        x[xpeak] = 10
        y[ypeak] = 10
        x = (x-numpy.mean(x))/numpy.std(x)
        y = (y-numpy.mean(y))/numpy.std(y)

        # Make tracks out of them and compute cross-correlation with our own function
        X = [('chr',k,k+1,s) for k,s in enumerate(x)]
        Y = [('chr',k,k+1,s) for k,s in enumerate(y)]
        X = fstream(iter(X),fields=['chr','start','end','score'])
        Y = fstream(iter(Y),fields=['chr','start','end','score'])
        corr = correlation([X,Y], regions=(0,N))#, limits=[-N+1,N-1])

        # Compute cross-correlation "by hand" and using numpy.correlate(mode='valid')
        raw = []
        np_corr_valid = []
        for k in range(N):
            """
            X         |- - - - -|          k=0
            Y              <- |- - - - -|
            up to
            X         |- - - - -|          k=4
            Y         |- - - - -|
            """
            raw.append(numpy.dot(x[-k-1:],y[:k+1]) / N)
            np_corr_valid.extend(numpy.correlate(x[-k-1:],y[:k+1],mode='valid'))
        for k in range(N-1,0,-1):
            """
            X         |- - - - -|          k=4
            Y    <- |- - - - -|
            up to
            X         |- - - - -|          k=1
            Y |- - - - -|
            """
            raw.append(numpy.dot(x[:k],y[-k:]) / N)
            np_corr_valid.extend(numpy.correlate(x[:k],y[-k:],mode='valid'))

        # Compute cross-correlation using numpy.correlate(mode='full')
        np_corr_full = numpy.correlate(x,y,mode="full")[::-1] / N
        np_corr_valid = numpy.asarray(np_corr_valid) / N

        # Test if all methods yield the same result
        assert_almost_equal(corr, numpy.asarray(raw))
        assert_almost_equal(corr, np_corr_full)
        assert_almost_equal(corr, np_corr_valid)

        # Test if the lag between the two tracks is correctly detected
        self.assertEqual(numpy.argmax(corr)-(N-1), ypeak-xpeak)
Example #30
def correlate_envelope_signal(signal, amp_envelope, n_surrogates=100,
                              random_state=None):
    """ Correlate the amplitude envelope with the signal.

    Parameters
    ----------
    signal : ndarrray (n_times)
        Low freq filtered signal.
    amp_envelope: ndarray (n_times)
        Amplitude envelope of high freq signal.
    n_surrogates : int
        Number of surrogates to be computed.
    random_state : int
        Seed value for random generator.

    Returns
    -------
    xcorr : ndarray (n_times)
        Cross correlation of the two signals.
    xcorr_surrogates : ndarray (n_surrogates)
        Cross correlation of the surrogate signals.
    max_surr : ndarray (n_surrogates)
        Maximum value of surrogate cross correlation.
    z_threshold : float
        Threshold value after z-scoring.
    """

    from sklearn.utils import check_random_state

    xcorr = np.correlate(signal, amp_envelope, 'full')
    xcorr_surrogates = np.zeros((n_surrogates, xcorr.size))
    max_surr = np.zeros((n_surrogates, 1))

    rng = check_random_state(random_state)  # initialize random generator
    for i in range(0, n_surrogates):
        order = np.argsort(rng.randn(len(amp_envelope)))
        xcorr_surrogates[i, :] = np.correlate(signal, amp_envelope[order], 'full')
        max_surr[i] = np.max(np.abs(xcorr_surrogates[i, :]))

    # compute some statistics
    #NOTE Needs to be rechecked. I want to check if the xcorr values
    # can come from the surrogates values (i.e. reject the null hypothesis
    # that xcorr computed is random.
    max_surr_mean = np.mean(max_surr)
    max_surr_std = np.std(max_surr)
    # compute zscores
    zscores = (xcorr - max_surr_mean) / max_surr_std
    from scipy import stats
    p_values = stats.norm.pdf(zscores)
    # perform fdr correction and compute threshold
    accept, _ = mne.stats.fdr_correction(p_values, alpha=0.001)
    z_threshold = np.abs(zscores[accept]).min()

    return xcorr, xcorr_surrogates, max_surr, z_threshold
Example #31
def spec_flex_shift(obj_skyspec, arx_skyspec, mxshft=20):
    """ Calculate shift between object sky spectrum and archive sky spectrum

    Args:
        obj_skyspec (:class:`linetools.spectra.xspectrum1d.XSpectrum1d`):
            Spectrum of the sky related to our object
        arx_skyspec (:class:`linetools.spectra.xspectrum1d.XSpectrum1d`):
            Archived sky spectrum
        mxshft (float, optional):
            Maximum allowed shift from flexure;  note there are cases that
            have been known to exceed even 30 pixels..

    Returns:
        dict: Contains flexure info
    """

    # TODO None of these routines should have dependencies on XSpectrum1d!

    # Determine the brightest emission lines
    msgs.warn("If we use Paranal, cut down on wavelength early on")
    arx_amp, arx_amp_cont, arx_cent, arx_wid, _, arx_w, arx_yprep, nsig \
            = arc.detect_lines(arx_skyspec.flux.value)
    obj_amp, obj_amp_cont, obj_cent, obj_wid, _, obj_w, obj_yprep, nsig_obj \
            = arc.detect_lines(obj_skyspec.flux.value)

    # Keep only 5 brightest amplitude lines (xxx_keep is array of
    # indices within arx_w of the 5 brightest)
    arx_keep = np.argsort(arx_amp[arx_w])[-5:]
    obj_keep = np.argsort(obj_amp[obj_w])[-5:]

    # Calculate wavelength (Angstrom per pixel)
    arx_disp = np.append(
        arx_skyspec.wavelength.value[1] - arx_skyspec.wavelength.value[0],
        arx_skyspec.wavelength.value[1:] - arx_skyspec.wavelength.value[:-1])
    obj_disp = np.append(
        obj_skyspec.wavelength.value[1] - obj_skyspec.wavelength.value[0],
        obj_skyspec.wavelength.value[1:] - obj_skyspec.wavelength.value[:-1])

    # Calculate resolution (lambda/delta lambda_FWHM)..maybe don't need
    # this? can just use sigmas
    arx_idx = (arx_cent + 0.5).astype(int)[arx_w][arx_keep]  # The +0.5 is for rounding
    arx_res = arx_skyspec.wavelength.value[arx_idx]/\
              (arx_disp[arx_idx]*(2*np.sqrt(2*np.log(2)))*arx_wid[arx_w][arx_keep])
    obj_idx = (obj_cent + 0.5).astype(int)[obj_w][obj_keep]  # The +0.5 is for rounding
    obj_res = obj_skyspec.wavelength.value[obj_idx]/ \
              (obj_disp[obj_idx]*(2*np.sqrt(2*np.log(2)))*obj_wid[obj_w][obj_keep])

    if not np.all(np.isfinite(obj_res)):
        msgs.warn(
            'Failed to measure the resolution of the object spectrum, likely due to error '
            'in the wavelength image.')
        return None
    msgs.info("Resolution of Archive={0} and Observation={1}".format(
        np.median(arx_res), np.median(obj_res)))

    # Determine sigma of gaussian for smoothing
    arx_sig2 = np.power(arx_disp[arx_idx] * arx_wid[arx_w][arx_keep], 2)
    obj_sig2 = np.power(obj_disp[obj_idx] * obj_wid[obj_w][obj_keep], 2)

    arx_med_sig2 = np.median(arx_sig2)
    obj_med_sig2 = np.median(obj_sig2)

    if obj_med_sig2 >= arx_med_sig2:
        smooth_sig = np.sqrt(obj_med_sig2 - arx_med_sig2)  # Ang
        smooth_sig_pix = smooth_sig / np.median(arx_disp[arx_idx])
        arx_skyspec = arx_skyspec.gauss_smooth(smooth_sig_pix * 2 *
                                               np.sqrt(2 * np.log(2)))
    else:
        msgs.warn("Prefer archival sky spectrum to have higher resolution")
        smooth_sig_pix = 0.
        msgs.warn("New Sky has higher resolution than Archive.  Not smoothing")
        #smooth_sig = np.sqrt(arx_med_sig**2-obj_med_sig**2)

    #Determine region of wavelength overlap
    min_wave = max(np.amin(arx_skyspec.wavelength.value),
                   np.amin(obj_skyspec.wavelength.value))
    max_wave = min(np.amax(arx_skyspec.wavelength.value),
                   np.amax(obj_skyspec.wavelength.value))

    #Smooth higher resolution spectrum by smooth_sig (flux is conserved!)
    #    if np.median(obj_res) >= np.median(arx_res):
    #        msgs.warn("New Sky has higher resolution than Archive.  Not smoothing")
    #obj_sky_newflux = ndimage.gaussian_filter(obj_sky.flux, smooth_sig)
    #    else:
    #tmp = ndimage.gaussian_filter(arx_sky.flux, smooth_sig)
    #        arx_skyspec = arx_skyspec.gauss_smooth(smooth_sig_pix*2*np.sqrt(2*np.log(2)))
    #arx_sky.flux = ndimage.gaussian_filter(arx_sky.flux, smooth_sig)

    # Define wavelengths of overlapping spectra
    keep_idx = np.where((obj_skyspec.wavelength.value >= min_wave)
                        & (obj_skyspec.wavelength.value <= max_wave))[0]
    #keep_wave = [i for i in obj_sky.wavelength.value if i>=min_wave if i<=max_wave]

    #Rebin both spectra onto overlapped wavelength range
    if len(keep_idx) <= 50:
        msgs.warn("Not enough overlap between sky spectra")
        return None

    # rebin onto object ALWAYS
    keep_wave = obj_skyspec.wavelength[keep_idx]
    arx_skyspec = arx_skyspec.rebin(keep_wave)
    obj_skyspec = obj_skyspec.rebin(keep_wave)
    # Trim edges (rebinning is junk there)
    arx_skyspec.data['flux'][0, :2] = 0.
    arx_skyspec.data['flux'][0, -2:] = 0.
    obj_skyspec.data['flux'][0, :2] = 0.
    obj_skyspec.data['flux'][0, -2:] = 0.

    # Normalize spectra to unit average sky count
    norm = np.sum(obj_skyspec.flux.value) / obj_skyspec.npix
    obj_skyspec.flux = obj_skyspec.flux / norm
    norm2 = np.sum(arx_skyspec.flux.value) / arx_skyspec.npix
    arx_skyspec.flux = arx_skyspec.flux / norm2
    if norm < 0:
        msgs.warn("Bad normalization of object in flexure algorithm")
        msgs.warn("Will try the median")
        norm = np.median(obj_skyspec.flux.value)
        if norm < 0:
            msgs.warn("Improper sky spectrum for flexure.  Is it too faint??")
            return None
    if norm2 < 0:
        msgs.warn(
            'Bad normalization of archive in flexure. You are probably using wavelengths '
            'well beyond the archive.')
        return None

    # Deal with bad pixels
    msgs.work("Need to mask bad pixels")

    # Deal with underlying continuum
    msgs.work("Consider taking median first [5 pixel]")
    everyn = obj_skyspec.npix // 20
    bspline_par = dict(everyn=everyn)
    mask, ct = utils.robust_polyfit(obj_skyspec.wavelength.value,
                                    obj_skyspec.flux.value,
                                    3,
                                    function='bspline',
                                    sigma=3.,
                                    bspline_par=bspline_par)
    obj_sky_cont = utils.func_val(ct, obj_skyspec.wavelength.value, 'bspline')
    obj_sky_flux = obj_skyspec.flux.value - obj_sky_cont
    mask, ct_arx = utils.robust_polyfit(arx_skyspec.wavelength.value,
                                        arx_skyspec.flux.value,
                                        3,
                                        function='bspline',
                                        sigma=3.,
                                        bspline_par=bspline_par)
    arx_sky_cont = utils.func_val(ct_arx, arx_skyspec.wavelength.value,
                                  'bspline')
    arx_sky_flux = arx_skyspec.flux.value - arx_sky_cont

    # Consider sharpness filtering (e.g. LowRedux)
    msgs.work("Consider taking median first [5 pixel]")

    #Cross correlation of spectra
    #corr = np.correlate(arx_skyspec.flux, obj_skyspec.flux, "same")
    corr = np.correlate(arx_sky_flux, obj_sky_flux, "same")

    #Create array around the max of the correlation function for fitting for subpixel max
    # Restrict to pixels within maxshift of zero lag
    lag0 = corr.size // 2
    #mxshft = settings.argflag['reduce']['flexure']['maxshift']
    max_corr = np.argmax(corr[lag0 - mxshft:lag0 + mxshft]) + lag0 - mxshft
    subpix_grid = np.linspace(max_corr - 3., max_corr + 3., 7)

    #Fit a 2-degree polynomial to peak of correlation function. JFH added this if/else to not crash for bad slits
    if np.any(np.isfinite(corr[subpix_grid.astype(int)])):
        fit = utils.func_fit(subpix_grid, corr[subpix_grid.astype(int)],
                             'polynomial', 2)
        success = True
        max_fit = -0.5 * fit[1] / fit[2]
    else:
        fit = utils.func_fit(subpix_grid, 0.0 * subpix_grid, 'polynomial', 2)
        success = False
        max_fit = 0.0
        msgs.warn('Flexure compensation failed for one of your objects')

    #Calculate and apply shift in wavelength
    shift = float(max_fit) - lag0
    msgs.info("Flexure correction of {:g} pixels".format(shift))
    #model = (fit[2]*(subpix_grid**2.))+(fit[1]*subpix_grid)+fit[0]

    return dict(polyfit=fit,
                shift=shift,
                subpix=subpix_grid,
                corr=corr[subpix_grid.astype(int)],
                sky_spec=obj_skyspec,
                arx_spec=arx_skyspec,
                corr_cen=corr.size / 2,
                smooth=smooth_sig_pix,
                success=success)
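A worked check of the subpixel step above: for samples of a known parabola, the vertex recovered from a degree-2 fit is -0.5 * fit[1] / fit[2], matching the formula used in the code (np.polynomial is used here in place of the project's utils.func_fit):

import numpy as np

grid = np.linspace(7., 13., 7)
samples = -(grid - 9.7) ** 2 + 5.  # known peak at 9.7
c0, c1, c2 = np.polynomial.polynomial.polyfit(grid, samples, 2)
print(-0.5 * c1 / c2)  # ~9.7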
Example #32
 def time_correlate(self, size1, size2, mode):
     np.correlate(self.x1, self.x2, mode=mode)
Example #33
def calc_offset_plot(filename, resolution=1, scale=1.0):

    Load(Filename='/SNS/CORELLI/IPTS-12008/nexus/' + filename + '.nxs.h5',
         OutputWorkspace=filename,
         LoadMonitors='1',
         MonitorsAsEvents='1')
    LoadInstrument(Workspace=filename,
                   Filename='/SNS/users/rwp/CORELLI_Definition.xml')
    ScaleX(InputWorkspace=filename + '_monitors',
           OutputWorkspace=filename + '_monitors',
           Factor=str(scale))
    Rebin(InputWorkspace=filename + '_monitors',
          OutputWorkspace=filename + '_monitors',
          Params=str(resolution))

    w = mtd[filename]
    sequence = list(map(
        float,
        w.getInstrument().getComponentByName(
            'correlation-chopper').getStringParameter('sequence')[0].split()))
    freq = round(
        w.getRun().getProperty("BL9:Chop:Skf4:MotorSpeed").timeAverageValue())
    delay = w.getRun().getProperty(
        "BL9:Chop:Skf4:PhaseTimeDelaySet").timeAverageValue()

    print(filename, 'MotorSpeed =', freq, 'Hz', 'PhaseTimeDelaySet =', delay, 'uS')

    if freq % 60 != 0:
        print('Frequency not a multiple of 60.')
        return

    sequence2 = sequence
    for i in range(int(freq / 60) - 1):
        sequence2 = np.append(sequence2, sequence)

    m = mtd[filename + '_monitors']
    x = m.extractX()[1]
    y = m.extractY()[1]

    s = np.cumsum(sequence2)
    chopper = np.zeros(len(x) - 1)
    l = len(chopper)

    for n in range(l):
        if np.searchsorted(s, (
            (x[n] + x[n + 1]) / 2 / 16666.67) * 360. * freq / 60.) % 2 == 1:
            chopper[n] = 1

    chopper2 = np.zeros(len(chopper) * 2)
    chopper2 = np.append(chopper, chopper)

    #y=np.roll(chopper,1337)

    corr = np.correlate(y, chopper2)
    r = np.argmax(corr)
    r2 = (x[r] + x[r + 1]) / 2

    #print x[-1]
    print "Chopper sequence offset = ", r2, "uS"

    chopper = np.roll(chopper, r)

    chopper[0] = 0
    chopper[-1] = 0

    lb = 2500
    ub = 13500
    fig = plt.figure(1, figsize=(14, 6))
    plt.title(filename + ' (' + str(freq) + 'Hz)')
    #plt.plot(x[lb:ub],y[lb:ub])
    #plt.plot(x[lb:ub],chopper[lb:ub]*y.max()*0.5)
    plt.plot(x[:-1], y)
    plt.plot(x[:-1], chopper * y.max() * 0.5)
    plt.xlabel('ToF (uS)')
    plt.xlim([lb, ub])
    fig.savefig('/SNS/users/rwp/' + filename + '.png')
    fig.clf()

    #chopper[lb]=0.0
    #chopper[ub-1]=0.0
    fig = plt.figure(1, figsize=(14, 6))
    plt.title(filename + ' (' + str(freq) + 'Hz)')
    #plt.fill(x[lb:ub],chopper[lb:ub]*y.max(),color = '0.85')
    #plt.plot(x[lb:ub],y[lb:ub])
    plt.fill(x[:-1], chopper * y.max(), color='0.85')
    plt.plot(x[:-1], y)
    plt.xlabel('ToF (uS)')
    plt.xlim([lb, ub])
    fig.savefig('/SNS/users/rwp/' + filename + '_fill.png')
    fig.clf()

    lb = 5000
    ub = 8000
    fig = plt.figure(1, figsize=(14, 6))
    plt.title(filename + ' (' + str(freq) + 'Hz)')
    plt.plot(x[:-1], y)
    plt.plot(x[:-1], chopper * y.max() * 0.5)
    plt.xlabel('ToF (uS)')
    plt.xlim([lb, ub])
    fig.savefig('/SNS/users/rwp/' + filename + '_2.png')
    fig.clf()

    #chopper[lb]=0.0
    #chopper[ub-1]=0.0
    fig = plt.figure(1, figsize=(14, 6))
    plt.title(filename + ' (' + str(freq) + 'Hz)')
    plt.fill(x[:-1], chopper * y.max(), color='0.85')
    plt.plot(x[:-1], y)
    plt.xlabel('ToF (uS)')
    plt.xlim([lb, ub])
    fig.savefig('/SNS/users/rwp/' + filename + '_fill_2.png')
    fig.clf()

    Rebin(InputWorkspace=filename + '_monitors',
          OutputWorkspace=filename + '_monitors',
          Params='5')
    Load(Filename=r'/SNS/CORELLI/IPTS-12008/nexus/CORELLI_306.nxs.h5',
         OutputWorkspace='CORELLI_306',
         LoadMonitors='1',
         MonitorsAsEvents='1')
    ScaleX(InputWorkspace='CORELLI_306_monitors',
           OutputWorkspace='CORELLI_306_monitors',
           Factor=str(scale))
    Rebin(InputWorkspace='CORELLI_306_monitors',
          OutputWorkspace='CORELLI_306_monitors',
          Params='5')
    Divide(LHSWorkspace=filename + '_monitors',
           RHSWorkspace='CORELLI_306_monitors',
           OutputWorkspace='out',
           WarnOnZeroDivide='0')
    norm = mtd['out']
    #ox=norm.extractX()[1]
    #oy=norm.extractY()[1]

    #lb=3000
    #ub=13000
    #fig = plt.figure(1,figsize=(14,6))
    #plt.title(filename+' ('+str(freq)+'Hz) Normalized')
    #plt.fill(x[lb:ub],chopper[lb:ub]*y[lb:ub].max(),color = '0.85')
    #plt.plot(ox[lb:ub],oy[lb:ub])
    #plt.xlabel('ToF (uS)')
    #plt.xlim([lb,ub])
    #fig.savefig('/SNS/users/rwp/'+filename+'_norm.png')
    #fig.clf()

    #return (freq,delay,r2,corr.max())
    return (x, y, chopper)
Example #34
def main():
    '''Main Driver routine for pdcompare - reads a collection of spreadsheets with spectra and compares the spectra'''
    def show_usage():
        '''Show pdcompare usage'''
        print('pdcompare file1 file2...')
        print(
            'pdcompare: Calculates the cross-correlation of spectra calculated using pdielec'
        )
        print(
            '         -column column  Take the data for cross correlation from column'
        )
        print('                         C Averaged')
        print('                         D MG      ')
        print('                         E Mie/0.1 ')
        print('                         F Mie/1.0 ')
        print('                         G Mie/2.0 ')
        print('                         H Mie/3.0 ')
        print('         -rmax rmax  Use rows from rmin to rmax              ')
        print(
            '         -rmin rmin  Use rows from rmin to rmax (rows start from 2)'
        )
        print('         -sheet [molar/absorption/real/imaginary/atr]')
        print('         -excel filename')
        return

    # check usage
    if len(sys.argv) <= 1:
        show_usage()
        exit()

    sheet_dict = {
        "molar": "Molar Absorption",
        "absorption": "Absorption",
        "real": "Real Permittivity",
        "imaginary": "Imaginary Permittivity",
        "atr": "ATR Reflectance",
    }
    # Begin processing of command line
    command_line = ' '.join(sys.argv)
    tokens = sys.argv[1:]
    ntokens = len(tokens)
    itoken = 0
    names = []
    rmin = 0
    rmax = 0
    column = 'D'
    sheet = 'molar'
    excelfile = ''
    while itoken < ntokens:
        token = tokens[itoken]
        if token == '-rmin':
            itoken += 1
            rmin = int(tokens[itoken])
        elif token == '-rmax':
            itoken += 1
            rmax = int(tokens[itoken])
        elif token == '-excel':
            itoken += 1
            excelfile = tokens[itoken]
        elif token == '-column':
            itoken += 1
            column = tokens[itoken]
        elif token == '-sheet':
            itoken += 1
            sheet = tokens[itoken]
        else:
            names.append(token)
        itoken += 1
        # end loop over tokens

    if len(names) <= 0:
        print('No files were specified')
        show_usage()
        exit(1)

    print('Comparison based on ', sheet, sheet_dict[sheet])
    sheet = sheet_dict[sheet]
    print('Comparison of spectra based on column: ', column)
    if excelfile != '':
        print('Output will be sent to the excel file: ', excelfile)
    size = len(names)
    columns = []
    lags = np.zeros((size, size))
    correlations = np.eye(size)
    # Use the first file name to define the frequency range
    # and the range of rows to be treated
    wb1 = load_workbook(filename=names[0], read_only=True)
    ws1 = wb1[sheet]
    max_rows1 = ws1.max_row
    max_cols1 = ws1.max_column
    print('Maximum number of rows', max_rows1)
    print('Maximum number of cols', max_cols1)
    # rmax and rmin are set by the first spread sheet
    if rmin == 0:
        rmin = 2
    if rmax == 0:
        rmax = max_rows1
    range1 = '{}{}'.format('B', rmin)
    range2 = '{}{}'.format('B', rmax)
    frequencies = np.array([[i.value for i in j] for j in ws1[range1:range2]])
    frequencies = frequencies[:, 0]
    freq_scale = (frequencies[1] - frequencies[0])
    print('Frequencies', frequencies)
    print('Frequency scale', freq_scale)
    # Go through the file names and store the required column of numbers
    for f1_name in names:
        print('Loading work book ', f1_name)
        wb1 = load_workbook(filename=f1_name, read_only=True)
        # print('Work sheet names for ',f1_name)
        # print(wb1.get_sheet_names())
        ws1 = wb1[sheet]
        range1 = '{}{}'.format(column, rmin)
        range2 = '{}{}'.format(column, rmax)
        col1 = np.array([[i.value for i in j] for j in ws1[range1:range2]])
        # Convert to a 1D array
        col1 = col1[:, 0]
        # Store the normalised signal
        col1 = (col1 - np.mean(col1)) / (np.std(col1) * np.sqrt(len(col1)))
        columns.append(col1)
        # print(columns[-1])
    # Print the new row min and max
    print('Final rmin is ', rmin)
    print('Final rmax is ', rmax)
    for i, col1 in enumerate(columns):
        for j, col2 in enumerate(columns):
            if i > j:
                #print('Correlation',i,j)
                # Calculate correlation using same and full seem to produce the same results
                correlation = np.correlate(col1, col2, mode='full')
                lag = np.argmax(correlation) - (len(correlation) - 1) / 2
                #print('Old lag',lag,len(correlation))
                lags[i, j] = lag
                lags[j, i] = lags[i, j]
                correlations[i, j] = np.max(correlation)
                correlations[j, i] = correlations[i, j]
                #print(correlation)
                #print('Lag = ',lags[i,j])
                #print('Maximum = ',correlations[i,j])
        # end for j,f2
    # end for i, f1
    print('Lags (steps)')
    print(lags)
    lags = freq_scale * lags
    print('Lags (cm-1)')
    print(lags)
    print('correlations')
    print(correlations)
    if excelfile != "":
        import xlsxwriter as xlsx
        workbook = xlsx.Workbook(excelfile)
        worksheet = workbook.add_worksheet('Lags_cm1')
        for i, name1 in enumerate(names):
            worksheet.write(0, i + 1, name1)
            worksheet.write(i + 1, 0, name1)
            for j, name2 in enumerate(names):
                worksheet.write(j + 1, i + 1, lags[j, i])
                worksheet.write(i + 1, j + 1, lags[i, j])
        worksheet = workbook.add_worksheet('Correlations')
        for i, name1 in enumerate(names):
            worksheet.write(0, i + 1, name1)
            worksheet.write(i + 1, 0, name1)
            for j, name2 in enumerate(names):
                worksheet.write(j + 1, i + 1, correlations[j, i])
                worksheet.write(i + 1, j + 1, correlations[i, j])
        print('Finished writing the spreadsheet to ', excelfile)
        workbook.close()
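A quick check of the lag convention used above, argmax(correlation) - (len(correlation) - 1) / 2: two unit impulses k steps apart recover a lag of -k (or +k, depending on argument order):

import numpy as np

a = np.zeros(32)
b = np.zeros(32)
a[10] = 1.
b[14] = 1.
corr = np.correlate(a, b, mode='full')
print(np.argmax(corr) - (len(corr) - 1) / 2)  # -4.0: a's peak occurs 4 steps before b's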
Example #35
def autocorrelation(x):
    n = x.shape[0]
    x = x - x.mean()
    r = np.correlate(x, x, mode='full')[-n:]
    return r / (x.var() * np.arange(n, 0, -1))
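
Assuming the autocorrelation function above is in scope, a quick check on white
noise (made-up data) shows the expected behaviour: exactly 1 at lag 0 and
values of order 1/sqrt(n) at the early lags:

import numpy as np

rng = np.random.default_rng(0)
x = rng.standard_normal(10000)
r = autocorrelation(x)
print(r[0])  # -> 1.0 by construction
print(np.abs(r[1:100]).max())  # small, on the order of 1/sqrt(10000)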
Example #36
0
 def corr_preamble(self, buf):
     corr = np.correlate(buf, self.search_preamble)
     return np.argmax(corr)
Example #37
0
 def test_twosided(self):
     '''Test the two-sided version of ``corr``.'''
     a = np.arange(10)
     c_cpp = asignal.acorr(a, mode='twosided', max_lag=5)
     c_np = np.correlate(a, a, mode='full')[::-1][a.size - 6:a.size + 5]
     np.testing.assert_allclose(c_cpp, c_np, rtol=RTOL)
Example #38
0
def pltxcorr(self,
             x,
             y,
             normed=True,
             detrend=detrend_none,
             usevlines=True,
             maxlags=10,
             **kwargs):
    """
    call signature::

        def xcorr(self, x, y, normed=True, detrend=detrend_none,
          usevlines=True, maxlags=10, **kwargs):

    Plot the cross correlation between *x* and *y*.  If *normed* =
    *True*, normalize the data by the cross correlation at 0-th
    lag.  *x* and *y* are detrended by the *detrend* callable
    (default: no detrending).  *x* and *y* must be equal length.

    Data are plotted as ``plot(lags, c, **kwargs)``

    Return value is a tuple (*lags*, *c*, *line*) where:

      - *lags* are a length ``2*maxlags+1`` lag vector

      - *c* is the ``2*maxlags+1`` auto correlation vector

      - *line* is a :class:`~matplotlib.lines.Line2D` instance
         returned by :func:`~matplotlib.pyplot.plot`.

    The default *linestyle* is *None* and the default *marker* is
    'o', though these can be overridden with keyword args.  The
    cross correlation is performed with :func:`numpy.correlate`
    with *mode* = 2.

    If *usevlines* is *True*:

       :func:`~matplotlib.pyplot.vlines`
       rather than :func:`~matplotlib.pyplot.plot` is used to draw
       vertical lines from the origin to the xcorr.  Otherwise the
       plotstyle is determined by the kwargs, which are
       :class:`~matplotlib.lines.Line2D` properties.

       The return value is a tuple (*lags*, *c*, *linecol*, *b*)
       where *linecol* is the
       :class:`matplotlib.collections.LineCollection` instance and
       *b* is the *x*-axis.

    *maxlags* is a positive integer detailing the number of lags to show.
    If set to *None*, all ``(2*len(x)-1)`` lags will be returned.

    **Example:**

    :func:`~matplotlib.pyplot.xcorr` above, and
    :func:`~matplotlib.pyplot.acorr` below.

    **Example:**

    .. plot:: mpl_examples/pylab_examples/xcorr_demo.py
    """

    Nx = len(x)
    if Nx != len(y):
        raise ValueError('x and y must be equal length')

    x = detrend(np.asarray(x))
    y = detrend(np.asarray(y))

    c = np.correlate(x, y, mode=2)

    if normed:
        c /= np.sqrt(np.dot(x, x) * np.dot(y, y))

    if maxlags is None:
        maxlags = Nx - 1

    if maxlags >= Nx or maxlags < 1:
        raise ValueError('maxlags must be None or strictly '
                         'positive < %d' % Nx)

    lags = np.arange(-maxlags, maxlags + 1)
    c = c[Nx - 1 - maxlags:Nx + maxlags]

    if usevlines:
        a = self.vlines(lags, [0], c, **kwargs)
        b = self.axhline(**kwargs)
        kwargs.setdefault('marker', 'o')
        kwargs.setdefault('linestyle', 'None')
        d = self.plot(lags, c, **kwargs)
    else:

        kwargs.setdefault('marker', 'o')
        kwargs.setdefault('linestyle', 'None')
        a, = self.plot(lags, c, **kwargs)
        b = None
    return lags, c, a, b
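
Stripped of the plotting, the numerical core of pltxcorr fits in a few lines;
the helper below is a sketch (the name xcorr_values is made up) for when only
the lag and correlation arrays are needed:

import numpy as np

def xcorr_values(x, y, maxlags=10):
    # Same computation as pltxcorr above, minus the plotting.
    x = np.asarray(x, dtype=float)
    y = np.asarray(y, dtype=float)
    n = len(x)
    c = np.correlate(x, y, mode='full')  # mode=2 is 'full'
    c /= np.sqrt(np.dot(x, x) * np.dot(y, y))  # normalize by zero-lag energy
    lags = np.arange(-maxlags, maxlags + 1)
    return lags, c[n - 1 - maxlags:n + maxlags]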
Example #39
0
# header = list(data)
# del header[0]
#
# for name in header:
#     data[name + '_sq'] = np.square(data[name])
#     test_data[name + '_sq'] = np.square(test_data[name])
#
# data.to_csv('ls_data_squared.csv', index=False)
# test_data.to_csv('ls_test_data_squared.csv', index=False)
#
# """
# Square root data
# """
#
# data = pd.read_csv(path + 'ls_data.csv')
# test_data = pd.read_csv(path + 'ls_test_data.csv')
#
# for name in header:
#     data[name + '_sq'] = np.sqrt(data[name])
#     test_data[name + '_sq'] = np.sqrt(test_data[name])
#
# data.to_csv('ls_data_sqrt.csv', index=False)
# test_data.to_csv('ls_test_data_sqrt.csv', index=False)

data = pd.read_csv(path + 'test_data.csv')

numbers = np.random.normal(size=(100, 1))
numbers2 = np.random.normal(size=(100, 1))

a = np.correlate(numbers[:, 0], numbers[:, 0], "same")
Example #40
0
 def _correlate(arr1, arr2):
     """Use ``np.correlate()`` on ``mode='same'`` on two selected arrays
     from one input.
     """
     return np.correlate(arr1, arr2, mode='same')
Example #41
0
import numpy as np
import matplotlib.pyplot as plt


def cross_correlation(first, second):
    # 'full'-mode cross-correlation computed by hand; for these inputs it
    # matches np.correlate(second, first, 'full').
    num_of_vals = len(first) + len(second) - 1
    corr = np.zeros(num_of_vals)
    index = 0
    for x in range(num_of_vals):
        val = x - len(second) + 1
        for i in range(len(first)):
            if len(second) > (i + val) >= 0:
                p = first[i] * second[i + val]
                corr[index] += p
        index += 1
    return corr


vectora = np.array([3, 2, 1])
vectorb = np.array([6, 5, 4])

# testing that np.correlate and our code produces same result
our_xcorr_arr = np.array(cross_correlation(vectora, vectorb))
builtin_xcorr_arr = np.correlate([6, 5, 4], [3, 2, 1], 'full')

np.testing.assert_equal(our_xcorr_arr, builtin_xcorr_arr)
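
Note the swapped argument order in the assertion above: for real inputs,
exchanging the arguments of np.correlate reverses the 'full' output, so
cross_correlation(vectora, vectorb) matches np.correlate(vectorb, vectora,
'full'):

np.testing.assert_equal(np.correlate(vectora, vectorb, 'full'),
                        np.correlate(vectorb, vectora, 'full')[::-1])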

#3

# plotting w/noise
t = np.arange(0, 2, 0.01)
x = np.sin(t * (20 * np.pi)) + np.random.rand(len(t))

fig = plt.figure()
plt.xlabel('Time (Seconds)')
plt.ylabel('Amplitude')
plt.title('sin(20*pi*t) + rand(len(t))')
plt.plot(x[:len(t)])
plt.show()
Example #42
0
        nmaxlist.append(nmax)
        #Thus giving us the best lag time
        maxstime = stlist[maxnorm]
        smax.append(maxstime)
        #So now we run the unnormalised cross-correlation, which requires mode to be set to 'full'
        #First changing our time windows
        slow1 = t71 + maxstime
        shi1 = t71 + maxstime + 800
        low2 = t72
        hi2 = t72 + 800

        #Need to trim the traces and taper them before correlating, using a DPSS this time
        taparr1, sigwindow = sleptap(trorig, slow1, shi1, slepper, slepwid)
        taparr2, sigwindow2 = sleptap(tr2orig, low2, hi2, slepper, slepwid)

        sigunnorm.append(correlate(taparr1, taparr2, mode='full'))
        #And we also want to run unnormalised cross correlations on noise windows, placing each one in a list of lists
        nois1low1 = t71 + maxstime - 2700
        nois1hi1 = t71 + maxstime - 1900
        nois1low2 = t72 - 2700
        nois1hi2 = t72 - 1900
        nois2low1 = t71 + maxstime - 1800
        nois2hi1 = t71 + maxstime - 1000
        nois2low2 = t72 - 1800
        nois2hi2 = t72 - 1000
        nois3low1 = t71 + maxstime - 900
        nois3hi1 = t71 + maxstime - 100
        nois3low2 = t72 - 900
        nois3hi2 = t72 - 100

        #Performing trimming and tapering on noise windows
Example #43
0
            cD = abs(cD[::(2**(levels - loop - 1))])

            cD = cD - numpy.mean(cD)
            # 6) Recombine the signal before ACF
            #    essentially, each level I concatenate
            #    the detail coefs (i.e. the HPF values)
            #    to the beginning of the array
            cD_sum = cD[0:cD_minlen] + cD_sum

        cA = signal.lfilter([0.01], [1 - 0.99], cA)
        cA = abs(cA)
        cA = cA - numpy.mean(cA)
        cD_sum = cA[0:cD_minlen] + cD_sum

        # ACF
        correl = numpy.correlate(cD_sum, cD_sum, 'full')
        # integration...
        if window_ndx == 1:
            accum_correl = numpy.zeros(len(correl))
        accum_correl = (correl + accum_correl) / len(correl)

        #iterate at the end of the loop
        window_ndx = window_ndx + 1
        samps_ndx = samps_ndx + window_step_size

    # Peak detection
    accum_zero = len(correl) // 2
    correl_final = accum_correl[accum_zero:]

    min_ndx = 60. / 220 * (fs / max_decimation)
    max_ndx = 60. / 40 * (fs / max_decimation)
Example #44
0
 def test_default_params(self):
     '''Test default parameters.'''
     a = np.arange(10)
     c_cpp = asignal.acorr(a)
     c_np = np.correlate(a, a, mode='full')[::-1][a.size - 1:]
     np.testing.assert_allclose(c_cpp, c_np, rtol=RTOL)
Example #45
0
 def test_twosided(self):
     '''Test the two-sided version of ``corr``.'''
     for a1, a2 in _data_generator(self.maxLoops, self.maxN):
         c_cpp = asignal.corr(a1, a2, mode='twosided')
         c_np = np.correlate(a1, a2, mode='full')[::-1]
         np.testing.assert_allclose(c_cpp, c_np, rtol=RTOL)
Example #46
0
def norm_corr(x, y, mode='valid'):
    """Returns the correlation between two ndarrays, by calling np.correlate
    with the given mode (default 'valid') and normalizing the result by the
    standard deviations of the arrays and by the signal length. For a
    zero-mean signal this gives a correlation of 1 for an auto-correlation."""

    return (np.correlate(x, y, mode) / (np.std(x) * np.std(y) * (x.shape[-1])))
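
Assuming norm_corr above is in scope, a zero-mean test signal (made up here)
auto-correlates to approximately 1:

import numpy as np

x = np.sin(np.linspace(0, 20 * np.pi, 1000))
print(norm_corr(x, x))  # -> [~1.0] for a zero-mean signal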
Example #47
0
def calc_rv_template(spect,wave,sig, template_dir,bad_intervals,smooth_distance=101, \
    gaussian_offset=1e-4,nwave_log=1e4,oversamp=1,fig_fn='',convolve_template=True,\
    starnumber=0, plotit=False, save_figures=False, save_dir='./', heliocentric_correction=0.):
    """Compute a radial velocity based on an best fitting template spectrum.
    Teff is estimated at the same time.
    
    Parameters
    ----------
    spect: array-like
        The reduced WiFeS spectrum
        
    wave: array-like
        The wavelengths corresponding to the reduced WiFeS spectrum
        
    template_dir: string or list
        Either a glob pattern matching template spectra convolved to 0.1
        Angstrom resolution, or an explicit list of template filenames
        
    bad_intervals: list
        List of wavelength intervals where e.g. telluric absorption is bad.
        
    smooth_distance: float
        Distance to smooth for "continuum" correction
        
    oversamp: float
        Oversampling of the input wavelength scale. The slit is assumed 2 pixels wide.
    
    gaussian_offset: float
        Offset for the likelihood function from a Gaussian normalised to 1. 

        
    Returns
    -------
    rv: float
        Radial velocity in km/s
    rv_sig: float
        Uncertainty in radial velocity (NB assumes good model fit)
    temp: int
        Temperature of model spectrum used for cross-correlation.
    """
    if isinstance(template_dir, list):
        template_fns = template_dir
    else:
        template_fns = glob.glob(template_dir)

    #ADD IN HELIOCENTRIC CORRECTION SOMEWHERE:
    #Make the Heliocentric correction...
    #rv += h['RADVEL']

    #Interpolate the target and template spectra.
    (wave_log, spect_int, sig_int,
     template_ints) = interpolate_spectra_onto_log_grid(
         spect,
         wave,
         sig,
         template_fns,
         bad_intervals=bad_intervals,
         smooth_distance=smooth_distance,
         convolve_template=convolve_template,
         nwave_log=nwave_log)

    #Do a cross-correlation to the nearest "spectral pixel" for each template
    drv = np.log(wave_log[1] / wave_log[0]) * 2.998e5
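    # On a logarithmic wavelength grid one pixel is a constant velocity step,
    # dv = c * d(ln lambda), so drv above is the velocity per pixel in km/s.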
    rvs = np.zeros(len(template_fns))
    peaks = np.zeros(len(template_fns))
    for i, template_fn in enumerate(template_fns):
        template_int = template_ints[i]
        if save_figures == True:
            plt.clf()
            plt.plot(wave_log, template_int, label='template')
            plt.plot(wave_log, spect_int, label='spectrum')
            plt.title('Template no.' + str(i + 1))
            plt.savefig(save_dir + 'spectrum_vs_template_' +
                        template_fns[i].split('/')[-1].split('.fits')[0] +
                        '.png')
            plt.clf()
        cor = np.correlate(spect_int, template_int, 'same')
        ##here it's a good idea to limit where the peak cross-correlation can be: only search for a peak within 100 pixels of rv=0
        ## that's an RV range of about -778 to +778 km/s for the default spacings in the code
        peaks[i] = np.max(cor[int(nwave_log / 2) - 100:int(nwave_log / 2) +
                              100]) / np.sqrt(np.sum(np.abs(template_int)**2))
        rvs[i] = (np.argmax(cor[int(nwave_log / 2) - 100:int(nwave_log / 2) +
                                100]) - 100) * drv
        if starnumber == 0:
            print('Correlating Template ' + str(i + 1) + ' out of ' +
                  str(len(template_fns)))
        if starnumber > 0:
            print('Correlating Template ' + str(i + 1) + ' out of ' +
                  str(len(template_fns)) + ' for star ' + str(starnumber))
        this_rvs = drv * (np.arange(2 * smooth_distance) - smooth_distance)
        correlation = cor[int(nwave_log / 2) - 100:int(nwave_log / 2) +
                          100] / np.sqrt(np.sum(np.abs(template_int)**2))
        best_ind = np.argmax(correlation)
        print("best RV for template " + str(i + 1) + " is " +
              str(this_rvs[best_ind + 1] + heliocentric_correction))
        if save_figures == True:
            plt.clf()
            plt.plot(this_rvs[1:-1], correlation / np.max(correlation))
            plt.title('Correlation_with_template_no.' + str(i + 1))
            plt.savefig(save_dir + 'Correlation_with_template_no' +
                        str(i + 1) + '.png')
            plt.clf()

    #Find the best cross-correlation.
    ix = np.argmax(peaks)
    print("BEST TEMPLATE:" + template_fns[ix].split('/')[-1])

    #Recompute and plot the best cross-correlation
    template_int = template_ints[ix, :]
    cor = np.correlate(spect_int, template_int, 'same')
    plt.clf()
    plt.plot(
        drv * (np.arange(2 * smooth_distance) - smooth_distance),
        cor[int(nwave_log / 2) - smooth_distance:int(nwave_log / 2) +
            smooth_distance])

    ##store the figure data for later use
    outsave = np.array([
        drv * (np.arange(2 * smooth_distance) - smooth_distance),
        cor[int(nwave_log / 2) - smooth_distance:int(nwave_log / 2) +
            smooth_distance]
    ])
    saveoutname = fig_fn.split('.png')[0] + "_figdat.pkl"
    pickle.dump(outsave, open(saveoutname, "wb"))

    plt.xlabel('Velocity (km/s)')
    plt.ylabel('X Correlation')
    #plt.show()
    fn_ix = template_fns[ix].rfind('/')
    #Dodgy! Need a better way to find a name for the template.
    fn_ix_delta = template_fns[ix][fn_ix:].find(':')
    if fn_ix_delta > 0:
        name = template_fns[ix][fn_ix + 1:fn_ix + fn_ix_delta]
        name_string = name
        #A little messy !!!
        if name[0] == 'p':
            name = name[1:]
            name_string = 'T = ' + name + ' K'
    name_string = template_fns[ix][fn_ix + 1:]

    #pdb.set_trace()
    #Fit for a precise RV... note that minimize (rather than minimize_scalar) failed more
    #often for spectra that were not good matches.
    modft = np.fft.rfft(template_int)
    #res = op.minimize(rv_fit_mlnlike,rvs[ix]/drv,args=(modft,spect_int,sig_int,gaussian_offset))
    #x = res.x[0]
    #res = op.minimize_scalar(rv_fit_mlnlike,args=(modft,spect_int,sig_int,gaussian_offset),bounds=((rvs[ix]-1)/drv,(rvs[ix]+1)/drv))
    #x = res.x
    #fval = res.fun
    x, fval, ierr, numfunc = op.fminbound(rv_fit_mlnlike,
                                          rvs[ix] / drv - 5 / drv,
                                          rvs[ix] / drv + 5 / drv,
                                          args=(modft, spect_int, sig_int,
                                                gaussian_offset),
                                          full_output=True)
    rv = x * drv
    rv += heliocentric_correction
    ##best model
    shifted_mod = np.fft.irfft(
        modft *
        np.exp(-2j * np.pi * np.arange(len(modft)) / len(spect_int) * x))
    #pdb.set_trace()
    fplus = rv_fit_mlnlike(x + 0.5, modft, spect_int, sig_int, gaussian_offset)
    fminus = rv_fit_mlnlike(x - 0.5, modft, spect_int, sig_int,
                            gaussian_offset)
    hess_inv = 0.5**2 / (fplus + fminus - 2 * fval)
    if (hess_inv < 0) | (fplus < fval) | (fminus < fval):
        #If you get here, then there is a problem with the input spectrum or fitting.
        #raise UserWarning
        print(
            "WARNING: Radial velocity fit did not work - trying again with wider range for: "
            + fig_fn)
        x, fval, ierr, numfunc = op.fminbound(rv_fit_mlnlike,
                                              rvs[ix] / drv - 10 / drv,
                                              rvs[ix] / drv + 10 / drv,
                                              args=(modft, spect_int, sig_int,
                                                    gaussian_offset),
                                              full_output=True)
        rv = x * drv
        #print("RV ="+str(rv)+", fval ="+str(fval))
        fplus = rv_fit_mlnlike(x + 0.5, modft, spect_int, sig_int,
                               gaussian_offset)
        #print("fplus ="+str(fplus))
        fminus = rv_fit_mlnlike(x - 0.5, modft, spect_int, sig_int,
                                gaussian_offset)
        #print("fminus ="+str(fminus))
        hess_inv = 0.5**2 / (fplus + fminus - 2 * fval)
        #print("hess_inv ="+str(hess_inv))
        #import pdb
        #pdb.set_trace()

        if (hess_inv < 0) | (fplus < fval) | (fminus < fval):
            print(
                "WARNING: Radial velocity fit did not work, giving up with NaN uncertainty"
            )

    rv_sig = np.sqrt(hess_inv * nwave_log / len(spect) / oversamp) * drv

    plt.title('RV, RV_sigma:' + str(rv) + ',' + str(rv_sig))
    plt.savefig(save_dir + 'Best_correlation_temp_' +
                template_fns[ix].split('/')[-1] + '.png')
    plt.title(name_string +
              ', RV = {0:4.1f}+/-{1:4.1f} km/s'.format(rv, rv_sig))
    if len(fig_fn) > 0:
        plt.savefig(fig_fn)
    plt.clf()
    plt.plot(wave_log, spect_int)
    plt.plot(wave_log, shifted_mod)
    plt.xlim([6400.0, 6700.0])
    plt.title(name_string +
              ', RV = {0:4.1f}+/-{1:4.1f} km/s'.format(rv, rv_sig))
    if len(fig_fn) > 0:
        fig_fn_new = fig_fn.split('_xcor.png')[0] + 'fitplot.png'
        plt.savefig(fig_fn_new)
    #again save the figure data for use later in making nicer plots with IDL
    outsave = np.array([wave_log, spect_int, shifted_mod])
    saveoutname = fig_fn.split('_xcor.png')[0] + 'fitplot_figdat.pkl'
    pickle.dump(outsave, open(saveoutname, "wb"))
    # pdb.set_trace()
    return rv, rv_sig, template_fns[ix].split('/')[-1]
Example #48
0
s = np.cumsum(sequence2)
chopper = np.zeros(len(x) - 1)
chopper_n = np.zeros(len(x) - 1)
l = len(chopper)

for n in range(l):
    i = np.searchsorted(s * scale_m2,
                        ((x[n] + x[n + 1]) / 2 / 16666.67) * 360. * freq / 60.)
    chopper_n[n] = i
    if i % 2 == 1:
        chopper[n] = 1

chopper2 = np.zeros(len(chopper) * 2)
chopper2 = np.append(chopper, chopper)

corr = np.correlate(y, chopper2)
r = np.argmax(corr)
r2 = (x[r] + x[r + 1]) / 2

print "Chopper sequence offset = ", r2, "uS"

s = np.cumsum(sequence2)
m3chopper = np.zeros(len(x) - 1)
m3chopper_n = np.zeros(len(x) - 1)
l = len(m3chopper)

for n in range(l):
    i = np.searchsorted(s * scale_m3,
                        ((x[n] + x[n + 1]) / 2 / 16666.67) * 360. * freq / 60.)
    m3chopper_n[n] = i
    if i % 2 == 1:
Example #49
0
import matplotlib as mpl
mpl.rc('font', size=14,
       weight='bold')  #set default font size and weight for plots

plt.figure(figsize=(20, 5))
plt.plot(x)
plt.xlabel("Observation")
plt.title("Time Series of Random Normal Data")
plt.show()

# We can calculate the autocorrelation function using the `np.correlate()` function. Note that this function computes the autocovariance (except it does not divide by $N$), not the autocorrelation, so we have to make some adjustments to convert to autocorrelation. In this case, we simply divide by $N$, because we are using random data drawn from a standard normal distribution, i.e. $\sigma$ = 1.

# In[3]:

# calculate the autocorrelation of x
acorr = np.correlate(x, x / len(x), 'same')
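
# For data whose variance is not 1, the equivalent adjustment is to demean the
# series and divide the full autocovariance by N times the sample variance,
# which normalizes the zero-lag value to 1 (a sketch, same x as above):

xc = x - np.mean(x)
acorr_general = np.correlate(xc, xc, 'same') / (len(xc) * np.var(xc))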

# To visualize the autocorrelation function, we plot it as a function of lag, $\tau$.

# In[4]:

# plot the autocorrelation as a function of lag
fig = plt.figure(figsize=(10, 5))

# define a variable for lag
t = np.arange(0, len(x), 1)
lag = [i - len(t) // 2 for i in t]

# plot acorr against lag
plt.axhline(0)
plt.axvline(0)
plt.plot(lag, acorr)
plt.xlabel(r'lag ($\tau$)')
plt.show()
Example #50
0
def synchronize(signal1=None, signal2=None):
    """Align two signals based on cross-correlation.

    Parameters
    ----------
    signal1 : array
        First input signal.
    signal2 : array
        Second input signal.

    Returns
    -------
    delay : int
        Delay (number of samples) of 'signal1' in relation to 'signal2';
        if 'delay' < 0 , 'signal1' is ahead in relation to 'signal2';
        if 'delay' > 0 , 'signal1' is delayed in relation to 'signal2'.
    corr : float
        Value of maximum correlation.
    synch1 : array
        Biggest possible portion of 'signal1' in synchronization.
    synch2 : array
        Biggest possible portion of 'signal2' in synchronization.

    """

    # check inputs
    if signal1 is None:
        raise TypeError("Please specify the first input signal.")

    if signal2 is None:
        raise TypeError("Please specify the second input signal.")

    n1 = len(signal1)
    n2 = len(signal2)

    # correlate
    corr = np.correlate(signal1, signal2, mode='full')
    x = np.arange(-n2 + 1, n1, dtype='int')
    ind = np.argmax(corr)

    delay = x[ind]
    maxCorr = corr[ind]

    # get synchronization overlap
    if delay < 0:
        c = min([n1, len(signal2[-delay:])])
        synch1 = signal1[:c]
        synch2 = signal2[-delay:-delay + c]
    elif delay > 0:
        c = min([n2, len(signal1[delay:])])
        synch1 = signal1[delay:delay + c]
        synch2 = signal2[:c]
    else:
        c = min([n1, n2])
        synch1 = signal1[:c]
        synch2 = signal2[:c]

    # output
    args = (delay, maxCorr, synch1, synch2)
    names = ('delay', 'corr', 'synch1', 'synch2')

    return utils.ReturnTuple(args, names)
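
The sign convention of the returned delay can be checked without the utils
dependency; the lines below (made-up signals) reproduce the core of the
function and recover a known 30-sample delay of signal1:

import numpy as np

rng = np.random.default_rng(0)
sig2 = rng.standard_normal(1000)
sig1 = np.concatenate([np.zeros(30), sig2])[:1000]  # signal1 delayed by 30

corr = np.correlate(sig1, sig2, mode='full')
delay = np.arange(-len(sig2) + 1, len(sig1))[np.argmax(corr)]
print(delay)  # -> 30 (positive: signal1 lags signal2)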
Example #51
0
def get_y_shift(x_corrected_flat):
    max_shift, extent, up_sampling = 50, 20, 10

    no_of_vertical_pixels = x_corrected_flat.shape[0]
    vertical_indices = np.arange(no_of_vertical_pixels)
    shift_horizontal = np.zeros(no_of_vertical_pixels)
    shift_horizontal_fit = np.zeros(no_of_vertical_pixels)
    weights = np.ones(no_of_vertical_pixels)

    display = copy.deepcopy(x_corrected_flat)

    plt.figure('Click on the line profile to trace')
    plt.imshow(display, cmap='gray', origin='lower')
    point = plt.ginput(1)
    plt.clf()
    plt.cla()

    point_as_a_list = list(map(int, point[0]))
    reference_row = int(point_as_a_list[1])
    x_beg = int(point_as_a_list[0] - extent / 2)
    x_end = int(point_as_a_list[0] + extent / 2)

    slit_reference = np.mean(display[reference_row - 10:reference_row + 10,
                                     x_beg:x_end],
                             axis=0)
    normalised_slit = (slit_reference -
                       slit_reference.mean()) / slit_reference.std()

    for j in vertical_indices:
        this_slit = display[j, x_beg - max_shift:x_end + max_shift]
        weights[j] = np.sqrt(this_slit.mean()**2)
        this_slit_normalised = (this_slit - this_slit.mean()) / this_slit.std()
        correlation = np.correlate(scipy.ndimage.zoom(this_slit_normalised,
                                                      up_sampling),
                                   scipy.ndimage.zoom(normalised_slit,
                                                      up_sampling),
                                   mode='valid')
        shift_horizontal[j] = np.argmax(correlation)

    shift_horizontal = shift_horizontal / up_sampling - max_shift

    valid_x_points = np.argwhere(np.abs(shift_horizontal) < max_shift)
    shifts_for_valid_points = shift_horizontal[valid_x_points]

    c = np.polyfit(valid_x_points.ravel(),
                   shifts_for_valid_points.ravel(),
                   2,
                   w=np.nan_to_num(weights)[valid_x_points].ravel())
    shift_horizontal_fit = c[0] * vertical_indices**2 + \
        c[1] * vertical_indices + c[2]
    shift_horizontal_apply = -shift_horizontal_fit

    plt.plot(valid_x_points, shifts_for_valid_points, 'k-',
             shift_horizontal_fit, 'k-')
    plt.show()

    return shift_horizontal_apply
Example #52
0
# Read the input file
fs, d = wavfile.read('Tamara_Laurel_-_Sweet_extract.wav')
d = np.float32(d)

# Simulate a channel
u, w_true = simulate_channel(d, 80)

# Extract one-second segments from both signals
s_start = 8
d = d[s_start * fs:(s_start + 1) * fs]
u = u[s_start * fs:(s_start + 1) * fs]

# Estimate the autocorrelation and the cross-correlation
N_THETA = 6
r = np.correlate(u, u, 'full') / len(u)
r = r[(len(u) - 1):len(u) - 1 + N_THETA]
R = toeplitz(r)

p = np.correlate(d, u, 'full') / len(u)
p = p[(len(u) - 1):len(u) - 1 + N_THETA]

# Determine the optimal Wiener filter
w_wiener = inv(R).dot(p)

# Find the optimal Wiener filter by gradient descent
mus = [1e-10, 1e-9, 1e-8, 1e-7]  # Different step sizes
w0 = np.zeros(N_THETA)
for mu in mus:
    N = 5000  # Number of iterations
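    # The loop body is cut off here; a steepest-descent sketch (not the
    # original code) would step along the MSE gradient, w <- w + mu*(p - R w):
    w = w0.copy()
    for _ in range(N):
        w = w + mu * (p - R.dot(w))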
Example #53
0
def measure_offset_correlate(samples_a: List[float], samples_b: List[float],
                             sampling_rate: float,  mode: str = "parabolic") -> float:
    auto = np.correlate(samples_a, samples_b, mode="full")
    best_offset = np.argmax(auto)
    return interpolate(auto, best_offset, mode) * (1/sampling_rate)  # offset in sec
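
The interpolate helper is not shown in this snippet; a plausible reconstruction
(an assumption, not the original code) refines the discrete argmax with the
vertex of a parabola fitted through its two neighbours:

def interpolate(corr, peak_index, mode="parabolic"):
    # Hypothetical stand-in for the missing helper: sub-sample peak position
    # from a parabola through (peak-1, peak, peak+1).
    if mode != "parabolic" or peak_index in (0, len(corr) - 1):
        return float(peak_index)
    y0, y1, y2 = corr[peak_index - 1], corr[peak_index], corr[peak_index + 1]
    return peak_index + 0.5 * (y0 - y2) / (y0 - 2.0 * y1 + y2)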
Example #54
0
def calculateCorr(s0, s1):
    """
        Calculates the correlation between the discrete cosine transforms (DCT) of the two time series
    """
    return np.correlate(dct(s0, norm='ortho'), dct(s1, norm='ortho'))
Example #55
0
def calc_offset(filename, resolution=1, scale=1.0, plot=False):

    Load(Filename='/SNS/CORELLI/IPTS-12008/nexus/' + filename + '.nxs.h5',
         OutputWorkspace=filename,
         LoadMonitors='1',
         MonitorsAsEvents='1')
    LoadInstrument(Workspace=filename,
                   Filename='/SNS/users/rwp/CORELLI_Definition.xml')
    ScaleX(InputWorkspace=filename + '_monitors',
           OutputWorkspace=filename + '_monitors',
           Factor=str(scale))
    Rebin(InputWorkspace=filename + '_monitors',
          OutputWorkspace=filename + '_monitors',
          Params=str(resolution))

    w = mtd[filename]
    sequence = list(map(
        float,
        w.getInstrument().getComponentByName(
            'correlation-chopper').getStringParameter('sequence')[0].split()))
    freq = round(
        w.getRun().getProperty("BL9:Chop:Skf4:MotorSpeed").timeAverageValue())
    delay = w.getRun().getProperty(
        "BL9:Chop:Skf4:PhaseTimeDelaySet").timeAverageValue()

    print(filename, 'MotorSpeed =', freq, 'Hz', 'PhaseTimeDelaySet =', delay, 'uS')

    if freq % 60 != 0:
        print('Frequency not a multiple of 60.')
        return

    sequence2 = sequence
    for i in range(int(freq / 60) - 1):
        sequence2 = np.append(sequence2, sequence)

    m = mtd[filename + '_monitors']
    x = m.extractX()[1]
    y = m.extractY()[1]

    s = np.cumsum(sequence2)
    chopper = np.zeros(len(x) - 1)
    l = len(chopper)

    for n in range(l):
        if np.searchsorted(s, (
            (x[n] + x[n + 1]) / 2 / 16666.67) * 360. * freq / 60.) % 2 == 1:
            chopper[n] = 1

    chopper2 = np.zeros(len(chopper) * 2)
    chopper2 = np.append(chopper, chopper)

    #y=np.roll(chopper,1337)

    corr = np.correlate(y, chopper2)
    r = np.argmax(corr)
    r2 = (x[r] + x[r + 1]) / 2

    #print x[-1]
    print "Chopper sequence offset = ", r2, "uS"

    chopper = np.roll(chopper, r)
    if plot:
        plt.plot(x[:-1], y)
        plt.plot(x[:-1], chopper * y.max() * 0.5)
        plt.show()
    return (freq, delay, r2, corr.max())
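
Correlating against the doubled sequence implements a circular
cross-correlation: every cyclic alignment of the chopper pattern is tried
exactly once. A self-contained check with a made-up pattern:

import numpy as np

rng = np.random.default_rng(0)
pattern = rng.integers(0, 2, size=360).astype(float)
signal = np.roll(pattern, 137)  # unknown circular offset to recover

doubled = np.concatenate([pattern, pattern])
corr = np.correlate(doubled, signal, mode='valid')
k = int(np.argmax(corr))
print((len(pattern) - k) % len(pattern))  # -> 137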
Example #56
0
def SSA(s, m, rtnRC=1, pad='linear', **kwargs):
    """Implement Singular Spectrum Analysis for pandas Series

    Parameters
    ----------
    s : pandas.Series
        Input data, in series or single columns of a data frame. Any necessary
        normalization (e.g. for anomalies from baseline) should be done.
    m : int
        Order or number of time lags to calculate over. A larger number gives
        more smoothing in the first returned column.

    **Optionals**

    rtnRC : int
        Number of reconstructed principles to return. Set to None to get all
        of them. Most smoothing is done in first returned column, but other
        columns may be useful to see periodicities.
    pad : str ['linear' | 'mirror' | None] default 'linear'
        Type of padding to use. If no padding desired, use ``None``.

    Returns
    -------
    pandas.DataFrame containing the reconstructed principal components (or just
    the first one, with the default ``rtnRC=1``).

    Notes
    -----
    Computes the first m principal components (PCs) using Singular Spectrum
    Analysis. Most of the trend information is in the first reconstructed
    component (RC), so the function returns just the first RC by default. This
    RC will look like smoothed data, and the amount of smoothing is determined
    by how large `m` is. Note that padding is added to prevent a drop towards 0
    at beginning and end.

    Examples
    --------
    from:
    http://environnement.ens.fr/IMG/file/DavidPDF/SSA_beginners_guide_v9.pdf
    ::

        %precision 2
        import pandas as pd
        import numpy as np
        import matplotlib.pyplot as plt

        y = [1.0135518, - 0.7113242, - 0.3906069, 1.565203, 0.0439317,
             - 1.1656093, 1.0701692, 1.0825379, - 1.2239744, - 0.0321446,
             1.1815997, - 1.4969448, - 0.7455299, 1.0973884, - 0.2188716,
             - 1.0719573, 0.9922009, 0.4374216, - 1.6880219, 0.2609807]

        rc = SSA(pd.Series(y), 4, rtnRC=None, pad=None)
        plt.plot(rc)
        plt.show()

        rc[0].values.flatten()

        array([ 0.3 , -0.31, -0.33,  0.82, -0.06, -0.82,  0.54,  0.53, -0.88,
                0.07,  0.83, -0.66, -0.36,  0.83, -0.18, -0.72,  0.63,  0.23,
               -0.68,  0.24])

    """

    if pad:
        ps = Padded(s, m * 2, ptype=pad)
        y = np.array(ps.values)
    else:
        y = np.array(s.values)
    n = len(y)
    mr = range(m)
    ys = np.ones((n, m))  # time shifted y-values
    for i in mr:
        ys[:n - i, i] = y[i:]
    # get autocorrelation at the first `m` time lags
    cor = np.correlate(y, y, mode='full')[n - 1:n - 1 + m] / n
    # make toeplitz matrix (diagonal, symmetric)
    c = np.array([[cor[abs(i - j)] for i in mr] for j in mr])
    # get eigenvalues and eigenvectors
    lam, rho = np.linalg.eig(c)
    pc = ys.dot(rho)  # principal components
    # reconstruct the components in proper time frame
    if rtnRC is None:
        desired = m
    else:
        desired = min(m, rtnRC)
    rc = np.zeros((n, desired))
    for j in range(desired):
        z = np.zeros((n, m))
        for i in mr:  # make time-shifted principal component matrix
            z[i:, i] = pc[:n - i, j]
        rc[:, j] = z.dot(rho[:, j]) / m
    if pad:
        rc = rc[m:n - m]
    return pd.DataFrame(rc, index=s.index)
Example #57
0
def update_figure(event):

    if icontainer.stage == 0 and icontainer.i < 7 and icontainer.drawdist:
        i = icontainer.i
        icontainer.drawdist = False
        # Remove previous lines
        for l in icontainer.remove_lines:
            ax.lines.remove(l)
        icontainer.remove_lines = []
        if i % 2 == 0:
            line = ax.axhline(y=tt[i, 1], linestyle='--', color='k')
            icontainer.remove_lines.append(line)
            line, = ax.plot(Y1,
                            tt[i, 1] +
                            stats.norm.pdf(Y1,
                                           loc=y1 + r * (tt[i, 1] - y2),
                                           scale=np.sqrt((1 - r**2))),
                            color='#377eb8')
            icontainer.remove_lines.append(line)
            if i == 0:
                ax.legend((el_legend, samp_legend, pdfline_legend),
                          ('90% HPD', 'Starting point',
                           r'Conditional density given $\theta_2$'),
                          numpoints=1,
                          loc='lower right')
            else:
                ax.legend((el_legend, samp_legend, pdfline_legend),
                          ('90% HPD', 'Samples from the chain',
                           r'Conditional density given $\theta_2$'),
                          loc='lower right')
        else:
            line = ax.axvline(x=tt[i, 0], linestyle='--', color='k')
            icontainer.remove_lines.append(line)
            line, = ax.plot(tt[i, 0] + stats.norm.pdf(
                Y2, loc=y2 + r * (tt[i, 0] - y1), scale=np.sqrt((1 - r**2))),
                            Y2,
                            color='#377eb8')
            icontainer.remove_lines.append(line)
            ax.legend((el_legend, samp_legend, pdfline_legend),
                      ('90% HPD', 'Samples from the chain',
                       r'Conditional density given $\theta_1$'),
                      loc='lower right')

        fig.canvas.draw()

    elif icontainer.stage == 0 and icontainer.i < 7 and not icontainer.drawdist:
        icontainer.i += 1
        i = icontainer.i
        if i == 6:
            icontainer.stage += 1
        icontainer.drawdist = True
        sampi, = ax.plot(tt[i, 0],
                         tt[i, 1],
                         'o',
                         markerfacecolor='none',
                         markeredgecolor='#377eb8')
        icontainer.samps.append(sampi)
        if i == 1:
            ax.legend((el_legend, samp_legend, pdfline_legend),
                      ('90% HPD', 'Samples from the chain',
                       r'Conditional density given $\theta_2$'),
                      loc='lower right')
        fig.canvas.draw()

    elif icontainer.stage == 1:
        icontainer.stage += 1
        for l in icontainer.remove_lines:
            ax.lines.remove(l)
        icontainer.remove_lines = []
        ax.legend((el_legend, samp_legend),
                  ('90% HPD', 'Samples from the chain'),
                  loc='lower right')
        fig.canvas.draw()

    elif icontainer.stage == 2:
        icontainer.stage += 1
        for s in icontainer.samps:
            ax.lines.remove(s)
        icontainer.samps = []
        line, = ax.plot(tt[:icontainer.i + 1, 0],
                        tt[:icontainer.i + 1, 1],
                        color='#377eb8')
        icontainer.samps.append(line)
        line, = ax.plot(tt[:icontainer.i + 1:2, 0],
                        tt[:icontainer.i + 1:2, 1],
                        'o',
                        markerfacecolor='none',
                        markeredgecolor='#377eb8')
        icontainer.samps.append(line)
        ax.legend((el_legend, chain_legend), ('90% HPD', 'Markov chain'),
                  loc='lower right')
        fig.canvas.draw()

    elif icontainer.stage == 3:
        icontainer.stage += 1
        # modify helper text
        htext.set_text('Gibbs sampling\npress `q` to skip animation')
        # start the timer
        anim_thread.start()

    elif icontainer.stage == 4 and event.key == 'q':
        # stop the animation
        stop_anim.set()

    elif icontainer.stage == 5:
        icontainer.stage += 1
        for s in icontainer.samps:
            ax.lines.remove(s)
        icontainer.samps = []
        # remove helper text
        icontainer.itertext.remove()
        line, = ax.plot(tt[:burnin, 0], tt[:burnin, 1], color='m')
        icontainer.samps.append(line)
        line, = ax.plot(tt[:burnin:2, 0],
                        tt[:burnin:2, 1],
                        'o',
                        markerfacecolor='none',
                        markeredgecolor='m')
        icontainer.samps.append(line)
        line, = ax.plot(tt[burnin:nanim + 1, 0],
                        tt[burnin:nanim + 1, 1],
                        color='#377eb8')
        icontainer.samps.append(line)
        line, = ax.plot(tt[burnin:nanim + 1:2, 0],
                        tt[burnin:nanim + 1:2, 1],
                        'o',
                        markerfacecolor='none',
                        markeredgecolor='#377eb8')
        icontainer.samps.append(line)
        ax.legend((el_legend, chain_legend, burnchain_legend),
                  ('90% HPD', 'Markov chain', 'warm-up'),
                  loc='lower right')
        fig.canvas.draw()

    elif icontainer.stage == 6:
        icontainer.stage += 1
        for s in icontainer.samps:
            ax.lines.remove(s)
        icontainer.samps = []
        line, = ax.plot(tt[burnin:nanim + 1:2, 0],
                        tt[burnin:nanim + 1:2, 1],
                        'o',
                        markerfacecolor='none',
                        markeredgecolor='#377eb8')
        icontainer.samps.append(line)
        ax.legend((el_legend, samp_legend),
                  ('90% HPD', 'samples from the chain after warm-up'),
                  loc='lower right')
        fig.canvas.draw()

    elif icontainer.stage == 7:
        icontainer.stage += 1
        for s in icontainer.samps:
            ax.lines.remove(s)
        icontainer.samps = []
        points = ax.scatter(tt[burnin::2, 0],
                            tt[burnin::2, 1],
                            10,
                            alpha=0.5,
                            color='#377eb8')
        icontainer.samps.append(points)
        ax.legend((el_legend, points),
                  ('90% HPD', '950 samples from the chain'),
                  loc='lower right')
        fig.canvas.draw()

    elif icontainer.stage == 8:
        icontainer.stage += 1
        fig.clear()

        indexes = np.arange(burnin, M, 2)
        samps = tt[indexes]

        ax1 = fig.add_subplot(3, 1, 1)
        ax1.axhline(y=0, linewidth=1, color='gray')
        line1, line2, = ax1.plot(indexes / 2, samps, linewidth=1)
        ax1.legend((line1, line2), (r'$\theta_1$', r'$\theta_2$'))
        ax1.set_xlabel('iteration')
        ax1.set_title('trends')
        ax1.set_xlim([burnin / 2, 1000])

        ax2 = fig.add_subplot(3, 1, 2)
        ax2.axhline(y=0, linewidth=1, color='gray')
        ax2.plot(indexes / 2,
                 np.cumsum(samps, axis=0) / np.arange(1,
                                                      len(samps) + 1)[:, None],
                 linewidth=1.5)
        ax2.set_xlabel('iteration')
        ax2.set_title('cumulative average')
        ax2.set_xlim([burnin / 2, 1000])

        ax3 = fig.add_subplot(3, 1, 3)
        maxlag = 20
        sampsc = samps - np.mean(samps, axis=0)
        acorlags = np.arange(maxlag + 1)
        ax3.axhline(y=0, linewidth=1, color='gray')
        for i in [0, 1]:
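            # The 'full' ACF has length 2N-1 with zero lag at index N-1 (i.e.
            # index -N from the end); the slice keeps lags 0..maxlag and
            # normalizes by the zero-lag value.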
            t = np.correlate(sampsc[:, i], sampsc[:, i], 'full')
            t = t[-len(sampsc):-len(sampsc) + maxlag + 1] / t[-len(sampsc)]
            ax3.plot(acorlags, t)
        ax3.set_xlabel('lag')
        ax3.set_title('estimate of the autocorrelation function')

        fig.suptitle('Gibbs sampling - press any key to continue...',
                     fontsize=18)
        fig.subplots_adjust(hspace=0.6)
        fig.canvas.draw()

    elif icontainer.stage == 9:
        icontainer.stage += 1
        fig.clear()

        indexes = np.arange(burnin, M, 2)
        samps = tt[indexes]
        nsamps = np.arange(1, len(samps) + 1)

        ax1 = fig.add_subplot(1, 1, 1)
        ax1.axhline(y=0, linewidth=1, color='gray')
        line1, line2, = ax1.plot(indexes / 2,
                                 np.cumsum(samps, axis=0) / nsamps[:, None],
                                 linewidth=1.5)
        er1, = ax1.plot(indexes / 2,
                        1.96 / np.sqrt(nsamps / 4),
                        'k--',
                        linewidth=1)
        ax1.plot(indexes / 2, -1.96 / np.sqrt(nsamps / 4), 'k--', linewidth=1)
        er2, = ax1.plot(indexes / 2, 1.96 / np.sqrt(nsamps), 'k:', linewidth=1)
        ax1.plot(indexes / 2, -1.96 / np.sqrt(nsamps), 'k:', linewidth=1)
        ax1.set_xlabel('iteration')
        ax1.set_title('Gibbs sampling\ncumulative average')
        ax1.legend(
            (line1, line2, er1, er2),
            (r'$\theta_1$', r'$\theta_2$', '95% interval for MCMC error',
             '95% interval for independent MC'))
        ax1.set_xlim([burnin / 2, 1000])
        ax1.set_ylim([-2, 2])
        fig.canvas.draw()
Example #58
0
def smart_align(template, observations):

    aligned_obs = np.zeros((observations.shape[1], observations.shape[0]))
    aligned_obs_norm = np.zeros((observations.shape[1], observations.shape[0]))

    for n in tqdm_notebook(range(observations.shape[1])):
        #print('Aligning observation',n+1,'of',observations.shape[1])

        obs = observations[:, n] / np.max(observations[:, n])
        bins = template.shape[0]
        first_try = 0
        no_scale_incr = 100
        template_noise_list = []
        obs_noise_list = []
        bins_with_signal_test = []
        list_of_means = []
        list_of_stds = []
        list_list_means = []
        list_list_stds = []
        list_list_no_points = []
        min_arg_list = []
        min_val_list = []
        std_min_val_list = []
        mean_times_points = []

        # Correlate to find rough alignment and then start with a fractional offset before rolling the observations past each other

        # make sure observations don't span the edge
        # rotate template to put peak at 1/4
        peak_bin = np.argmax(obs)
        initial_shift = int(bins / 4) - peak_bin
        obs = np.roll(obs, initial_shift)

        xcorr = np.correlate(template, obs, "full")
        lag = np.argmax(xcorr)
        obs = np.roll(obs, lag)
        # obs = np.roll(obs,-int(bins/7.0))

        # Break the template into 8 parts and find the rms of each. Then find the smallest. Do with the observation too.
        for z in range(8):
            template_noise_list.append(
                np.std(template[z * int(bins / 8.0):(z + 1) *
                                int(bins / 8.0)]))
            obs_noise_list.append(
                np.std(obs[z * int(bins / 8.0):(z + 1) * int(bins / 8.0)]))

# Find the approximate peaks of template and observation to give an idea of the range over which to scale the observations.

        temp_peak = np.mean(np.sort(template)[-10:])
        obs_peak = np.mean(np.sort(obs)[-10:])
        rough_scale = temp_peak / obs_peak
        rough_scale_upp = rough_scale * 1.1
        rough_scale_low = rough_scale * 0.9
        scale_incr = (rough_scale_upp - rough_scale_low) / no_scale_incr

        # Keep a copy of the observation in its original state.
        obs_original = obs[:]

        # Phase shift over all bins.
        for roll in range(int(bins / 3.5)):
            #            if (roll+1)%100 == 0:
            #                print( 'Bin',roll+1,'out of',int(bins/3.5)
            closest_to_one = 1e10
            bins_with_signal = []
            bins_with_signal_test = []
            list_mean_each_scale_shift = []
            list_std_each_scale_shift = []
            list_points_each_scale_shift = []
            # No shift needed for first try.
            if roll != 0:
                obs = np.roll(obs, 1)


# If the level is too low in either template or observation, don't include the bin in further analysis.
            for r in range(bins):
                #print( r,obs[r],obs_peak,np.min(obs_noise_list),template[r],temp_peak,np.min(template_noise_list)
                if obs[r] > obs_peak / 3. and template[r] > temp_peak / 3.:
                    bins_with_signal.append(r)
                    bins_with_signal_test.append(1)
                else:
                    bins_with_signal_test.append(0)

        # For each roll, only proceed if there are at least 10 bins that have signal in them.
            if len(bins_with_signal) >= 10:
                # Loop over each of the 100 scale attempts to find which is the best fit.
                for s in range(no_scale_incr):
                    if s == 0:
                        first_scale_val = rough_scale_low + s * scale_incr
                    if s == no_scale_incr - 1:
                        last_scale_val = rough_scale_low + s * scale_incr
                    diff = []
                    escape = 0
                    scaled_obs = obs * (rough_scale_low + s * scale_incr)
                    # Loop over all the bins with signal and find the absolute difference between template and observation.
                    for each in bins_with_signal:
                        diff.append(abs(scaled_obs[each] - template[each]))
        # Save this difference list before outliers are removed.
                    orig_diff = diff[:]
                    # Remove outliers (over 2 sigma) and re-evaluate the mean. If mean doesn't change much, exit the loop. Record the last set of data that had outliers removed.
                    while escape == 0:
                        diff_mean = np.mean(diff)
                        diff_std = np.std(diff)
                        outlier_list = []
                        for y in range(len(diff)):
                            if abs(diff[y] - diff_mean) > 2 * diff_std:
                                outlier_list.append(y)
                        latest_diff_list = diff[:]
                        for index in sorted(outlier_list, reverse=True):
                            del diff[index]

                        if np.mean(diff) == 0:
                            escape = 1
                            diff = latest_diff_list[:]
                        else:
                            if np.mean(diff) / diff_mean < 1.001 and np.mean(
                                    diff
                            ) / diff_mean > 0.999 and first_try == 1:
                                escape = 1
                                diff = latest_diff_list[:]
                        first_try = 1
        # In lists - For any phase, record the mean and std and number of data points after all outliers removed at each scale attempt.
                    list_mean_each_scale_shift.append(abs(np.mean(diff)))
                    list_std_each_scale_shift.append(np.std(diff))
                    list_points_each_scale_shift.append(len(diff))

        # Make a list containing the above lists. 1 per phase shift.
                list_list_means.append(list_mean_each_scale_shift)
                list_list_stds.append(list_std_each_scale_shift)
                list_list_no_points.append(list_points_each_scale_shift)

            else:
                # If the number of bins with signal is not high enough, just put 1s into the list of lists. We will find minimum later, and 1 is >>.
                list_list_means.append([1] * no_scale_incr)
                list_list_stds.append([1] * no_scale_incr)
                list_list_no_points.append([1] * no_scale_incr)

        # Calculate the mean / number of points. This should be minimised to find the best fit.
        for h in range(len(list_list_means)):
            for y in range(len(list_list_means[0])):
                mean_times_points.append(list_list_means[h][y] /
                                         list_list_no_points[h][y])

        min_arg_final = np.argmin(mean_times_points)
        the_scale = min_arg_final % no_scale_incr
        the_roll = min_arg_final // no_scale_incr
        min_val_final = np.min(mean_times_points)
        std_min_val_final = list_list_stds[int(
            min_arg_final / no_scale_incr)][int(min_arg_final % no_scale_incr)]

        # Return the aligned and scaled observations.

        aligned_obs_norm[n, :] = np.roll(
            obs_original * (rough_scale_low + the_scale * scale_incr),
            int(the_roll))
        #        obs = observations[:,n]/np.max(observations[:,n])

        aligned_obs[n, :] = np.roll(obs_original * np.max(observations[:, n]),
                                    int(the_roll))
    aligned_obs = np.transpose(aligned_obs)
    aligned_obs_norm = np.transpose(aligned_obs_norm)

    return aligned_obs_norm, aligned_obs
Example #59
0
    plt.plot(
        np.mean(temperature[nlabel * (house_i - 1):nlabel * house_i]),
        np.mean(energy_production[nlabel * (house_i - 1):nlabel * house_i]),
        '.')
    plt.subplot(1, 3, 3)
    plt.plot(
        np.mean(daylight[nlabel * (house_i - 1):nlabel * house_i]),
        np.mean(energy_production[nlabel * (house_i - 1):nlabel * house_i]),
        '.')

    #     raw_input('Here.')
    #     plt.draw()

    mode = 'full'
    tt = np.correlate(temperature[nlabel * (house_i - 1):nlabel * house_i],
                      temperature[nlabel * (house_i - 1):nlabel * house_i],
                      mode=mode)
    #     tt = tt[len(tt)/2:]
    dd = np.correlate(daylight[nlabel * (house_i - 1):nlabel * house_i],
                      daylight[nlabel * (house_i - 1):nlabel * house_i],
                      mode=mode)
    dd = dd[len(dd) // 2:]
    ee = np.correlate(
        energy_production[nlabel * (house_i - 1):nlabel * house_i],
        energy_production[nlabel * (house_i - 1):nlabel * house_i],
        mode=mode)
    ee = ee[len(ee) // 2:]
    td = np.correlate(temperature[nlabel * (house_i - 1):nlabel * house_i],
                      daylight[nlabel * (house_i - 1):nlabel * house_i],
                      mode=mode)
    td = td[len(td) // 2:]
Example #60
0
def autocorr(x):
    result = numpy.correlate(x, x, mode='full')
    return result[result.size // 2:]
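
Since the 'full' output has length 2*n - 1, the slice keeps the non-negative
lags 0..n-1. Dividing by the zero-lag value normalizes the ACF (a usage
sketch, assuming autocorr above is in scope):

import numpy

x = numpy.sin(numpy.linspace(0, 8 * numpy.pi, 400))
r = autocorr(x)
print(r / r[0])  # normalized so the zero-lag autocorrelation is 1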