Example #1
def run_analysis():
	exp=0
	spikecorr=[]
	correlations=[]
	frateX=[]
	frateY=[]
	bsCorr=[]
	while exp < trials:
		(listX,cntX,spikesX),(listY,cntY,spikesY)=brainalyze.loadExperiment(parameters['BRAINPREFIX'],exp)
		#(listX,cntX,spikesX),(listY,cntY,spikesY)=brainalyze.loadExperiment('vb-exp',8)
		print "cntX,spikesX: ", cntX, spikesX
		print "cntY,spikesY: ", cntY, spikesY
		sxcorr = scipy.correlate(spikesX, spikesY)
		spikecorr.append(sxcorr[sxcorr.argmax()])
		xcorr = scipy.correlate(listX,listY)
		correlations.append(xcorr[xcorr.argmax()])
		# convert to Brian-style spike trains
		bsSpikesX=map(lambda x: float(x)/10000, spikesX)
		frateX.append(statistics.firing_rate(bsSpikesX))
		# only if we have more than 1 spike in Y can we calculate firing rate
		if cntY > 1:
			bsSpikesY=map(lambda x: float(x)/10000, spikesY)
			frateY.append(statistics.firing_rate(bsSpikesY))
			bsCorr.append(statistics.total_correlation(bsSpikesX,bsSpikesY))
		else:
			bsCorr.append(-1.0)
			frateY.append(0.0)
		exp=exp+1
	print "Done"
	return (frateX,frateY,bsCorr)
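The pattern above keeps only the peak of the cross-correlation (`xcorr[xcorr.argmax()]`). A minimal numpy-only sketch of the same idea (not from the original project), on a made-up shifted signal, showing that the argmax also recovers the lag:

import numpy as np

rng = np.random.default_rng(0)
x = rng.standard_normal(500)
y = np.roll(x, 40)                        # y is x delayed by 40 samples
xcorr = np.correlate(x, y, mode='full')   # length 2*500-1
peak = xcorr[xcorr.argmax()]              # peak value, as stored above
lag = (len(y) - 1) - xcorr.argmax()       # recovered delay: 40
print(peak, lag)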
Example #2
def align_on_SP(S_list, SP_list, s_S_list, s_SP_list, az_bin, **kwargs):
    plot = kwargs.get('plot', False)
    SP_map_list = []
    S_map_list = []
    for idx, ii in enumerate(S_list):
        j = min(len(S_list[idx]), len(SP_list[idx]))
        for jj in range(j):
            if abs(S_list[idx][jj].stats.sac['az'] -
                   SP_list[idx][jj].stats.sac['az']) > az_bin:
                continue
            else:
                samp = S_list[idx][jj].stats.sampling_rate
                d_s = seispy.data.phase_window(S_list[idx][jj],
                                               phase=['S'],
                                               window=(-100, 100)).data
                s_s = seispy.data.phase_window(s_S_list[idx][jj],
                                               phase=['S'],
                                               window=(-100, 100)).data
                S_time = (len(d_s) / 2. -
                          np.argmax(correlate(s_s, d_s, mode='same'))) / samp
                #SP_list[idx][jj].data = hilbert(SP_list[idx][jj].data).imag
                SP_list[idx][jj].data = SP_list[idx][jj].data
                #s_SP_list[idx][jj].data = hilbert(s_SP_list[idx][jj].data).imag
                s_SP_list[idx][jj].data = s_SP_list[idx][jj].data
                d_sp = seispy.data.phase_window(SP_list[idx][jj],
                                                phase=['SP'],
                                                window=(-100, 100)).data
                s_sp = seispy.data.phase_window(s_SP_list[idx][jj],
                                                phase=['SP'],
                                                window=(-100, 100)).data
                SP_time = (len(d_sp) / 2. - np.argmax(
                    correlate(s_sp, d_sp, mode='same'))) / samp
                print 'SP_az: ', SP_list[idx][jj].stats.sac['az']
                print 'SP_gc: ', SP_list[idx][jj].stats.sac['gcarc']
                print 'S_az: ', S_list[idx][jj].stats.sac['az']
                print 'S_gc: ', S_list[idx][jj].stats.sac['gcarc']
                print SP_time, S_time
                SP_map_list.append((
                    SP_list[idx][jj].stats.sac['stla'],
                    SP_list[idx][jj].stats.sac['stlo'],
                    SP_time,
                    SP_list[idx][jj].stats.sac['az'],
                    SP_list[idx][jj].stats.sac['gcarc'],
                ))
                S_map_list.append((
                    S_list[idx][jj].stats.sac['stla'],
                    S_list[idx][jj].stats.sac['stlo'],
                    S_time,
                    S_list[idx][jj].stats.sac['az'],
                    S_list[idx][jj].stats.sac['gcarc'],
                ))
                if plot == True:
                    fig, ax = plt.subplots(2, 1)
                    ax[0].plot(d_s, color='k')
                    ax[0].plot(s_s, color='r')
                    ax[1].plot(d_sp, color='k')
                    ax[1].plot(s_sp, color='r')
                    plt.show()

    return S_map_list, SP_map_list
Example #3
def get_auto_corr_coeffs(signal):
    n_channels = signal.shape[0]
    auto_corr_coeffs = np.zeros(n_channels)
    for channel in range(n_channels):
        auto_corr_coeffs[channel] = scipy.correlate(signal[channel],
                                                    signal[channel], "valid")
    return auto_corr_coeffs
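Note that correlating a signal with itself in "valid" mode with equal-length inputs yields a single number, the zero-lag autocorrelation (the signal energy), which is what gets stored per channel above. A quick self-contained check:

import numpy as np

x = np.array([1.0, -2.0, 3.0])
r0 = np.correlate(x, x, 'valid')   # equal lengths -> length-1 array
print(r0[0], np.dot(x, x))         # both 14.0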
Example #4
    def _execute(self, x, *args, **kwargs):
        # init
        epochs = []

        # per channel detection
        for c in xrange(self.nchan):
            # filter energy with window
            xings = sp.correlate(self.energy[:, c], self.window, 'same')
            # replace filter artifacts with the mean
            mu = xings[self.window.size:-self.window.size].mean()
            xings[:self.window.size] = xings[-self.window.size:] = mu
            ep = epochs_from_binvec(xings < self.zcr_th)

            epochs.append(ep)

        # pad and merge artifact epochs
        epochs = sp.vstack(epochs)
        if epochs.size > 0:
            epochs[:, 0] -= self.pad[0]
            epochs[:, 1] += self.pad[1]
        self.events = merge_epochs(epochs, min_dist=self.mindist)

        # return
        self.events = self.events.astype(INDEX_DTYPE)
        return x
Example #6
def get_decorrelation_time(signal, sampling_frequency):
    """Computes decorrelation time.

    Parameters
    ----------
    signal : numpy.ndarray
        EEG data to compute the feature for
    sampling_frequency : int
        Data sampling rate

    Returns
    -------
    np.ndarray
        An Nx1 numpy array of decorrelation times, one per channel
    """
    n_channels = signal.shape[0]
    decorrelation_time = np.zeros((n_channels, 1))
    for channel in range(n_channels):
        decorr_idx = 0
        corr = scipy.correlate(signal[channel], signal[channel], "full")
        corr = np.roll(corr, len(signal[channel]))
        for i in range(len(corr)):
            if corr[i] < 0:
                decorr_idx = i
                break
        decorrelation_time[channel] = decorr_idx / sampling_frequency
    return decorrelation_time
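The roll by len(signal) moves the zero-lag term of the "full" autocorrelation to index 0, so the loop scans positive lags for the first sign change. A small numpy-only check (synthetic data, not from the original project), using a 5 Hz cosine whose autocorrelation first goes negative a quarter period in:

import numpy as np

fs = 1000
t = np.arange(0, 1, 1.0 / fs)
x = np.cos(2 * np.pi * 5 * t)
corr = np.correlate(x, x, 'full')
corr = np.roll(corr, len(x))       # index 0 is now the zero-lag term
first_neg = np.argmax(corr < 0)    # first lag with a negative value
print(first_neg / fs)              # ~0.05 s = a quarter of the 0.2 s period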
Example #7
def decode(file_name):
    border.rotate(file_name)
    image = Image.open("temp.png")
    q = border.find("temp.png")
    ind = sp.argmin(sp.sum(q, 1), 0)
    up_left = q[ind, 0] + 2
    up_top = q[ind, 1] + 2
    d_right = q[ind+1, 0] - 3
    d_bottom = q[ind-1, 1] - 3

    box = (up_left, up_top, d_right, d_bottom)
    region = image.crop(box)
    h_sum = sp.sum(region, 0)
    m = argrelmax(sp.correlate(h_sum, h_sum, 'same'))
    s = sp.average(sp.diff(m))
    m = int(round(d_right - up_left)/s)
    if m % 3 != 0:
        m += 3 - m % 3
    n = int(round(d_bottom - up_top)/s)
    if n % 4 != 0:
        n += 4 - n % 4
    s = int(round(s))+1

    region = region.resize((s*m, s*n), PIL.Image.ANTIALIAS)
    region.save("0.png")
    pix = region.load()
    matrix = mix.off(rec.matrix(pix, s, m, n))
    str2 = hamming.decode(array_to_str(matrix))

    return hamming.bin_to_str(str2)
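The `argrelmax(sp.correlate(h_sum, h_sum, 'same'))` step estimates the module size `s` from the spacing of the autocorrelation peaks. A toy version of that trick on a made-up periodic profile, assuming only numpy and scipy.signal:

import numpy as np
from scipy.signal import argrelmax

# hypothetical profile with one bright pixel every 8 samples
h_sum = np.tile(np.array([4.0, 0, 0, 0, 0, 0, 0, 0]), 16)
ac = np.correlate(h_sum, h_sum, mode='same')
peaks = argrelmax(ac)[0]
print(np.average(np.diff(peaks)))   # ~8.0, the grid pitch in samples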
Example #8
    def make_plot(self, *args):
        # get the old limits
        if self._haveData:
            xlim = self.axes.get_xlim()
            ylim = self.axes.get_ylim()            

        self.axes.cla()
        self.axes.grid(True)

        tup = self.get_data()
        if tup is None: return 
        t, data, dt, label = tup

        dt = t[1]-t[0]
        self._dt = dt # for get_msg

        #corr = cross_correlate(data, data, mode=2)
        corr = correlate(data, data, mode=2)
        corr = corr/corr[len(data)-1] # normed so autocorr at zero lag is 1
        lags = arange(-len(data)+1, len(data))*dt
        self.axes.plot(lags, corr)

        if self._haveData:
            self.axes.set_xlim(xlim)
            self.axes.set_ylim(ylim)


        self._haveData = True
        self.canvas.draw()
Example #9
def calc_snr_matched_filter(data, widths=None):
    """
    Calculate S/N using several matched filter widths, then pick the highest S/N

    :param array data: timeseries data
    :param list widths: matched filter widths to try
                        (Default: [1, 5, 10, 25, 50, 100, 250, 500, 1000, 2500])
    :return: highest S/N (float), corresponding matched filter width (int)
    """
    if widths is None:
        # all possible widths as used by AMBER
        widths = [1, 5, 10, 25, 50, 100, 250, 500, 1000, 2500]

    snr_max = 0
    width_max = None

    # get S/N for each width, store only max S/N
    for w in widths:
        # apply boxcar-shaped filter
        mf = np.ones(w)
        data_mf = correlate(data, mf)

        # get S/N
        snr = calc_snr_amber(data_mf)

        # store if S/N is highest
        if snr > snr_max:
            snr_max = snr
            width_max = w

    return snr_max, width_max
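A minimal sketch of the boxcar matched-filter step on synthetic data (numpy only, with mode='same' so indices line up; the snippet's `correlate` and `calc_snr_amber` come from its own project and are not reproduced here):

import numpy as np

rng = np.random.default_rng(1)
width = 50
data = rng.standard_normal(5000)
data[2000:2000 + width] += 1.0                 # pulse invisible per sample
mf = np.ones(width)                            # boxcar matched filter
data_mf = np.correlate(data, mf, mode='same')  # integrates over the pulse
print(np.argmax(data_mf))                      # near sample 2025, the pulse centre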
Example #10
 def get_auto_corr_coeffs(self, signal):
     n_channels = len(signal.ch_names)
     auto_corr_coeffs = np.zeros(n_channels)
     for channel in range(0, n_channels):
         auto_corr_coeffs[channel] = scipy.correlate(
             signal._data[channel], signal._data[channel], "valid")
     return auto_corr_coeffs
Example #11
def ffcalc(a, freq=None):
 """Returns the fundamental frequency of an array, a. Expects raw data; the default frequency is 32,000. This uses brute force correlation, which is slow for large data sets but more accurate than FFT-based methods. Returns the data in wavelength"""
 if freq is None: freq=32000
 corr=sc.correlate(a,a,mode='same')
 corr=corr[(len(corr)//2):(len(corr)-len(corr)//4)]
 dat=np.diff(np.where(np.diff(corr)>0,1,0))
 out=float(freq)/float(((list(dat)).index(-1)))
 return out
Example #12
def ffcalc(a, freq=None):
    """Returns the fundamental frequency of an array, a. Expects raw data; the default frequency is 32,000. This uses brute force correlation, which is slow for large data sets but more accurate than FFT-based methods. Returns the data in wavelength"""
    if freq is None: freq=32000
    corr=sc.correlate(a,a,mode='same')
    corr=corr[(len(corr)//2):(len(corr)-len(corr)//4)]
    dat=np.diff(np.where(np.diff(corr)>0,1,0))
    out=float(freq)/float(((list(dat)).index(-1)))
    return out
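How the diff trick above finds the period: the first -1 in `dat` marks the first local maximum of the autocorrelation after lag zero. A self-contained check (synthetic tone, not from the original project) with a 400 Hz sine at the default 32 kHz rate:

import numpy as np

fs = 32000
a = np.sin(2 * np.pi * 400.0 * np.arange(2048) / fs)
corr = np.correlate(a, a, mode='same')
corr = corr[len(corr) // 2:len(corr) - len(corr) // 4]    # lags 0..N/4
dat = np.diff(np.where(np.diff(corr) > 0, 1, 0))
period = list(dat).index(-1)       # first local max after lag 0
print(fs / period)                 # ~405 Hz (true period: 80 samples -> 400 Hz)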
Example #13
def _mcfilter_py(mc_data, mc_filt):
    if mc_data.ndim != mc_filt.ndim > 2:
        raise ValueError('wrong dimensions: %s, %s' %
                         (mc_data.shape, mc_filt.shape))
    if mc_data.shape[1] != mc_filt.shape[1]:
        raise ValueError('channel count does not match')
    return sp.sum(
        [sp.correlate(mc_data[:, c], mc_filt[:, c], mode='same')
         for c in xrange(mc_data.shape[1])], axis=0)
Example #14
 def channel_correlation(self, signal, auto_corr_coeffs, channel_1, channel_2):
   signal_1 = signal._data[channel_1];
   signal_2 = signal._data[channel_2];
   corr_1 = auto_corr_coeffs[channel_1]
   corr_2 = auto_corr_coeffs[channel_2]
   corr = scipy.correlate(signal_1, signal_2,"full")
   max_corr = np.max(corr)
   max_corr = max_corr / np.sqrt(corr_1*corr_2)
   return max_corr
Example #15
 def win_corr_array(self):
     self.construct_win_factors()
     w = self.win_percentages()
     lx, ly = self.statArray.shape
     if len(w) != ly:
          print 'Should be {0:} teams, and input should be n by {0:}'.format(ly)
     z = scipy.zeros((lx, ly))
     for i in range(lx):
         z[i] = scipy.correlate(w, self.statArray[i]) / scipy.sum(self.statArray[i])
     return z
Example #16
def createGraphs( Tbl, coord, data, pp):
    c =0
    titles =[]


    titles.append(coord + ' Coordinate vs Frames for the ball')
    titles.append('Autocorrelation of the ' + coord + ' coordinate for the ball')
    titles.append('Velocity in the ' + coord + ' direction for the ball')
    titles.append('Autocorrelation of the Velocity in the ' + coord + ' direction for the ball')
    titles.append('Acceleration in the ' + coord + ' direction for the ball' )
    titles.append('Autocorrelation of the Acceleration in the ' + coord + ' direction for the ball')


    for column in Tbl.columns:
            print(c)
            fig = plt.figure()
            ax = fig.add_subplot(1,1,1)
            frames = data['frames'][:len(Tbl[column])]
            ax.plot(frames, Tbl[column])
            ax.set_title(titles[c])
            ax.set_xlabel('Frames')
            ax.set_ylabel(coord+' position value')
            
            pp.savefig()
            plt.close(fig)
             
            fig = plt.figure()

            ax2 = fig.add_subplot(1,1,1)
            #get autoCorrelation
            corr= scipy.correlate(Tbl[column], Tbl[column], mode = 'same')
            ax2.plot(corr[int(corr.size/2):]) #symmetric fn - just plot half
            ax2.set_title(titles[c+1])
            
            pp.savefig()
            plt.close(fig)
            #zoomed in
            fig = plt.figure()
            ax3 = fig.add_subplot(111)
            ax3.plot(corr[int(corr.size/2):]) #symmetric fn - just plot half
            ax3.set_title(titles[c+1])
            ax3.set_xlim(0, 1000)
            
            pp.savefig()
            plt.close(fig)
            
            fig = plt.figure()
            ax3 = fig.add_subplot(111)
            ax3.plot(corr[int(corr.size/2):]) #symmetric fn - just plot half
            ax3.set_title(titles[c+1])
            ax3.set_xlim(0, 200)
            #plt.plot()
            pp.savefig()
            plt.close(fig)
            c +=2
Example #17
 def correlate_specs(self):
     """ Determine the x shift of each spectrum relative to the first spectrum;
     x values are shifted by this value. """
     self.shifts.append(0)
     dx = 0.1
     xfine = np.arange(np.min(self.specs[0][0]), np.max(self.specs[0][0]), dx) # 10 times oversampling
     ref = sp.interp(xfine, self.specs[0][0], self.specs[0][1])
     self.correlations = []
     self.correlations.append(sp.correlate(ref, 
                                         ref, mode='full'))
     for i in range(1, len(self.specs)):
         currfine = sp.interp(xfine, self.specs[i][0], self.specs[i][1])
         self.correlations.append(sp.correlate(ref, currfine,  mode='full'))
     for i in range(1, len(self.specs)):
         shift = np.argmax(self.correlations[i]) - np.argmax(self.correlations[0])
         shift = shift * dx           
         self.specs[i][0] = self.specs[i][0] + shift
         self.shifts.append(shift)
         if self._verbose:
             print "spectrum {:d} shifted by {:.2f}".format(i, shift)
Example #18
def _mcfilter_py(mc_data, mc_filt):
    if mc_data.ndim != mc_filt.ndim > 2:
        raise ValueError('wrong dimensions: %s, %s' %
                         (mc_data.shape, mc_filt.shape))
    if mc_data.shape[1] != mc_filt.shape[1]:
        raise ValueError('channel count does not match')
    return sp.sum([
        sp.correlate(mc_data[:, c], mc_filt[:, c], mode='same')
        for c in xrange(mc_data.shape[1])
    ],
                  axis=0)
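Usage sketch for the per-channel correlate-and-sum above, with made-up shapes (`sp` in the snippet is scipy; here scipy.signal.correlate is used explicitly):

import numpy as np
from scipy.signal import correlate

mc_data = np.random.randn(200, 4)            # (samples, channels)
mc_filt = np.ones((11, 4)) / 11.0            # (taps, channels)
out = np.sum([correlate(mc_data[:, c], mc_filt[:, c], mode='same')
              for c in range(mc_data.shape[1])], axis=0)
print(out.shape)                             # (200,) one value per sample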
Example #19
def corrintegral(h1, h2, initial, f):
    match_i = initial
    match_f = f
    z = sci.correlate(h1, h2[match_i:match_f], mode='full')
    abs_z = np.abs(z)
    w = np.argmax(abs_z) - len(h2[match_i:match_f]) + 1
    delta_w = w + len(h2[match_i:match_f])
    h2p_norm = np.linalg.norm(h2[match_i:match_f])
    h1p_norm = np.linalg.norm(h1[w:delta_w])
    norm_z = abs_z / (h1p_norm * h2p_norm)
    return np.amax(norm_z), w
Example #20
def run ():
	exp = 99
	listX = brainsim.loadBrain ("vb-exp" + str (exp) + "-XReport.txt")
	listY = brainsim.loadBrain ("vb-exp" + str (exp) + "-YReport.txt")
	cntX, spikesX = countSpikes (listX)
	cntY, spikesY = countSpikes (listY)
	print cntX
	print cntY
	print spikesX
	print spikesY
	print "SIGNAL CORRELATION: ", scipy.correlate (spikesX, spikesY)
	print "MAX: ",  scipy.correlate (spikesX, spikesY).argmax()
	print "Total spike correlation (Brian func.): ", totalCorrelation(spikesX,spikesY,cntX,cntY)

	from matplotlib import pyplot

	pyplot.subplot (211)
	pyplot.plot (numpy.linspace (0.0, len (listX), len (listX)), listX)
	pyplot.subplot (212)
	pyplot.plot (numpy.linspace (0.0, len (listY), len (listY)), listY)
	pyplot.show ()
Example #21
def find_centers(n, collapse, h, fwhm, spacing):
    x, y = generate_model(n, h, fwhm, spacing)
    y_corr = scipy.correlate(collapse, y)
    x_corr = scipy.linspace(0, len(y_corr)-1, num=len(y_corr))

    peak = x_corr[numpy.argsort(y_corr)[-1]]

    centers = []
    for i in range(n):
        centers.append((i+1)*spacing+peak)

    return numpy.array(centers)
Example #22
def pitch(x, fs, pitchrange=[12,120], mode='corr'):
    if mode=='corr':
        corr = scipy.correlate(x, x, mode='full')[len(x)-1:]
        corr[:int(fs/midi2hz(pitchrange[1]))] = 0
        corr[int(fs/midi2hz(pitchrange[0])):] = 0
        indmax = scipy.argmax(corr)
    elif mode=='ceps':
        y = rceps(x)
        y[:int(fs/midi2hz(pitchrange[1]))] = 0
        y[int(fs/midi2hz(pitchrange[0])):] = 0
        indmax = scipy.argmax(y)
    return hz2midi(fs/indmax)
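The `midi2hz`/`hz2midi` helpers are not shown in the snippet; assuming the standard MIDI tuning formula, a stripped-down, runnable version of the 'corr' branch looks like this:

import numpy as np

def midi2hz(m):
    return 440.0 * 2.0 ** ((m - 69) / 12.0)   # assumed standard tuning

fs = 44100
x = np.sin(2 * np.pi * 220.0 * np.arange(4096) / fs)   # 220 Hz = MIDI 57
corr = np.correlate(x, x, mode='full')[len(x) - 1:]    # non-negative lags
corr[:int(fs / midi2hz(120))] = 0     # zero lags for pitches above MIDI 120
corr[int(fs / midi2hz(12)):] = 0      # zero lags for pitches below MIDI 12
print(fs / corr.argmax())             # ~220 Hz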
Example #23
def nanxcov(x, y, flag):
    """ Computes the lag covariance, c, between vectors x and y for lag t from -(N-1) to +(N-1)
    N is the length of the input vectors
    compute the lag covariance, c, between vectors x and y
    for lag, t, from -(N-1) to +(N-1),
    where N is the length of the input vectors.
    c(t)=E[(x(t'+t)-E[x])*conj(y(t')-E[y])].
    if flag=0, the covariance is normalized by the length of the lagged vectors
    after NaNs have been removed, stored in n  (rem: if n(t)=0, c(t)=NaN);
    if flag=1, the covariance is normalized by n(0)."""
    x = np.transpose(np.asarray(x))
    y = np.transpose(np.asarray(y))
    N = len(x)
    I = np.isnan(x)
    J = np.isnan(y)
    x[I] = 0
    y[J] = 0
    zx = np.ones(N)
    zx[I] = 0
    zy = np.ones(N)
    zy[J] = 0
    n = np.correlate(zx, zy, mode='full')

    x = x - np.mean(x)
    y = y - np.mean(y)

    if not flag in [0, 1]:
        raise ValueError("""flag must be 0 or 1""")

    if flag == 0:
        c = sc.correlate(x, y, mode='full') / n
    else:
        n0 = sc.correlate(zx, zy, mode='valid')
        c = np.correlate(x, y, mode='full') / n0

    fi = np.where(np.asarray(c) == np.inf)
    c[fi] = np.nan
    c = np.ma.masked_array(c, [np.isnan(xf) for xf in c])

    return c, n
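The core trick here: correlating the indicator vectors `zx` and `zy` counts, for every lag, how many overlapping sample pairs were NaN-free, and that count then normalizes the covariance. The counting step in isolation:

import numpy as np

x = np.array([1.0, np.nan, 2.0, 3.0])
valid = (~np.isnan(x)).astype(float)
n = np.correlate(valid, valid, mode='full')
print(n)           # pair counts per lag; n[len(x) - 1] == 3 at lag 0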
Example #24
 def get_decorrelation_time(self, signal):
   n_channels = len(signal.ch_names)
   decorrelation_time = np.zeros( [n_channels, 1] )
   for channel in range(0, n_channels):
     decorr_idx=0;
     corr = scipy.correlate(signal._data[channel], signal._data[channel],"full")
     corr = np.roll(corr,len(signal._data[channel]))
     for i in range(0,len(corr)):
       if(corr[i] < 0):
         decorr_idx=i
         break
     decorrelation_time[channel] = decorr_idx / self.sampling
   return decorrelation_time
Example #25
 def auto_lag(self):
     """Automatically calculate the optimal lag"""
     # Use scipy to correlate #
     correlation = scipy.correlate(self.wetlab_peaks_prop, self.digital_peaks_prop_cut, mode='full')
     # Make a dictionary of possible lags #
     length = len(self.wetlab_peaks_prop)
     lags = xrange(-length+1, length)
     lag_dict = dict(zip(lags,correlation))
     # Apply a hard coded search cutoff #
     for lag in lag_dict:
         if lag < -10 or lag > 10: lag_dict[lag] = 0
     # Best lag #
     return max(lag_dict, key=lag_dict.get)
Example #26
def my_xcorr(v, max_lags=20):
    v = v - numpy.mean(v)
    xc = scipy.correlate(v, v, 'full')
    lags = numpy.array(range(-len(v) + 1, len(v)))
    
    some = numpy.nonzero(numpy.abs(lags) <= max_lags)
    
    #max_value = numpy.nonzero(numpy.abs(lags) == 0)
    xc = xc[some] 
    lags = lags[some]
    
    xc = xc / numpy.max(xc)
    
    return xc, lags
Example #27
def xorrFeatureMax(feature1, feature2, window, step=1):
    size1 = len(feature1)
    size2 = len(feature2)
    maxValue = 0
    if window > size1 or window > size2:
        print('Window size is too large')
        return 0
    for i in range(0, size1 - window, step):
        for j in range(0, size2 - window, step):
            xorrValue = sp.correlate(feature1[i:(i + window)],
                                     feature2[j:(j + window)])
            if xorrValue > maxValue:
                maxValue = xorrValue
    return maxValue
Example #28
def epochs_from_binvec(binvec):
    """returns the discrete epochs where the :binvec: is true

    :type binvec: ndarray
    :param binvec: one-dimensional boolean ndarray.
    :returns: ndarray - epoch set where :binvec: is True [[start, end]]
    """

    # early exit
    if not binvec.any():
        return sp.zeros((0, 2))

    # calculate
    output = sp.correlate(sp.concatenate(([0], binvec, [0])), [-1, 1], "same")
    return sp.vstack(((output > 0).nonzero()[0] - 1, (output < 0).nonzero()[0] - 2)).T
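Correlating the zero-padded boolean vector with [-1, 1] acts as an edge detector: positive output marks rising edges (epoch starts), negative output falling edges. A worked numpy example of the version above, which returns inclusive end indices (the near-duplicate in Example #31 appears to return ends one past the last True sample):

import numpy as np

binvec = np.array([0, 1, 1, 0, 0, 1, 0], dtype=bool)
padded = np.concatenate(([0], binvec, [0])).astype(int)
edges = np.correlate(padded, [-1, 1], 'same')
starts = (edges > 0).nonzero()[0] - 1
ends = (edges < 0).nonzero()[0] - 2
print(np.vstack((starts, ends)).T)   # [[1 2] [5 5]]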
Example #29
def calcMultipleSpikeTrainCCs(trains):
    """
    Calculate the cross-correlograms of multiple spike trains. The results are put in arrays of n*n elements (one per ordered pair of trains). Example:
    ccs, taus= calcMultipleSpikeTrainCCs(trains)
    """
    ccs = list()
    taus = list()
    nTrains = len(trains)
    for m in sc.arange(nTrains):
        for n in sc.arange(nTrains):
            cc=sc.correlate(trains[m],trains[n], mode='full')
            ccs.append(cc)
            nPts=len(cc)
            tau=sc.arange(-nPts/2.0,nPts/2.0)
            taus.append(tau)
    return ccs,taus
Example #30
 def M_step(self):
      """
      Compute the means and the diagonal of the covariance matrix;
      we also compute the normalizing constant.
      """
     # need to do many convolutions
     # and average over them
     self.marginalized_translations = np.array([
             np.array([ correlate(datum,affinity_row)
                        for datum, affinity_row in zip(self.data_mat,
                                                       component_affinities)])
             for component_affinities in self.affinities])
     self.means = self.marginalized_translations.mean(1)
     self.covs = ((self.marginalized_translations - self.means)**2).mean(1)
     # 
     self.norm_constants = self.norm_constant_constant -.5 * np.sum(np.log(self.covs),1)
Example #31
def epochs_from_binvec(binvec):
    """returns the discrete epochs where the :binvec: is true

    :type binvec: ndarray
    :param binvec: one-dimensional boolean ndarray.
    :returns: ndarray - epoch set where :binvec: is True [[start, end]]
    """

    # early exit
    if not binvec.any():
        return sp.zeros((0, 2))

    # calculate
    output = sp.correlate(sp.concatenate(([0], binvec, [0])), [-1, 1], 'same')
    return sp.vstack(
        ((output > 0).nonzero()[0] - 1, (output < 0).nonzero()[0] - 1)).T
Example #32
 def __init__(self,
              num_mix,
              data_mat,
              component_length):
     """
     num_mix is an integer
     data_mat is assumed to be 2-d (so we have a collection
     of 1-d signals).
     component_length is how long the mixture components should be
       this implicitly parameterizes how much translation is allowed
     """
     self.num_mix = num_mix
     self.data_mat = data_mat
     self.num_data, self.data_length = data_mat.shape
     # to make likelihood computation go faster
     self.rep_data_mat = np.tile(self.data_mat.reshape(
             self.num_data,
             1,
             self.data_length),
                                 (1,
                                  self.num_mix,
                                  1))
     assert self.data_mat.ndim == 2
     self.component_length = component_length
     self.trans_amount = self.data_length - self.component_length + 1
     # shifted versions of the data
     self.trans_data_mat = np.array([
             np.array([
                     correlate(datum,unit_vec)
                     for unit_vec in np.eye(self.trans_amount)])
             for datum in self.data_mat])
     self.affinities = np.zeros((self.num_mix,
                                 self.num_data,
                                 self.trans_amount))
     # initialize variables as None so that we know they are
     # defined later
     self.means = None
     self.covs = None
     self.norm_constants = None
     self.mix_weights = None
     self.log_likelihood = - np.inf
     # uniform weighting over the transitions
     self.trans_weights = np.ones(self.trans_amount,dtype=np.float64)
     self.trans_weights /= np.sum(self.trans_weights)
     self.init_templates()
     self.max_affinities = np.zeros(
         self.num_data)
Example #33
 def generate(self, X, Y):
     # Module #
     import scipy
     # Get the long numeric vectors (fills memory) #
     x = [score for chrom in X for score in X.score_vector(chrom)]
     y = [score for chrom in Y for score in Y.score_vector(chrom)]
     # Corr will contain the numeric vector #
     self.corr = scipy.correlate(x, y, mode='full')
     # Graph it #
     fig, axes = make_default_figure()
     axes.set_title('Correlation of "' + X.name + '" and "' + Y.name + '"')
     axes.set_xlabel('Shift [base pairs]')
     axes.set_ylabel('Correlation [no units]')
     axes.plot(self.corr)
     widen_axis(axes)
     # Return a figure #
     return fig
Example #34
def x_cor(wv_arr, std_flux_arr, obj_flux_arr):
#instance variables & overhead---------------
	data_root = '/Users/emilylemonier/Data/Python/MIKE_FIVE_RV/Raw/'
	nLoops = 5000
	rv_array = [None]*nLoops
	inputs = sys.argv
	
	#CORRELATION ---------------------------------------
	#xdata
	i = 0
	num = 0
	for k in range(0, len(wv_arr)-1):
		i += wv_arr[k+1]-wv_arr[k]
		num += 1
		distancePerLag = i/num #computed as avg of distances per lag

		# what you're comparing
		offset = distancePerLag
		#y1 = fx_std_interp
		#y2 = fx_obj_interp

		#compute the cross-correlation between y1 and y2
		ycorr = scipy.correlate(std_flux_arr, obj_flux_arr, mode='same')
		#xcorr = scipy.linspace(0, len(ycorr)-1, num=len(ycorr))

	#TEST PLOT
	plt.figure(1)
	plt.subplot(211)
	plt.plot(ycorr, 'b')
	plt.title('correlation plots of ycorr vs xcorr')
	plt.show()
	#pylab.subplot(211)
	#pylab.plot(wv_std_good, fx_obj_good, 'b')
	#pylab.plot(wv_obj_good, fx_obj_good, 'r')
	#pylab.show()


	# define a gaussian fitting function where
	# p[0] = amplitude
	# p[1] = mean
	# p[2] = sigma.
	fitfunc = lambda p, x: p[0]*scipy.exp(-(x-p[1])**2/(2.0*p[2]**2))
	errfunc = lambda p, x, y: fitfunc(p,x)-y
Example #35
def findSpectrumShift(x, flat, x_sm, y_sm):
    """
    This routine finds the wavelength and continuum shifts for a given
    wavelength window
    """
    #fig = pyplot.figure(0)
    #ax=fig.add_axes([0.1, 0.1, 0.8, 0.8])
    window = scipy.where( (x > min(x_sm)) & 
            (x < max(x_sm)) )[0]
    feature_x = x[window]
    fine_x = numpy.linspace(min(feature_x), max(feature_x), num=len(feature_x)*10.0)
    model = scipy.interpolate.interpolate.interp1d(x_sm, y_sm, kind='linear',bounds_error = False)
    observed = scipy.interpolate.interpolate.interp1d(feature_x, flat[window], kind='linear', bounds_error=False)
    ycorr = scipy.correlate((1.0-observed(fine_x)), (1.0-model(fine_x)), mode='full')
    xcorr = scipy.linspace(0, len(ycorr)-1, num=len(ycorr))

    #fitfunc = lambda p,x: p[0]*scipy.exp(-(x-p[1])**2.0/(2.0*p[2]**2.0)) + p[3]
    #errfunc = lambda p,x,y: fitfunc(p,x) - y

    x_zoom = xcorr[int(len(ycorr)*0.45): int(len(ycorr)*0.55)]
    y_zoom = ycorr[int(len(ycorr)*0.45): int(len(ycorr)*0.55)]
    max_index = numpy.argsort(y_zoom)[-1]
    offset_computed = (x_zoom[max_index] - len(xcorr)/2.0)/10.0*(feature_x[1]-feature_x[0])
    #ax.plot(x_zoom, y_zoom)
    #ax.plot(xcorr, ycorr)
    #fig.show()
    #raw_input()

    #p_guess = [ycorr[len(ycorr)/2], len(ycorr)/2, 3.0, 0.0001]
    #p1, success = scipy.optimize.leastsq(errfunc, p_guess, args = (x_zoom, y_zoom))

    #fit = p1[0]*scipy.exp(-(x_zoom-p1[1])**2/(2.0*p1[2]**2)) + p1[3]

    #xcorr = p1[0]
    #nLags = xcorr-(len(window)-1.5)
    #offset_computed = nLags*(feature_x[0]-feature_x[1])
    #if (abs(offset_computed) > 20):
    #    print 'Ha!', offset_computed
    #    offset_computed = 0
    #print asdf

    return offset_computed
Example #36
def get_cc(y1, y2, x1, x2):
    """
    Easy way of estimating the shift to the nearest pixel (given in
    wavelength units).

    y1 = flux of spec1
    y2 = flux of spec2
    x1 = wavelengths of spec1
    x2 = wavelengths of spec2
    """

    if x1.size > x2.size:
        m = (x1 > x2.min()) * (x1 < x2.max())
        yuse = [y1[m], y2]
    else:
        m = (x2 > x1.min()) * (x2 < x1.max())
        yuse = [y1, y2[m]]

    cc = sp.correlate(yuse[0], yuse[1], mode='same')
    i = sp.where(cc == cc.max())[0]
    shift = (x1[1] - x1[0]) * (cc.size // 2 - i)
    return shift
Example #37
stuff = A.dot(Chat)
#plt.show(plt.plot(stuff[:]))

# restore to full resolution using interpolation
mod_rest = scipy.interpolate.griddata(mod_time, stuff,    T)  # numpy has no np.interpolate; scipy.interpolate.griddata is meant
val_rest = scipy.interpolate.griddata(val_time, val_data, T)
#val_rest = [val_rest[i+1] if np.isnan(val_rest[i]) else val_rest[i] for i in range(len(val_rest))]
val_rest[0] = val_rest[1]
plt.plot(mod_rest, 'b-+')
plt.plot(val_rest, 'r-+')
plt.grid()
plt.show()

residual = mod_rest-val_rest
print(residual)
correlation = np.correlate(residual-np.mean(residual), residual-np.mean(residual), mode = 2)
n_lags   = len(correlation)
lags     = np.arange(-n_lags/2, n_lags/2)+1

start = 0
end   = -1

#==============================================================================
# Plotting
#==============================================================================
do_plot = 1
if do_plot:
    
    plt.close("all")
    #plt.plot(XX_k1k1[0,:]-X_k[0,:])
    
Example #38
component_length = 12
num_data,data_length = data_mat.shape
trans_amount = data_length-component_length +1 
num_mix = 1
affinities = np.zeros((num_mix,num_data,trans_amount))
rand_mix_idx = np.random.randint(num_mix,
                                         size=(num_data))
affinities[rand_mix_idx,
           np.arange(num_data),
                        np.zeros(num_data,
                                 dtype=int)] = 1.

np.array([
                np.array([
                        correlate(datum,affinity_row)
                        for affinity_row in affinity])
                for datum, affinity in zip(data_mat,
                                           affinities)])


marginalized_translations = np.array([
        np.array([ correlate(datum,affinity_row)
                  for datum, affinity_row in zip(data_mat,
                                                 component_affinities)])
        for component_affinities in affinities])

means = marginalized_translations.mean(1)
covs = ((marginalized_translations - means)**2).mean(1)
# 
norm_constants = -.5 * np.log((2.*np.pi)**num_mix * np.prod(covs,1))
Example #39
    new_x = np.hstack((x[1:], x))
    ax0.plot(x, 'g')
    new_y = scipy.convolve(new_x, h, 'valid')
    ax1.plot(new_y, 'r')
    ax1.plot(y, 'k')
    delta = new_y - y
    ax1.plot(delta)
    fig.savefig('p56_convolve_2.png')

if 1:
    ax0.clear()
    ax1.clear()
    ax0.plot(X, 'k')
    ax0.plot(H, 'b')
    new_X = np.hstack((X[1:], X))
    AC = scipy.correlate(new_X, H)
    ax0.plot(AC, c='r')

    x = np.fft.ifft(X)
    h = np.fft.ifft(H)
    hdag = h.conj()
    XH = np.fft.fft(x * hdag)
    ax1.plot(x, 'k')
    ax1.plot(h, 'b')
    ax1.plot(x * h, 'r')
    ax1.plot(XH, 'k:')
    AC_from_ft = XH * Nx
    renorm = (XH * Nx).max() / AC.max()
    renorm = 0.5
    AC_from_ft = AC_from_ft / renorm
    ax0.plot(AC_from_ft, 'k:')
Example #40
def ccf(data):
    return np.array([sp.correlate(p, np.concatenate((p,p))) for p in data])
Example #41
def giveCorrelation():
	xcorr = scipy.correlate(A, B)
	return xcorr
Example #42
#!/usr/bin/env python

import scipy as sp
import numpy as np
import matplotlib.pyplot as plt

size = 1024
a = sp.random.normal(size=(size, ))
fft_a = np.fft.fftshift(np.fft.fft(a))

N = 8
b = sp.hstack((a, ) * N)
fft_b = np.fft.fftshift(np.fft.fft(b))

c = sp.correlate(b, b, 'full')

plt.subplot(411)
plt.title('A(f)')
plt.plot(abs(fft_a))

plt.subplot(412)
plt.title('b(t)')
plt.plot(b)

plt.subplot(413)
plt.title('B(f)')
# Note how non-zero values exist only at multiples of N.
plt.stem(sp.arange(fft_b.size), abs(fft_b))

plt.subplot(414)
plt.title('Autocorrelation of b(t)')
Example #43
def autocorr(x):
    result = sp.correlate(x, x, mode='full')
    return result[result.size // 2:]   # keep non-negative lags
Example #44
def xcorr(x, y=None, maxlags=None, norm='coeff', doDetrend=False):
    '''
    Cross-correlation using scipy.correlate

    copy from http://subversion.assembla.com/svn/PySpectrum/trunk/src/spectrum/correlation.py
    and futher modified for flow analysis.

    Estimates the cross-correlation (and autocorrelation) sequence of a random
    process of length N. By default, there is no normalisation and the output
    sequence of the cross-correlation has a length 2*N-1.

    Arguments:
        * x: first data array of length N
        * y: second data array of length N. If not specified, computes the
        autocorrelation.
        * maxlags: compute cross correlation between [-maxlags:maxlags]
        when maxlags is not specified, the range of lags is [-N+1:N-1].
        * norm: ['biased', 'unbiased', None, 'coeff'] normalisation
        * doDetrend: [bool()] detrend the data first. Useful for measured data

    The true cross-correlation sequence is:
        * r_{xy}[m] = E(x[n+m].y^*[n]) = E(x[n].y^*[n-m])

    However, in practice, only a finite segment of one realization of the
    infinite-length random process is available.

    The correlation is estimated using numpy.correlate(x,y,'full').
    Normalisation is handled by this function using the following cases:

        * 'biased': Biased estimate of the cross-correlation function
        * 'unbiased': Unbiased estimate of the cross-correlation function
        * 'coeff': Normalizes the sequence so the autocorrelation at zero lag is 1.0.

    returns:
        * xcorr: [np.array, shape=(N-1,1)]a numpy.array containing the cross-correlation sequence
        * lags: [np.array, shape=(N-1,1)] lag vector

    notes:
        * If x and y are not the same length, the shorter vector is
        zero-padded to the length of the longer vector.
    '''
    N = len(x)
    if y is None:
        y = x

    if doDetrend:
        x=spsig.detrend(x)
        y=spsig.detrend(y)

    assert len(x) == len(y), 'x and y must have the same length. Add zeros if needed'

    if maxlags is None:
        maxlags = N-1
        lags = np.arange(0, 2*N-1)
    else:
        assert maxlags < N, 'maxlags must be less than data length'
        lags = np.arange(N-maxlags-1, N+maxlags)

    res = sp.correlate(x, y, mode='full')

    if norm == 'biased':
        Nf = float(N)
        res = res[lags] / float(N)    # do not use /= !!
    elif norm == 'unbiased':
        res = res[lags] / (float(N)-abs(np.arange(-N+1, N)))[lags]
    elif norm == 'coeff':
        Nf = float(N)
        rms = stat.rms(x) * stat.rms(y)
        #rms = (np.mean(x**2)*np.mean(y**2))**(0.5)
        res = res[lags] / rms / Nf
    else:
        res = res[lags]

    lags = np.arange(-maxlags, maxlags+1)
    return res, lags
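The function depends on its project's `spsig` (scipy.signal) and `stat.rms` helpers, so it is not runnable as shown. The 'coeff' normalization it describes can be sketched with numpy alone: remove the mean, then divide by the zero-lag autocorrelation so the coefficient at lag 0 is exactly 1:

import numpy as np

x = np.sin(np.linspace(0, 20, 200))
xc = x - x.mean()
r = np.correlate(xc, xc, mode='full')
r /= r[len(x) - 1]                     # zero-lag term -> 1.0
lags = np.arange(-len(x) + 1, len(x))
print(lags[np.argmax(r)], r.max())     # 0 1.0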
Example #45
    for n in sc.arange(rows*cols):
        ax[n].legend()

    gr.subplots_adjust(left=0.05, bottom=0.05, right=0.98, top=0.98,
                       wspace=0.1, hspace=0.1)
    gr.ion(); gr.draw()



if 0: 
    fileName='T-Esther.csv'
    data=readColumnCSV(dataDir='./', delimiter=',', fileName =fileName, nHeaderRows=4)
    tStamps= data['values'].transpose()
    s1 = tStamps[10]
    s2 = tStamps[30]
    cc=sc.correlate(s1,s2, mode='full')
    nPts=len(cc)
    tau = sc.arange(-nPts/2, nPts/2)
    figg= gr.figure()
    gr.plot(tau,cc)


# Example: Create a spike train, threshold it, show the thresholds
if 0: 
    tr, isis, ifrs= createNGammaTrains(nPulses=500, nTrains=1, graph=0.0)
    train=tr[0];ifr=ifrs[0]; isi=isis[0]
    myBins= sc.unique(sc.sort(ifr))
    cdf,cdfInverse = calcCDF(bins=myBins, sample=ifr)
    alphaX= calcThresholdsFromCDF(cdf=cdf, quantValues=[0.1,0.9])
    fig=gr.figure()
    ax1= fig.add_subplot(121)
Example #46
import scipy

plot = False

atol = 1e-1
rtol = 1e-1

sizeA = 3
a = scipy.arange(sizeA)

sizeB = 10
b = scipy.arange(sizeB)

d = loudia.Correlation(sizeA, sizeB)
r = d.process(a, b)
s = scipy.correlate(a, b, 'full')
print scipy.allclose(r[0, :], s, atol=atol, rtol=rtol)

d = loudia.Correlation(sizeA, sizeB, sizeA + sizeB, -(sizeA + sizeB), True)
r = d.process(b, a)
s = scipy.correlate(a, b, 'full')
print scipy.allclose(r[0, :], s, atol=atol, rtol=rtol)

if plot:
    import pylab
    pylab.figure()
    pylab.plot(r[0, :], label='loudia')
    pylab.plot(s, label='scipy')
    pylab.legend()
    pylab.show()
Example #47
            chipSize = 0
            # ycorr = scipy.correlate(cont[chip_ranges[0][0] - 100:chip_ranges[2][1] + 100], mspec[chip_ranges[0][0] - 100:chip_ranges[2][1] + 100], mode="full")
            for i, chipRange in enumerate(chip_ranges):
                chip = spec[i]
                print(np.where(chip == 0.0))
                chip[np.where(chip <= 0.01)[0]] = 0.0
                cont = chip / max(chip)
                if chipSize < chipRange[1] - chipRange[0]:
                    chipSize = chipRange[1] - chipRange[0]
                midPoint = chipRange[0] + (chipRange[1] - chipRange[0]) / 2
                visitMid = 2046
                visitRange = 1217  # 1277#1338#1460 #1894 / 2 lowest model chip range... #2046 # 4092 / 2
                """lo = midPoint-visitRange
				if (midPoint-visitRange < 0):"""
                print(len(mspec[midPoint - visitRange : midPoint + visitRange - 1]))
                chipCCF = scipy.correlate(mspec[midPoint - visitRange : midPoint + visitRange - 1], cont, mode="full")

                if ycorr.size == 0:
                    ycorr = chipCCF
                else:
                    ycorr += chipCCF

            ycorr /= len(chip_ranges)
            ycorr -= np.median(ycorr)
            # Generate an x axis
            xcorr = np.arange(ycorr.size)
            # Convert this into lag units, but still not really physical
            # lags = xcorr - (1401 - 1)
            lags = xcorr - (chip_ranges[0][1] - chip_ranges[0][0] - 1)
            temp = np.where(np.logical_or(lags == -lagrange, lags == lagrange))[0]
            ycorr_diff = ycorr - scipy.ndimage.filters.gaussian_filter1d(ycorr, 100)
Example #48
def createGraphs( Tbl, x, data, colData, pp):
    c =0
    titles =[]

    if x == True:
        colData.append('X')
    else:
        colData.append('Y')
        
    titles.append(colData[2] + ' Coordinate vs Frames for player ' + colData[1] + ' for the ' + colData[0] + ' team')
    titles.append('Autocorrelation of the ' + colData[2] + ' coordinate for player ' + colData[1] + ' for the ' + colData[0] + ' team')
    titles.append('Velocity in the ' + colData[2] + ' direction for player ' + colData[1] + ' for the ' + colData[0] + ' team')
    titles.append('Autocorrelation of the Velocity in the ' + colData[2] + ' direction for player ' + colData[1] + ' for the ' + colData[0] + ' team')
    titles.append('Acceleration in the ' + colData[2] + ' direction for player ' + colData[1] + ' for the ' + colData[0] + ' team' )
    titles.append('Autocorrelation of the Acceleration in the ' + colData[2] + ' direction for player ' + colData[1] + ' for the ' + colData[0] + ' team')


    for column in Tbl.columns:
            fig = plt.figure()
            ax = fig.add_subplot(1,1,1)
            frames = data['Frames'][:len(Tbl[column])]
            ax.plot(frames, Tbl[column])
            ax.set_title(titles[c])
            ax.set_xlabel('Frames')
            ax.set_ylabel(colData[2]+' position value')
            
            pp.savefig()
            plt.close(fig)
             
            fig = plt.figure()

            ax2 = fig.add_subplot(1,1,1)
            #get autoCorrelation
            corr= scipy.correlate(Tbl[column][:70000], Tbl[column][:70000], mode = 'same')
            ax2.plot(corr[int(corr.size/2):]) #symmetric fn - just plot half
            ax2.set_title(titles[c+1])
            
            pp.savefig()
            plt.close(fig)
            #zoomed in
            fig = plt.figure()
            ax3 = fig.add_subplot(111)
            ax3.plot(corr[int(corr.size/2):]) #symmetric fn - just plot half
            ax3.set_title(titles[c+1])
            ax3.set_xlim(0, 1000)
            
            pp.savefig()
            plt.close(fig)
            
            fig = plt.figure()
            ax3 = fig.add_subplot(111)
            ax3.plot(corr[int(corr.size/2):]) #symmetric fn - just plot half
            ax3.set_title(titles[c+1])
            ax3.set_xlim(0, 200)
            
            pp.savefig()
            plt.close(fig)
            #plt.plot()
            fig = plt.figure()
            ax2 = fig.add_subplot(111)
            corr= scipy.correlate(Tbl[column][:70000], Tbl[column][:70000], mode = 'same')
            ax2.plot(corr[int(corr.size/2):]) #symmetric fn - just plot half
            ax2.set_title(titles[c+1] +'first half')
            
            pp.savefig()
            plt.close(fig)
            
            #zoomed in
            fig = plt.figure()
            ax3 = fig.add_subplot(111)
            ax3.plot(corr[int(corr.size/2):]) #symmetric fn - just plot half
            ax3.set_title(titles[c+1] +' First half')
            ax3.set_xlim(0, 1000)
            
            pp.savefig()
            plt.close(fig)
            
            fig = plt.figure()
            ax3 = fig.add_subplot(111)
            ax3.plot(corr[int(corr.size/2):]) #symmetric fn - just plot half
            ax3.set_title(titles[c+1] + ' first half')
            ax3.set_xlim(0, 200)
            
            pp.savefig()
            plt.close(fig)
            #plt.plot()
            c += 2
Example #49
    def get_delay(self, a, b):

        corr = scipy.correlate(a, b, mode='same')
        return (np.argmax(abs(corr)) - a.size / 2) / self.f
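A self-contained check of the delay formula above, with two made-up Gaussian pulses and an assumed sample rate standing in for `self.f`; under this convention the result is negative when `b` arrives after `a`:

import numpy as np

f = 1000.0                                     # assumed sample rate
t = np.arange(0, 1, 1 / f)
a = np.exp(-((t - 0.30) / 0.01) ** 2)          # pulse at 0.30 s
b = np.exp(-((t - 0.42) / 0.01) ** 2)          # same pulse, 0.12 s later
corr = np.correlate(a, b, mode='same')
print((np.argmax(abs(corr)) - a.size / 2) / f) # ~ -0.12 s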
Example #50
    def Fit(self, plot=False):
        #main function. Will plot each order separately, and allow user to interact if plot=True
        self.clicks = []
        #interpolate the telluric (earth's atmospheric transmission) function
        Telluric = scipy.interpolate.UnivariateSpline(self.telluric.x, self.telluric.y, s=0)
        print "Plotting... press i to begin clicking points, and d when done"
        outfile = open("residuals.log", "w")
        outfile2 = open("UsedLines.log", "a")

        linelist = np.loadtxt(utils.LineListFile)

        #Loop over the spectral orders
        for i in range(37, 51):
            print "Fitting order #" + str(i + 1)
            self.orderNum = i
            self.fitpoints = fitpoints()
            wave = self.orders[i].x
            flux = self.orders[i].y / self.orders[i].cont
            tell = Telluric(wave)
            print "wave = ", wave

            #Do a cross-correlation first, to get the wavelength solution close
            ycorr = scipy.correlate(flux - 1.0, tell - 1.0, mode="full")
            xcorr = np.arange(ycorr.size)
            lags = xcorr - (flux.size - 1)
            distancePerLag = (wave[-1] - wave[0]) / float(wave.size)
            offsets = -lags * distancePerLag
            offsets = offsets[::-1]
            ycorr = ycorr[::-1]

            fit = np.poly1d(np.polyfit(offsets, ycorr, ycorr.size // 100))
            ycorr = ycorr - fit(offsets)
            left = np.searchsorted(offsets, -1.0)
            right = np.searchsorted(offsets, +1.0)
            maxindex = ycorr[left:right].argmax() + left
            print "maximum offset: ", offsets[maxindex], " nm"
            pylab.plot(offsets, ycorr)
            pylab.show()
            userin = raw_input("Apply Cross-correlation correction? ")
            if "y" in userin:
                self.orders[i].x = self.orders[i].x + offsets[maxindex]

            #Fit using the (GridSearch) utility function
            data = DataStructures.xypoint(self.orders[i].x.size)
            data.x = np.copy(self.orders[i].x)
            data.y = np.copy(self.orders[i].y)
            data.cont = np.copy(self.orders[i].cont)
            fitfcn, offset = FitWavelength2(data, self.telluric, linelist)
            self.orders[i].x = fitfcn(self.orders[i].x - offset)

            #Let user fix, if plot is true
            if plot:
                #First, just plot all at once so user can examine fit
                left = np.searchsorted(self.telluric.x, self.orders[i].x[0])
                right = np.searchsorted(self.telluric.x, self.orders[i].x[-1])
                pylab.plot(self.orders[i].x, self.orders[i].y / self.orders[i].cont, label="data")
                pylab.plot(self.telluric.x[left:right],
                           self.telluric.y[left:right] * BSTAR(self.telluric.x[left:right]), label="model")
                pylab.legend(loc='best')
                pylab.title("Order " + str(self.orderNum + 1))
                pylab.show()

                #We only want to plot about 3 nm at a time
                spacing = 3.0
                data_left = 0
                data_right = np.searchsorted(self.orders[i].x, self.orders[i].x[0] + spacing)
                while (data_left < self.orders[i].x.size):
                    #Bind mouseclick:
                    self.fig = pylab.figure()
                    self.clickid = self.fig.canvas.mpl_connect('button_press_event', self.onclick)

                    left = np.searchsorted(self.telluric.x, self.orders[i].x[data_left])
                    right = np.searchsorted(self.telluric.x,
                                            self.orders[i].x[min(self.orders[i].x.size - 1, data_right)])
                    pylab.plot(self.orders[i].x[data_left:data_right],
                               self.orders[i].y[data_left:data_right] / self.orders[i].cont[data_left:data_right],
                               label="data")
                    pylab.plot(self.telluric.x[left:right],
                               self.telluric.y[left:right] * BSTAR(self.telluric.x[left:right]), label="model")
                    pylab.legend(loc='best')
                    pylab.title("Order " + str(self.orderNum + 1))
                    pylab.show()
                    data_left = data_right
                    data_right = np.searchsorted(self.orders[i].x,
                                                 self.orders[i].x[min(self.orders[i].x.size - 1, data_left)] + spacing)


                #Once you close the window, you will get past the pylab.show() command
                #Fit the points to a cubic
                #This is done in a loop, to iteratively remove outliers
                done = False
                while not done:
                    #self.fitpoints is filled when you are clicking in the window
                    if (len(self.fitpoints.x) > 3):
                        pars = np.polyfit(self.fitpoints.x, self.fitpoints.y, 3)
                    else:
                        pars = [0, 1, 0]  #y=x... meaning don't try to improve on this order
                    func = np.poly1d(pars)
                    ignorelist = []
                    x = np.array(self.fitpoints.x)
                    y = np.array(self.fitpoints.y)
                    resid = y - func(x)  #residuals from the fit
                    mean = resid.mean()
                    std_dev = resid.std()

                    #Find outliers (points with residuals over 0.01 or more than 2.5
                    #   standard deviations from the mean
                    for j in range(len(self.fitpoints.x)):
                        residual = self.fitpoints.y[j] - func(self.fitpoints.x[j])
                        if np.abs(residual) > 0.01 or np.abs(residual) > std_dev * 2.5:
                            ignorelist.append(j)
                    if len(ignorelist) == 0:
                        done = True
                    else:
                        for index in ignorelist[::-1]:
                            print "removing point ", index, " of ", len(self.fitpoints.x)
                            self.fitpoints.x.pop(index)
                            self.fitpoints.y.pop(index)

                #Done removing outliers. Apply fit to the wavelengths
                print "y = ", pars[0], "x^2 + ", pars[1], "x + ", pars[2]
                self.orders[i].x = func(self.orders[i].x)

                #Output the residuals, and plot them. Make sure they look alright
                for j in range(len(self.fitpoints.x)):
                    outfile.write(str(self.fitpoints.x[j]) + "\t" + str(self.fitpoints.y[j]) + "\t" + str(
                        self.fitpoints.y[j] - func(self.fitpoints.x[j])) + "\n")
                outfile.write("\n\n\n\n")
                pylab.plot(self.fitpoints.x, self.fitpoints.y - func(self.fitpoints.x), 'ro')
                pylab.show()

                #Finally, add the lines to UsedLineList.log
                for line in self.fitpoints.y:
                    outfile2.write("%.10g\n" % line)

                #Output after every order, in case program crashes
                FitsUtils.OutputFitsFile(self.filename, self.orders, func_order=5)
        outfile.close()

        #Output calibrated spectrum to file
        return FitsUtils.OutputFitsFile(self.filename, self.orders, func_order=5)
Example #51
def Correlate2d(data):
    # np.sum over a list (a generator with axis= does not work), per row q
    return np.array([np.sum([sp.correlate(q, np.concatenate((p, p[:-1]))) for p in data], axis=0) for q in data])
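Correlating against `concatenate((p, p[:-1]))` is a wrap-around trick: it yields the circular correlation of the two rows. A tiny numpy check (argument order chosen here so the longer array comes first and numpy keeps the natural ordering):

import numpy as np

p = np.array([1.0, 2.0, 3.0])
circ = np.correlate(np.concatenate((p, p[:-1])), p, mode='valid')
print(circ)   # [14. 11. 11.] = [sum(p[i] * p[(i + k) % 3]) for k = 0, 1, 2]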
Example #52
def radial_velocity(wv_obj,fx_obj,sig_obj,wv_std,fx_std,sig_std,rv_std,rv_std_err,obj_name,std_name):

# Find where standard and object overlap ---------------

	wv_min = max([min(wv_std),min(wv_obj)])
	wv_max = min([max(wv_std),max(wv_obj)])


# Overlap wv_obj and wv_std arrays.  Where they do not overlap, flux is set to 1 ---------------- 
	length=len(wv_obj)
	# For standard
	a_param = wv_std > wv_min
	b_param = wv_std < wv_max  
	i=0
	j=0
	while i < length:
		if a_param[i] == False:
			fx_std[i]=1
			i=i+1
		else:
			i=i+1
	while j < length:
		if b_param[j] == False:
			fx_std[j]=1
			j=j+1
		else:
			j=j+1	
	n_pix_std = len(wv_std)
	# For object
	a_param = wv_obj > wv_min
	b_param = wv_obj < wv_max
	i=0
	j=0
	while i < length:
		if a_param[i] == False:
			fx_obj[i]=1
			i=i+1
		else:
			i=i+1
	while j< length: 
		if b_param[j] == False:
			fx_obj[j]=1
			j=j+1
		else:
			j=j+1
	n_pix_obj = len(wv_obj)



# Creates ln standard wavelength array ---------------------------------
	min_wv_std = min(wv_std)
	max_wv_std = max(wv_std)
	acoef_std = (n_pix_std -1)/(math.log(max_wv_std) - math.log(min_wv_std))
	bcoef_std = (n_pix_std) - (acoef_std * math.log(max_wv_std))

	arr = numpy.arange(n_pix_std)+1
	wv_ln_std = numpy.exp((arr - bcoef_std)/acoef_std)
	
	
# Interpolate data onto same ln wavelength scale -------------------------------

	fx_interp_std = numpy.interp(wv_ln_std, wv_std, fx_std) 
	fx_interp_obj = numpy.interp(wv_ln_std, wv_obj, fx_obj)


# Rebin Data ----------------------------

	wv_arr_std=numpy.asarray(wv_ln_std,dtype=float)
	fx_arr_obj=numpy.asarray(fx_interp_obj,dtype=float)
	fx_arr_std=numpy.asarray(fx_interp_std,dtype=float)
	sig_arr_obj=numpy.asarray(sig_obj,dtype=float)
	sig_arr_std=numpy.asarray(sig_std,dtype=float)
	
	wv_ln_rebin_std=scipy.ndimage.interpolation.zoom(wv_arr_std,10)		#data rebinned by factor of 10
	fx_rebin_obj=scipy.ndimage.interpolation.zoom(fx_arr_obj,10)
	fx_rebin_std=scipy.ndimage.interpolation.zoom(fx_arr_std,10)
	sig_rebin_obj=scipy.ndimage.interpolation.zoom(sig_arr_obj,10)
	sig_rebin_std=scipy.ndimage.interpolation.zoom(sig_arr_std,10)

	
# Plot object and standard so you can clearly see that shift exists --------------------------------
	plt.figure(1)
	plt.plot(wv_ln_rebin_std,fx_rebin_obj,'r')
	plt.plot(wv_ln_rebin_std,fx_rebin_std,'b')
	v=[1.545,1.570,0,2]
	plt.axis(v)	
	

# Cross correlation loop -------------------------------- 
	pix_shift=[]		#initialize array for pixel shift values
	l = 0

	for l in range(0,500):
	
	# GETTING ARRAYS READY FOR CROSS CORRELATION
		
		# Randomize noise:
		# create gaussian distribution of random numbers b/t 1 and -1, multiply err by numbers, add numbers to flux
		fx_temp_obj=[None]*len(fx_rebin_obj)
		fx_temp_std=[None]*len(fx_rebin_std)
		rand_dist=[None]*len(fx_rebin_std)
		rand_dist2=[None]*len(fx_rebin_std)
		rand_dist=[random.gauss(0,.34) for i in rand_dist]
		rand_dist2=[random.gauss(0,.34) for i in rand_dist2]
		rand_dist=numpy.array(rand_dist)
		rand_dist2=numpy.array(rand_dist2)
		fx_temp_obj=numpy.array(fx_temp_obj)
		fx_temp_std=numpy.array(fx_temp_std)
		fx_temp_obj = fx_rebin_obj + (sig_rebin_obj * rand_dist)
		fx_temp_std = fx_rebin_std + (sig_rebin_std * rand_dist2)
		
		# Find std dev and mean of flux data
		mean_obj=fx_temp_obj.mean()
		mean_std=fx_temp_std.mean()
		stddev_obj=fx_temp_obj.std()
		stddev_std=fx_temp_std.std()
		
		# Regularize data (subtract mean, divide by std dev)
		fx_reg_temp_obj = fx_temp_obj-mean_obj
		fx_reg_temp_obj = fx_reg_temp_obj/stddev_obj
		fx_reg_temp_std = fx_temp_std-mean_std
		fx_reg_temp_std = fx_reg_temp_std/stddev_std
	

	# CROSS CORRELATION 

		# what you're comparing: obj flux and std flux
		y1=fx_reg_temp_obj
		y2=fx_reg_temp_std
		
		# compute the cross-correlation between y1 and y2
		ycorr = scipy.correlate(y1, y2, mode='full')
		ycorr1=ycorr[9750:10750]	#isolate section of array with gaussian

		length=len(ycorr1)
		xcorr=range(length)	#create x axis values
		#print xcorr		
		
		def chi2(p):	#define gaussian function for fitting
			sig2=p[2] ** 2
			m = (p[0] * numpy.exp(-0.5 * (xcorr - p[1]) ** 2 / sig2)) + p[3]
			return (ycorr1 - m)	
		
		amp = 6000	# guess some values
		mean = 300
		sig = 100
		sky = 1000	
		
		amp, mean, sig, sky = op.leastsq(chi2, [amp, mean, sig, sky])[0]
		
		
		#print 'amp=',amp,' mu=',mean, ' sig=',sig, ' sky=',sky
		
		print_num=l%50		#prints data every 100 fits
		if print_num == 0:
			print 'amp=',amp,' mu=',mean, ' sig=',sig, ' sky=',sky
		
		mean1=mean+9750	#add 9750 because I cut array down to just include gaussian

		ycorr_length=len(ycorr)
		pix_shift_val=(ycorr_length/2) - mean1

		pix_shift.append(pix_shift_val)
		
		l=l+1	

# End cross correlation loop --------------------------------- 
	

	#print len(ycorr)	
	#print pix_shift
	#pix_shift=numpy.array(pix_shift)	
	(mu,sigma)=norm.fit(pix_shift)	# get mean and std dev of array of pixel shift values
	print mu,sigma	
	
	my_gauss=[None]*len(xcorr)
	i=0
	while i < len(xcorr):	#creating an array based on values determined by gaussian fit
		sig2=sig ** 2	
		my_gauss[i] = (amp * (numpy.exp(-0.5 * ((xcorr[i] - mean) ** 2) / sig2))) + sky
		i=i+1
	
# Apply shift to arrays -------------------------------- 
	
	fx_rebin_list_obj=fx_rebin_obj.tolist()
	fx_rebin_list_std=fx_rebin_std.tolist()
	print 'mu=',mu
	if mu < 0:
		val= abs(mu)	# so we can shift properly	
		i=0
		while i < val:
			del fx_rebin_list_obj[0]
			fx_rebin_list_obj.append(1)
			i=i+1
		print 'mu is negative'		
	elif mu >= 0:
		val=mu
		i=0
		while i < val:
			del fx_rebin_list_std[0]	
			fx_rebin_list_std.append(1)
			i=i+1
	print 'mu=',mu		

# Create plots --------------------------------- 
	
	fig=plt.figure(l+1, figsize=(10,10))
	plt.plot([1,2,3])
	
	#Plots target and standard with shift applied
	plt.subplot(311)
	plt.plot(wv_ln_rebin_std, fx_rebin_list_obj, 'red')
	plt.plot(wv_ln_rebin_std, fx_rebin_list_std, 'blue')
	plt.xlabel('wavelength (microns)')
	plt.ylabel('normalized flux')
	target = 'Target: %s' %(obj_name)
	standard = 'Standard: %s' %(std_name)
	plt.annotate(target,xy=(.6,.9),xycoords='axes fraction',xytext=(.6,.9),textcoords='axes fraction',color='red') 
	plt.annotate(standard,xy=(.6,.8),xycoords='axes fraction',xytext=(.6,.8),textcoords='axes fraction',color='blue') 
	#plt.subplots_adjust(hspace=.5)
	
	#Plots example of gaussian fit to cross correlation function
	plt.subplot(312)
	plt.plot(xcorr, ycorr1, 'k.')
	plt.plot(xcorr, my_gauss, 'r--', linewidth=2)
	plt.xlabel('example of fit to cross correlation function')
	
	#print pix_shift

# Transform pixel shift to shift in radial velocity -------------------------------- 
	
	vshift=.426*mu
	err=.426*sigma
	print "vshift=",vshift
	
	rv_obj=rv_std-vshift
	
	rv_err=err+rv_std_err
	print "rv_obj=",rv_obj, "+/-", rv_err, ' km/s'
	
	rv_obj_round=round(rv_obj,4)
	err_round=round(rv_err,4)
	
	pix_shift_conv1=[.426*i for i in pix_shift]
	rv_arr=[(rv_std-i) for i in pix_shift_conv1]
	
	
# Plot histogram of pixel shift values -------------------------------- 
	plt.subplot(313)
	n, bins, patches=plt.hist(rv_arr,normed=1.0,facecolor='green',align='mid') 
	#Plot best fit gaussian over histogram
	y=mlab.normpdf(bins,rv_obj,err)
	plt.plot(bins,y,'r--',linewidth=2)
	plt.xlabel('radial velocity of target')
	plt.ylabel('frequency (normalized)')
	rad='RV = %s +/- %s' %(rv_obj_round,err_round)
	plt.annotate(rad,xy=(.6,.9),xycoords='axes fraction',xytext=(.65,.9),textcoords='axes fraction',color='black')
	plt.subplots_adjust(hspace=.4)

	figname='rv_%s.pdf' %(obj_name)
	plt.savefig(figname)
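A minimal, self-contained sketch of the technique this example implements: cross-correlate two regularized spectra, fit a gaussian to the correlation peak, and convert the pixel offset of the peak to a velocity. The 0.426 km/s-per-pixel factor is taken from the code above; the function name and the assumptions (numpy-array inputs of equal length on a common wavelength grid) are illustrative, not part of the original.

import numpy
from scipy import optimize as op

def rv_pixel_shift(flux_obj, flux_std, km_per_pix=0.426):
	# regularize: subtract mean, divide by standard deviation
	a = (flux_obj - flux_obj.mean()) / flux_obj.std()
	b = (flux_std - flux_std.mean()) / flux_std.std()
	ycorr = numpy.correlate(a, b, mode='full')
	x = numpy.arange(len(ycorr))
	def resid(p):	# gaussian-plus-constant model residuals
		return ycorr - (p[0] * numpy.exp(-0.5 * (x - p[1]) ** 2 / p[2] ** 2) + p[3])
	guess = [ycorr.max(), float(ycorr.argmax()), 10.0, 0.0]
	amp, mu, sig, sky = op.leastsq(resid, guess)[0]
	pix_shift = (len(ycorr) // 2) - mu	# offset of the fitted peak from zero lag
	return km_per_pix * pix_shift	# velocity shift in km/s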
Exemple #53
0
        d.pop(0)
        #print d
        n = numpy.array(map(float, d))
        #dapp = numpy.average(n)
        dapp = d.pop(0)
        dd.append(dapp)
    f.close()
    return dd

listX = LoadData('../brainlab/vb-sync-XReport.txt')
listY = LoadData('../brainlab/vb-sync-YReport.txt')

A=numpy.array(listX)
B=numpy.array(listY)

xcorr = scipy.correlate(A, B, mode='full') # 'full' so the lag axis dt below (length 2*nsamples-1) lines up

period = 1.0
tmax = 2.0
nsamples = len(listX)
phase_shift = 0.6*pi #unused

t = numpy.linspace(0.0, tmax, nsamples, endpoint=False)
dt = numpy.linspace(-t[-1], t[-1], 2*nsamples-1)

recovered_time_shift = dt[xcorr.argmax()]
recovered_phase_shift = 2*pi*(((0.5 + recovered_time_shift/period) % 1.0) - 0.5)
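# the modulo arithmetic wraps the equivalent phase into the principal interval [-pi, pi)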

print "xcorr = ", xcorr
print "xcorr.argmax = ", xcorr.argmax()
print "xcorr.len = ", len(xcorr)
Exemple #54
0
    ##setup done, now analyse

    mic_correlation = zeros([num_mic_pairs,(2*num_corr - 1)])

    for k in arange(num_mic_pairs):

        x = zeros(num_corr)
        y1 = zeros(num_corr)
        y2 = zeros(num_corr)

        for p in arange(num_corr):
            x[p] = ALS_data[p,0]
            y1[p] = ALS_data[p,(int(first_mic_in_pair[k]+1))]
            y2[p] = ALS_data[p,(int(second_mic_in_pair[k]+1))]#check this part

        ycorr = scipy.correlate(y1,y2,mode = 'full')
        xcorr = scipy.linspace(0,len(ycorr)-1, num = len(ycorr))

        ycorr = ycorr/sqrt(np.mean(ycorr*ycorr))

        ycorr_envelope = abs(scipy.signal.hilbert(ycorr)) #get hilbert envelope
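        # the analytic-signal envelope smooths the sign-alternating ycorr,
        # making the correlation peak (the inter-mic delay) easier to locate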

        ycorr_fft = scipy.fft(ycorr)
        # skip the DC bin, whose magnitude would otherwise dominate the argmax
        # (and make the division below blow up on a nonzero-mean ycorr)
        fft_max_period = len(ycorr_fft)/(np.argmax(abs(ycorr_fft[1:int(len(ycorr_fft)/2)])) + 1)

        mic_pair_to_plot_corr = 0
        if k == mic_pair_to_plot_corr:
            #visualize data
            pylab.subplot(311)
            pylab.plot(x,y1,'r.')
            pylab.plot(x,y2,'b.')
Exemple #55
0
def FitWavelength2(order, telluric, linelist, tol=0.05, oversampling=4, fit_order=3, max_change=2.0, debug=False):
    old = []
    new = []

    #Interpolate to finer spacing
    DATA_FCN = scipy.interpolate.UnivariateSpline(order.x, order.y, s=0)
    CONT_FCN = scipy.interpolate.UnivariateSpline(order.x, order.cont, s=0)
    MODEL_FCN = scipy.interpolate.UnivariateSpline(telluric.x, telluric.y, s=0)
    data = DataStructures.xypoint(order.x.size * oversampling)
    data.x = np.linspace(order.x[0], order.x[-1], order.x.size * oversampling)
    data.y = DATA_FCN(data.x)
    data.cont = CONT_FCN(data.x)
    model = DataStructures.xypoint(data.x.size)
    model.x = np.copy(data.x)
    model.y = MODEL_FCN(model.x) * BSTAR(model.x)

    #Begin loop over the lines
    for line in linelist:
        if line - tol > data.x[0] and line + tol < data.x[-1]:
            #Find line in the model
            left = np.searchsorted(model.x, line - tol)
            right = np.searchsorted(model.x, line + tol)
            minindex = model.y[left:right].argmin() + left

            mean = model.x[minindex]
            left2 = np.searchsorted(model.x, mean - tol * 2)
            right2 = np.searchsorted(model.x, mean + tol * 2)

            argmodel = DataStructures.xypoint(right2 - left2)
            argmodel.x = np.copy(model.x[left2:right2])
            argmodel.y = np.copy(model.y[left2:right2])

            #Do the same for the data
            left = np.searchsorted(data.x, line - tol)
            right = np.searchsorted(data.x, line + tol)
            minindex = data.y[left:right].argmin() + left

            mean = data.x[minindex]

            argdata = DataStructures.xypoint(right2 - left2)
            argdata.x = np.copy(data.x[left2:right2])
            argdata.y = np.copy(data.y[left2:right2] / data.cont[left2:right2])

            #Do a cross-correlation first, to get the wavelength solution close
            ycorr = scipy.correlate(argdata.y - 1.0, argmodel.y - 1.0, mode="full")
            xcorr = np.arange(ycorr.size)
            maxindex = ycorr.argmax()
            lags = xcorr - (argdata.x.size - 1)
            distancePerLag = (argdata.x[-1] - argdata.x[0]) / float(argdata.x.size - 1)
            offsets = -lags * distancePerLag
            shift = offsets[maxindex]
            shift, success = scipy.optimize.leastsq(WavelengthErrorFunction, shift, args=(argdata, argmodel))
            if (debug):
                print argdata.x[0], argdata.x[-1], argdata.x.size
                print "wave: ", mean, "\tshift: ", shift, "\tsuccess = ", success
                pylab.plot(model.x[left:right] - shift, model.y[left:right], 'g-')
                pylab.plot(argmodel.x, argmodel.y, 'r-')
                pylab.plot(argdata.x, argdata.y, 'k-')
            if (success < 5):
                old.append(mean)
                new.append(mean + float(shift))
    if debug:
        pylab.show()
        pylab.plot(old, new, 'ro')
        pylab.show()
    #fit = UnivariateSpline(old, new, k=1, s=0)
    #Iteratively fit to a cubic with sigma-clipping
    fit = np.poly1d((1, 0))
    mean = 0.0
    done = False
    while not done and len(old) > fit_order:
        done = True
        mean = np.mean(old)
        fit = np.poly1d(np.polyfit(old - mean, new, fit_order))
        residuals = fit(old - mean) - new
        std = np.std(residuals)
        #if debug:
        #  pylab.plot(old, residuals, 'ro')
        #  pylab.plot(old, std*np.ones(len(old)))
        #  pylab.show()
        badindices = np.where(np.logical_or(residuals > 2 * std, residuals < -2 * std))[0]
        for badindex in badindices[::-1]:
            print "Deleting index ", badindex + 1, "of ", len(old)
            del old[badindex]
            del new[badindex]
            done = False

    #Check if the function changed things by too much
    difference = np.abs(order.x - fit(order.x - mean))
    if np.any(difference > max_change):
        fit = np.poly1d((1, 0))
        mean = 0.0

    if debug:
        pylab.plot(old, fit(old - mean) - new, 'ro')
        pylab.show()
        pylab.plot(fit(order.x - mean), order.y, 'k-')
        pylab.plot(model.x, model.y, 'g-')
        print mean
        print fit
        pylab.show()

    return fit, mean
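The iterative sigma-clipped polynomial fit used above is worth isolating: fit about the mean of x, reject points whose residuals exceed 2 sigma, and refit until nothing is clipped. A minimal sketch of that idea; the function name and defaults are illustrative, not part of the original.

import numpy as np

def clipped_polyfit(x, y, order=3, nsigma=2.0):
    x, y = np.asarray(x, float), np.asarray(y, float)
    keep = np.ones(len(x), dtype=bool)
    fit, mean = np.poly1d((1, 0)), 0.0
    while keep.sum() > order:
        mean = x[keep].mean()
        fit = np.poly1d(np.polyfit(x[keep] - mean, y[keep], order))
        resid = fit(x[keep] - mean) - y[keep]
        bad = np.abs(resid) > nsigma * resid.std()
        if not bad.any():
            break  # converged: nothing left to clip
        keep[np.where(keep)[0][bad]] = False  # drop outliers and refit
    return fit, mean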
Exemple #56
0
	parameters['BRAINNAME']='vb-exp'+str(int(ii))
	brainsim.simBrain(parameters)
	listX = brainsim.loadBrain(parameters['BRAINNAME']+'-XReport.txt')
	listY = brainsim.loadBrain(parameters['BRAINNAME']+'-YReport.txt')

	nsamples=len(listX)
	t = numpy.linspace(0.0, parameters['ENDSIM'], nsamples, endpoint=False)
	A=numpy.array(listX).astype(float)
	B=numpy.array(listY).astype(float)

	# Purely spike correlation
	cntX,spikesX = countSpikes(listX)
	cntY,spikesY = countSpikes(listY)
	print "cntX,spikesX: ", cntX, spikesX
	print "cntY,spikesY: ", cntY, spikesY
	sxcorr = scipy.correlate(spikesX, spikesY)
	spikecorr.append(sxcorr[sxcorr.argmax()])

	# Overall correlation
	xcorr = giveCorrelation()
	correlations.append(xcorr[xcorr.argmax()])

	# Total_correlation from Brian Simulator
	#totalcorr.append(-1*totalCorrelation(spikesX,spikesY,cntX,cntY)*100000)

	# Phase shift
	phases.append(givePhaseShift(xcorr))
#	showSpikePlotXY()
	ii=ii+1

print "Spike Correlations:", spikecorr
Exemple #57
0
z_2 = hdulist[0].header["Z"]
flux_2 = hdulist[0].data
hdulist.close()

if z_1 > z_2:
    numerator = z_1
    denominator = z_2
    flux_l = flux_1
    flux_s = flux_2
else:
    numerator = z_2
    denominator = z_1
    flux_l = flux_2
    flux_s = flux_1

corr = s.correlate(flux_s[0], flux_l[0], mode="same")
corr_norm_flux = corr / s.sqrt((corr * corr).sum())

median = n.median(wave)

ratio = n.log10((1 + numerator) / (1 + denominator))
peak = median - ratio

p.figure()
p.plot(wave, corr_norm_flux, color="g")
p.xlabel("Angstroms")
p.ylabel(bunit_1)
p.title("Z_1= " + str(z_1) + " and Z_2 =" + str(z_2))
p.axvline(peak, color="r")
p.savefig("/astro/u/bhagwadG/plots/correlated/" + "correlated" + "_" + mjd_1 + "_" + mjd_2 + ".png")
Exemple #58
0
    def plot(self, stim=None, Tseg=0):
        import pylab
        import matplotlib

        if stim is None:
            stim = self.generate()

        matplotlib.rc('text', usetex=True)
        pylab.figure()
        pylab.subplot(3, 1, 1)

        binwidth = 30e-3  #self.binwidth
        tr = numpy.arange(0, stim.Tsim, stim.dt)
        (y, ty) = spikes2rate(utils.flatten([c.data for c in stim.channel]),
                              binwidth)

        valid = ~numpy.isnan(y)  # (y != numpy.nan) is always True; isnan is the correct test
        ty = ty.compress(valid.flat)
        y = y.compress(valid.flat)

        pylab.plot(tr, stim.r, ty, y / self.nChannels, 'r--')
        pylab.axis('tight')
        pylab.setp(pylab.gca(), xlim=[0, self.Tstim])
        pylab.xlabel('time [s]')
        pylab.ylabel('rate per spike train [Hz]')

        pylab.title('rates')
        pylab.legend(
            ('r(t)',
             r'$r_{measured}$ ($\Delta=%s~ms$)' % str(binwidth * 1000)))

        pylab.subplot(3, 1, 2)  # cla reset; hold on;

        for j in range(self.nChannels):
            st = stim.channel[j].data
            for spike in list(st):
                pylab.plot(numpy.array([spike, spike]),
                           (numpy.array([[-0.3], [0.3]]) + j + 1),
                           color='k')

        pylab.setp(pylab.gca(),
                   xlim=[0, self.Tstim],
                   ylim=[0.5, self.nChannels + 0.5],
                   yticks=numpy.arange(self.nChannels) + 1)
        pylab.xlabel('time [s]')
        pylab.ylabel('channel')
        pylab.title('spike trains')  #, fontweight='bold')

        pylab.subplot(3, 1, 3)
        if Tseg > 0:
            r = self.rand_rate(Tseg)
            cr = scipy.correlate(r - numpy.mean(r),
                                 r - numpy.mean(r),
                                 mode='full')
        else:
            Tseg = self.Tstim
            cr = scipy.correlate(stim.r - numpy.mean(stim.r),
                                 stim.r - numpy.mean(stim.r),
                                 mode='full')

        cr = cr / max(cr)
        tr = (numpy.arange(len(cr)) - len(cr) / 2) * self.dt

        cs = scipy.correlate(y - y.mean(), y - y.mean(), mode='full')
        cs = cs / cs.max()

        ts = (numpy.arange(len(cs)) - len(cs) / 2) * self.dt

        pylab.plot(tr, cr, ts, cs, 'r--')
        pylab.xlabel('lag [s]')
        pylab.ylabel('correlation coeff')
        mm = max(abs(min(min(tr), min(ts))), abs(max(max(tr), max(ts))))

        pylab.setp(pylab.gca(), xlim=[-mm, mm], ylim=[min(cs), 1])
        pylab.title('auto-correlation', fontweight='bold')
        pylab.legend(
            ('r(t)',
             r'$r_{measured}$ ($\Delta=%s~ms$)' % str(binwidth * 1000)))
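The normalization used in this method (subtract the mean, autocorrelate, divide by the zero-lag value) yields a correlation coefficient that is exactly 1 at zero lag. A standalone sketch with an illustrative signal:

import numpy

dt = 1e-3	# sample spacing (illustrative)
t = numpy.arange(0.0, 1.0, dt)
r = numpy.sin(2*numpy.pi*5*t) + 0.1*numpy.random.randn(len(t))
cr = numpy.correlate(r - r.mean(), r - r.mean(), mode='full')
cr = cr / cr.max()	# zero-lag peak normalized to 1.0
lags = (numpy.arange(len(cr)) - len(cr)//2) * dt	# lag axis in seconds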