Example #1
def periodogramfft(d, time, data):
    from scipy.signal import hann, periodogram
    import numpy.fft as fft
    print "d=", d

    pidx, pv, tidx, tv = peaks.turning_points(data, minidx=0)
    tts = time[pidx]
    rspl = UnivariateSpline(time, data, k=4, s=0)
    t0 = tts[1]
    tf = tts[d + 1]
    dt = time[1] - time[0]

    twin = np.arange(t0, tf, dt)
    N = len(twin)
    fs = 1 / dt
    #twin1 = np.arange(t0, tts[3], dt)
    #Nfft = len(twin1)
    #fs = N/(tf-t0)
    Nfft = 100000
    rwin = rspl(twin)
    rwin = rwin - np.mean(rwin)
    hwin = hann(len(twin))
    rwin = rwin * hwin
    #f, pgram = periodogram(hwin, fs=fs, nfft=Nfft)
    fftout = fft.rfft(rwin, 50 * len(rwin))
    fftfreq = fft.rfftfreq(50 * len(rwin), dt)
    print(len(fftout))
    print(len(fftfreq))
    return 2 * np.pi * fftfreq, np.abs(fftout)
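A minimal, self-contained sketch of the same technique (mean removal, Hann taper, heavily zero-padded rFFT) using only NumPy and SciPy; the synthetic signal and all names below are illustrative stand-ins for the original data and `peaks` module.

import numpy as np
from scipy.signal.windows import hann

# Synthetic test signal: a small oscillation at angular frequency 0.3 rad/s.
dt = 0.1
t = np.arange(0.0, 500.0, dt)
r = 1.0 + 0.05 * np.cos(0.3 * t)

# Remove the mean, taper with a Hann window, and zero-pad the rFFT heavily
# so the spectral peak can be read off on a fine frequency grid.
rwin = (r - np.mean(r)) * hann(len(r))
nfft = 50 * len(rwin)
spectrum = np.abs(np.fft.rfft(rwin, nfft))
omega = 2 * np.pi * np.fft.rfftfreq(nfft, dt)

print("peak angular frequency:", omega[np.argmax(spectrum)])  # close to 0.3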
Example #2
def fit_sinusoids(time, data, width=2):

    #plot the data
    #plt.plot(time,data,color="red",label="data")

    #get the amplitude and mean of the data (approx.constant)
    amp, mean = get_amp_mean(data, time)

    #take peak-to-peak ranges of data
    ptimes, pvals, ttimes, tvals = pks.turning_points(data)
    turns = sorted(list(ptimes) + list(ttimes))
    rngs = list(zip(turns[:-width:width], turns[width::width]))

    #number of cycles each rng covers
    ncycles = width / 2.0

    #set this to 2*pi if you want an angular frequency
    fscale = 1.0
    freqs = []
    for a, b in rngs:
        #the period of one cycle in this range; freq = fscale / period
        period = (time[b] - time[a]) / ncycles
        freqs.append(fscale / period)

    #set the phase to ensure the fitted curve will pass through the data at the
    #initial point
    phases = []
    for rng, freq in zip(rngs, freqs):
        t = time[rng[0]]
        th = data[rng[0]]
        phases.append(sin_phase(amp, freq, mean, th, t))

    return amp, mean, rngs, freqs, phases
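The same frequency-from-extrema idea in a self-contained form, with `scipy.signal.find_peaks` standing in for the project's `pks.turning_points`; `freq_from_extrema` and the toy signal are illustrative assumptions.

import numpy as np
from scipy.signal import find_peaks

def freq_from_extrema(time, data, width=2):
    # Sorted indices of all turning points (peaks of data and of -data).
    pk, _ = find_peaks(data)
    tr, _ = find_peaks(-data)
    turns = np.sort(np.concatenate((pk, tr)))

    # Every window of `width` consecutive extrema spans width/2 cycles.
    ncycles = width / 2.0
    rngs = list(zip(turns[:-width:width], turns[width::width]))
    return [ncycles / (time[b] - time[a]) for a, b in rngs]

# Toy usage: a 0.25 Hz sinusoid sampled at 10 Hz.
t = np.arange(0.0, 40.0, 0.1)
print(freq_from_extrema(t, np.sin(2 * np.pi * 0.25 * t)))  # values near 0.25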
Example #3
def trajpassage(N, traj):
    """Compute from the trajectory the index of the resonance (i.e. the halfway
    index) and those of N full passages in either direction from it.
    """
    residx = len(traj) // 2
    pidx, pv = pk.turning_points(traj, joined=True)
    pidx = np.array(pidx, dtype=int)
    try:
        right_ex = np.argmax(pidx>residx)
    except ValueError:
        return [-1, residx, -1]
    right_done = right_ex + N
    if right_done >= len(pidx):
        right_idx = len(traj)-1
    else:
        right_idx = pidx[right_done]
    
    left_ex = right_ex - 1
    left_done = left_ex - N
    if left_done < 0:
        left_idx = 0
    else:
        left_idx = pidx[left_done]
    # plt.plot(traj)
    # plt.plot(residx, traj[residx], marker="o", markersize=10)
    # plt.plot(left_idx, traj[left_idx], marker="o", markersize=10)
    # plt.plot(right_idx, traj[right_idx], marker="o", markersize=10)
    # plt.show()
    return [left_idx, residx, right_idx]
Example #4
def divvy_coef(time, data, coef=0.6):
    """Finds the indices d_k at which data is closest to minimal, with the 0th and last
       indices being 0 and len(data). Computes M_k = (d_k + d_{k+1} / 2), 
       W_k = d_{k+1} - d_k, and returns an array such that each row is an index range:
       [[       0          ,  M_0 + coef * W_0],
        [M_1 - coef * W_1  ,  M_1 + coef * W_1],
           .
           .
           .
        [M_N - coef * W_N  ,        -1        ]]  
        The terms 'M_0 -/+ coef * W_0' are rounded to the nearest integer.
        (they must be good indices)
    """
    # Get a sorted list of all extremal indices.
    pk_idx, pk_v, tr_idx, tr_v = peaks.turning_points(data)
    both_idx = [0] + tr_idx + [len(time)]
    both_idx.sort()
    M_k = [(d0 + d1)/2. for d0, d1 in zip(both_idx[:-1], both_idx[1:])]
    fW_k = [coef*(d1 - d0) for d0, d1 in zip(both_idx[:-1], both_idx[1:])]
    
    left = [M - f for M, f in zip(M_k, fW_k)] 
    right = [M + f for M, f in zip(M_k, fW_k)] 
    index_ranges = np.column_stack((left, right))
    index_ranges[0, 0] = 0 
    index_ranges[-1, 1] = len(time)
    return np.around(index_ranges, decimals=0).astype(int)
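A compressed sketch of the same index bookkeeping on a toy signal, with `scipy.signal.find_peaks` locating the minima instead of `peaks.turning_points`; `divvy_coef_demo` is a hypothetical name.

import numpy as np
from scipy.signal import find_peaks

def divvy_coef_demo(time, data, coef=0.6):
    # Indices where the data is locally minimal, bracketed by 0 and len(time).
    minima, _ = find_peaks(-data)
    d = np.concatenate(([0], minima, [len(time)]))
    M = 0.5 * (d[:-1] + d[1:])       # midpoints M_k
    fW = coef * (d[1:] - d[:-1])     # coef * widths W_k
    ranges = np.column_stack((M - fW, M + fW))
    ranges[0, 0] = 0
    ranges[-1, 1] = len(time)
    return np.around(ranges).astype(int)

t = np.arange(0.0, 30.0, 0.1)
print(divvy_coef_demo(t, np.cos(t)))  # one row of [start, end] indices per cycle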
Example #5
def get_amp_mean(data, time):
    #to get the amplitude, we first get the peaks.
    #then at first approximation we consider the amplitude to be the average
    #distance between peak and mean.
    ptimes, pvals, ttimes, tvals = pks.turning_points(data)
    vals = np.asarray(pvals + tvals)
    mean = np.mean(data)
    distances = np.fabs(mean - vals)
    amp = np.mean(distances)
    return amp, mean
Example #6
def get_amp_mean(data, time):
    mean = np.mean(data)

    #to get the amplitude, we first get the peaks.
    #then at first approximation we consider the amplitude to be the average
    #distance between peak and mean.
    turns, vals = pks.turning_points(data)
    distances = np.fabs(mean - np.asarray(vals))
    amp = np.mean(distances)
    return amp, mean
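A self-contained variant of the same amplitude/mean estimate, using `scipy.signal.find_peaks` in place of `pks.turning_points`; the helper name and test signal are illustrative.

import numpy as np
from scipy.signal import find_peaks

def amp_mean_demo(data):
    # The mean is the (approximately constant) offset; the amplitude is the
    # average |extremum - mean| distance over all peaks and troughs.
    mean = np.mean(data)
    pk, _ = find_peaks(data)
    tr, _ = find_peaks(-data)
    vals = data[np.concatenate((pk, tr))]
    amp = np.mean(np.fabs(mean - vals))
    return amp, mean

t = np.linspace(0.0, 20 * np.pi, 4000)
print(amp_mean_demo(3.0 + 0.7 * np.sin(t)))  # roughly (0.7, 3.0)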
Example #7
def savgol_omega(path, N=7, polorder=3, delta=0.5):
    t = get_coord("t", path)
    xA = np.column_stack((t, get_coord("xA", path)))
    xB = np.column_stack((t, get_coord("xB", path)))
    Omega, OmegaMag = savgol_omegafunc(xA,
                                       xB,
                                       N,
                                       polorder=polorder,
                                       delta=delta)
    peaklist, smooth = peaks.turning_points(OmegaMag[:, 1], return_smooth=True)

    OmegaMag[:, 1] = smooth
    #smoothed = smooth_savgol(OmegaMag)
    #OmegaMag_smooth = smooth_omegamag(OmegaMag)
    return Omega, OmegaMag
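For the smoothing step alone, `scipy.signal.savgol_filter` can be called directly; a hedged sketch on synthetic data (the `delta` argument only matters when derivatives are requested, so it is omitted here).

import numpy as np
from scipy.signal import savgol_filter

# Noisy stand-in for |Omega(t)|.
t = np.linspace(0.0, 100.0, 2001)
omega_mag = 0.02 * (1.0 + 0.1 * np.cos(0.3 * t)) + 1e-4 * np.random.randn(len(t))

# Savitzky-Golay smoothing: the window length must be odd and larger than polyorder.
smooth = savgol_filter(omega_mag, window_length=7, polyorder=3)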
Example #8
def periodogram(d, time, data):
    from scipy.signal import hann, lombscargle
    print "d=", d
    #datamean = np.mean(data)
    datashift = data - np.mean(data)
    pidx, pv, tidx, tv = peaks.turning_points(datashift, minidx=0)
    tts = time[pidx]
    rspl = UnivariateSpline(time, datashift, k=4, s=0)
    twin = np.linspace(tts[1], tts[d + 1], 5000)
    rwin = rspl(twin)
    hwin = hann(len(twin))
    rwin = rwin * hwin
    f = np.linspace(0.005, 0.1, 3000)
    pgram = lombscargle(twin, rwin, f)
    pgram = np.sqrt(4 * (pgram / rwin.shape[0]))
    return f, pgram
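A self-contained sketch of the windowed Lomb-Scargle estimate on a synthetic signal; note that `scipy.signal.lombscargle` expects angular frequencies, and the test signal is an illustrative assumption.

import numpy as np
from scipy.signal import lombscargle
from scipy.signal.windows import hann

# Zero-mean test signal at angular frequency 0.05 rad/s, tapered with a Hann window.
t = np.linspace(0.0, 2000.0, 5000)
r = np.cos(0.05 * t)
rwin = (r - np.mean(r)) * hann(len(t))

f = np.linspace(0.005, 0.1, 3000)           # angular frequencies to scan
pgram = lombscargle(t, rwin, f)
amp = np.sqrt(4 * (pgram / rwin.shape[0]))  # normalize to an amplitude estimate

print("peak at omega =", f[np.argmax(pgram)])  # close to 0.05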
Example #9
def divvy(time, data, N_overlap=20):
    """ Breaks 'time' into pieces, each containing about N cycles of
        data, such that each piece overlaps its successor by N_overlap points.
        Returns: a 2D numpy array 'pieces' such that pieces[i, 0] is the start
        and pieces[i, 1] the end index of each piece.
    """
    # Get a sorted list of all extremal indices.
    pk_idx, pk_v, tr_idx, tr_v = peaks.turning_points(data)
    both_idx = pk_idx + tr_idx
    both_idx.sort()
    assert both_idx[-2]+N_overlap < len(time), "failed size assumptions!"
    assert both_idx[-1] < len(time), "failed size assumptions!"
    pieces = np.zeros((len(both_idx)+1, 2))
    pieces[1:, 0] = both_idx[:]
    pieces[:-1, 1] = np.copy(both_idx[:]) + N_overlap
    pieces[-1, 1] = len(time)
    return pieces.astype(int)
Example #10
def flip(data, junk_inds=0, shift=True):
    """Reflect descending parts of an oscillating function such that they increase.
    """
    pk_idx, pk_v, tr_idx, tr_v = peaks.turning_points(data, minidx=junk_inds)
    for a in [pk_idx, pk_v, tr_idx, tr_v]:
        if len(a) <= 0:
            return data
        #assert len(a) > 0, "No peaks found!"
    #we always want to flip between peaks and troughs
    initially_descending = True

    if pk_idx[0] == 0:
        initially_descending = True
    elif tr_idx[0] == 0:
        initially_descending = False
    elif pk_idx[0] < tr_idx[0]:
        initially_descending = False
    elif pk_idx[0] > tr_idx[0]:
        initially_descending = True
        pk_idx = [0] + pk_idx
        pk_v = [data[0]] + pk_v
    else:
        raise ValueError("Identified same point as peak and trough")

    last = len(data)
    if pk_idx[-1] != last and tr_idx[-1] != last:
        if pk_idx[-1] > tr_idx[-1]:  #last extremum is a peak so we end descending
            tr_idx = tr_idx + [last]
    if tr_idx[0] == 0:
        tr_idx = tr_idx[1:]
    ranges = zip(pk_idx, tr_idx, pk_v)
    output = np.copy(data)
    for l, r, this_max in ranges:
        output[l:r] = 2 * this_max - data[l:r]

    ##account for the initial phase
    if initially_descending and shift:
        output -= 2 * output[0]
    return output
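A stripped-down sketch of the same reflection idea on a cosine, using `scipy.signal.find_peaks`; unlike `flip`, it does not handle an initial descending segment or the phase shift, so treat it only as an illustration of the peak-to-trough reflection.

import numpy as np
from scipy.signal import find_peaks

def flip_demo(data):
    # Reflect each descending peak-to-trough segment about its peak value.
    pk, _ = find_peaks(data)
    tr, _ = find_peaks(-data)
    out = np.copy(data)
    for p in pk:
        later = tr[tr > p]
        r = later[0] if len(later) else len(data)
        out[p:r] = 2 * data[p] - data[p:r]
    return out

t = np.linspace(0.0, 6 * np.pi, 600)
flipped = flip_demo(np.cos(t))  # interior descending halves now increase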
Example #11
def unwrap(data, thresh=0.2, initial_fraction=1., scale=1., junk_inds=200):
    """Produce a continuous 'unwrapped phase' which increases monotonically and accretes
       by 2Pi with every period in the data.
       Step 1: make monotonic by reflecting around the y=ymax line
               at every point where the derivative changes sign.
            2: This produces a set of discontinuous curves, one per cycle. Each
               should represent an increase by scale. Thus, multiply every range
               by scale*(max - min).
            3: Make the curve continuous by
               shifting each cycle up such that it joins with its predecessor.
       Params: 'thresh' is a minimum discontinuity required to identify a cycle.
                 Useful for ignoring junk.
               'starts_ascending' should be set to True if the data is initially
                 ascending (in principle this could be detected automatically).
                 If your results seem to be correct up to a y-reflection for each
                 period, try flipping this value.
               'initial_fraction' is the fraction of the relevant phase the orbit
                 begins at. For r-data, for example, we typically begin at
                 apastron and this should be set to 0.5. If your initial cycle
                 seems distorted compared to the others in a systematic (i.e.
                 non-junk-like) way, you probably need to adjust this.
    """
    flipped = flip(data, junk_inds=junk_inds)
    pk_idx, pk_v, tr_idx, tr_v = peaks.turning_points(flipped,
                                                      minidx=junk_inds)
    for a in [pk_idx, pk_v, tr_idx, tr_v]:
        if len(a) <= 0:
            return flipped, flipped
    output = np.copy(flipped)
    if pk_idx[0] == 0:
        zipvals = zip(pk_idx[1:], pk_v[1:])
    else:
        zipvals = zip(pk_idx, pk_v)
    for idx, val in zipvals:
        output[idx + 1:] += val  #+ central
    return output, flipped
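For comparison, when the signal is close to sinusoidal the standard route to a monotonically accreting phase is the analytic-signal phase; a minimal sketch with `scipy.signal.hilbert` and `np.unwrap` (the chirped test signal is an assumption).

import numpy as np
from scipy.signal import hilbert

# Zero-mean oscillation with a slowly drifting frequency.
t = np.linspace(0.0, 200.0, 4000)
r = np.cos(0.5 * t + 0.01 * t**2)
r = r - np.mean(r)

# Analytic-signal phase, unwrapped so it grows by 2*pi per cycle.
phase = np.unwrap(np.angle(hilbert(r)))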
Example #12
def monotonic_acos(time, data, tnew=None, unwrap=True, minidx=200, Niter=0):
    """Identifies increasing and decreasing branches of 'data' by finding 
       extrema
       and comparing their heights (e.g. the range between a minimum [maximum]
       and a maximum [minimum] is increasing [decreasing]). Then, applies
       the following piecewise function:
          y = acos(data) {decreasing branch}
          y = 2Pi - acos(data) {increasing branch}
       This produces a monotonically increasing phase up to jump discontinuities
       at the increasing-decreasing transition points (i.e. at the branch cuts
       of acos). If unwrap is True, this function will add factors of 2Pi to 
       the jump discontinuities in order to produce a smooth curve.

       If tnew is not None, the above function is evaluated at the points
       data(tnew) only, which are interpolated from the original time series
       'time' using a third order spline. The arguments 'time' and 'tnew'
       must both be supplied if either one is.
       
       Typically tnew will be the radial extremum times found from omega.
    """
    pidx, pv, tidx, tv = peaks.turning_points(data, minidx=minidx)
    amp, off = ampoffset(time, data)
    dscale = (data - off) / amp
    et, ev = peaks.joined_peaks(time, data)

    nrngs = len(et) + 1
    npi = np.zeros(nrngs)
    if tidx[0] < pidx[0]:  #initially descending
        #ascending = [0, 1, 0, 1, 0, 1...]
        #npi = [0, 0, 1, 1, 2, 2, 3, 3...]
        ascending = truefalse(False, nrngs)
        npi[::2] = np.arange(0, len(npi[::2]))
        npi[1::2] = np.arange(0, len(npi[1::2]))
    elif pidx[0] < tidx[0]:  #initially ascending
        #ascending = [1, 0, 1, 0, 1...]
        #npi = [0, 1, 1, 2, 2, 3, 3...]
        ascending = truefalse(True, nrngs)
        npi[1::2] = np.arange(1, len(npi[1::2]) + 1)
        npi[2::2] = np.arange(1, len(npi[2::2]) + 1)
    else:
        raise ValueError("First extremum identified as both trough and peak.")
    pzip = zip(et, ascending, npi)
    if tnew is not None:
        spl = UnivariateSpline(time, dscale, s=0, k=4)
        dnew = spl(tnew)
    else:
        dnew = dscale
        tnew = time

    chi = np.zeros(len(tnew))
    lidx = 0
    breaknow = False
    for t_r, ascend, thisnpi in pzip:
        if t_r > tnew[-1]:
            ridx = len(tnew)
            breaknow = True
        else:
            ridx = np.argmin(tnew <= t_r)
        chi[lidx:ridx] = piecewise_acos(dnew[lidx:ridx], ascend)
        if unwrap:
            chi[lidx:ridx] += 2.0 * np.pi * thisnpi
        if breaknow:
            break
        lidx = ridx
    return chi
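A minimal sketch of the piecewise-acos mapping itself, for a signal already scaled to [-1, 1]; the ascending/descending branches are taken from the sign of a numerical derivative rather than from `peaks.turning_points`, and the 2*pi offsets are added at the branch cuts, as the docstring above describes.

import numpy as np

def monotonic_acos_demo(dscale):
    # Decreasing branch: phi = acos(d); increasing branch: phi = 2*pi - acos(d).
    d = np.clip(dscale, -1.0, 1.0)
    descending = np.gradient(d) < 0
    phi = np.where(descending, np.arccos(d), 2 * np.pi - np.arccos(d))
    # Add 2*pi after every downward jump (branch cut) to keep the phase continuous.
    jumps = np.diff(phi) < -np.pi
    phi[1:] += 2 * np.pi * np.cumsum(jumps)
    return phi

t = np.linspace(0.0, 8 * np.pi, 2000)
chi = monotonic_acos_demo(np.cos(t))  # approximately equal to t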
Example #13
def freq_from_splines(
        data,
        offset_order=1,
        amp_order=3,
        amp_smoothness=0,
        signal_order=5,
        signal_smoothness=1):
    #Enforce correct types and assumptions
    if not isinstance(data, np.ndarray):
        raise TypeError("data should be a numpy array")
    if data.ndim != 2:
        raise ValueError("data should be 2 dimensional; first axis time")

    t = data[:, 0]
    y = data[:, 1]

    if not isinstance(offset_order, int):
        raise TypeError("offset_order must be of integer type")
    if (offset_order < 0):
        raise ValueError("offset_order must be nonnegative.")

    if not isinstance(amp_order, int):
        raise TypeError("amp_order must be of integer type")
    if (amp_order < 0):
        raise ValueError("amp_order must be nonnegative.")

    if not isinstance(amp_smoothness, int):
        raise TypeError("amp_smoothness must be of integer type")
    if (amp_smoothness < 0):
        raise ValueError("amp_smoothness must be nonnegative.")

    if not isinstance(signal_order, int):
        raise TypeError("signal_order must be of integer type")
    if (signal_order < 3):
        raise ValueError("signal_order must be at least 3.")

    if not isinstance(signal_smoothness, int):
        raise TypeError("signal_smoothness must be of integer type")
    if (signal_smoothness < 0):
        raise ValueError("signal_smoothness must be nonnegative.")

    #Compute the offset M
    M = do_fit(t, y, offset_order)
    #Get peaks and troughs
    #The indices and values of the peaks.
    turns, turn_vals = pks.turning_points(y, 0)

    #Distinguish the peaks from the troughs
    peaks, peak_vals, troughs, trough_vals = pks.peaks_and_troughs(
        turns, turn_vals)
    #Spline interpolations of both (this seems to work better than polyfits)
    pspl = interpolate.UnivariateSpline(t[peaks],
                                        peak_vals,
                                        k=amp_order,
                                        s=amp_smoothness)
    trspl = interpolate.UnivariateSpline(t[troughs],
                                         trough_vals,
                                         k=amp_order,
                                         s=amp_smoothness)
    P = pspl(t)
    Tr = trspl(t)

    #The amplitude is half the distance between the peaks and troughs
    #at each point.
    A = 0.5 * (P - Tr)

    #Now we can form Q
    Q = (y - M) / A

    hsig = signal.hilbert(Q)
    #print hsig
    #print np.absolute(hsig)
    #plt.plot(t,hsig.real,color="green",label="real")
    #print hsig.imag
    #plt.plot(t,hsig.imag,color="grey",label="imag")
    #We need Qdot
    #interpolate and take derivative
    Qspl = interpolate.UnivariateSpline(t,
                                        Q,
                                        k=signal_order,
                                        s=signal_smoothness)
    Qdot_spl = Qspl.derivative()
    Qdot = Qdot_spl(t)
    #plt.plot(t,Qdot/hsig.imag)
    #plt.plot(t,Qspl(t),label="Qspl")
    #plt.plot(t,Qdot_spl(t),label="Qdotspl")

    #We also need Q^2
    #restrict to [-1,1] and square
    np.clip(Q, -1.0, 1.0, out=Q)
    Qsq = np.square(Q)
    denom = np.sqrt(1.0 - Qsq)

    freq = (1. / (2. * np.pi)) * (Qdot / denom)

    #spline interpolate the arcin to extract its derivative
    asQ = np.arcsin(Q)
    asQspl = interpolate.UnivariateSpline(t,
                                          asQ,
                                          k=signal_order,
                                          s=signal_smoothness)
    asQdot_spl = asQspl.derivative()
    #freq = np.zeros( len(t) )
    #freq[:] = (1./(2.*np.pi))*Qdot_spl( t )
    #plt.plot(t,freq,label="freq")

    #plt.plot(t,y,color = "blue", label="data")
    #plt.plot(t, asQ, color="violet", label="asQ")
    #plt.plot( t, Q, color="blue", label="Q" )
    #plt.plot( t, M, color = "black", label="offset" )
    #plt.plot( t, P, color = "red", label="peaks" )
    #plt.plot( t, Tr, color = "red", label="troughs" )

    plt.legend()
    plt.show()
    return freq
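The Hilbert transform computed above (`hsig`) also gives the instantaneous frequency directly from the unwrapped analytic-signal phase, which avoids the 1/sqrt(1 - Q^2) singularities at the extrema; a self-contained sketch on an assumed toy signal.

import numpy as np
from scipy.signal import hilbert

# Toy normalized signal Q(t) = cos(omega * t) with omega = 0.4 rad/s.
t = np.linspace(0.0, 300.0, 6000)
Q = np.cos(0.4 * t)

# Instantaneous frequency from the unwrapped analytic-signal phase.
phase = np.unwrap(np.angle(hilbert(Q)))
freq = np.gradient(phase, t) / (2.0 * np.pi)  # ~0.4/(2*pi) away from the ends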
Example #14
def t_resonance(phase):
    turns, turn_vals = pk.turning_points(phase[:, 1])
    i = np.argmax(turn_vals)
    return phase[turns[i], 0]
Example #15
def unwrapped_ang(ang):
    peaktimes, peakvals, troughtimes, troughvals = peaks.turning_points(ang)
    grad = np.gradient(ang, 0.5)
    unwrapped = ang[0] + np.cumsum(np.fabs(grad))
    return unwrapped