Example #1
def check_divergence(prices, indicators):
    prices = np.array(prices)
    indicators = np.array(indicators)

    (price_min_indexes, ) = argrelmin(prices)
    (price_max_indexes, ) = argrelmax(prices)

    (rsi_min_indexes, ) = argrelmin(indicators)
    (rsi_max_indexes, ) = argrelmax(indicators)

    print('################')
    print(rsi_min_indexes)
    print('################')
    print(rsi_max_indexes)

    # bearish divergence
    if len(price_max_indexes) >= 2 and len(rsi_max_indexes) >= 2:
        if price_max_indexes[-1] == rsi_max_indexes[-1] == len(prices) - 3 and \
                price_max_indexes[-2] == rsi_max_indexes[-2] and \
                rsi_max_indexes[-1] - rsi_max_indexes[-2] > 4:
            if prices[price_max_indexes[-1]] > prices[price_max_indexes[-2]] and \
                    indicators[price_max_indexes[-1]] < indicators[price_max_indexes[-2]]:
                # print('bearish divergence')
                return SignalDirection.SHORT

    if len(price_min_indexes) >= 2 and len(rsi_min_indexes) >= 2:
        if price_min_indexes[-1] == rsi_min_indexes[-1] == len(prices) - 3 and \
                price_min_indexes[-2] == rsi_min_indexes[-2] and \
                rsi_min_indexes[-1] - rsi_min_indexes[-2] > 4:
            if prices[price_min_indexes[-1]] < prices[price_min_indexes[-2]] and \
                    indicators[price_min_indexes[-1]] > indicators[price_min_indexes[-2]]:
                # print('bullish divergence')
                return SignalDirection.LONG
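A minimal driving sketch (not from the original source): SignalDirection is assumed to be an enum defined elsewhere in the project, and the price/RSI values below are made up.

import numpy as np
from scipy.signal import argrelmin, argrelmax

prices = [10.0, 12.0, 11.0, 11.5, 11.0, 12.5, 12.0, 13.0, 12.8, 13.5, 13.2]
rsi = [40, 70, 50, 55, 50, 68, 55, 60, 58, 65, 60]
# check_divergence compares the swing points found by argrelmax/argrelmin:
# a higher price high on a lower RSI high at the same candles returns SHORT,
# the mirrored pattern on the lows returns LONG, anything else returns None.
# check_divergence(prices, rsi)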
Example #2
def elliptical_orbit_to_events2(t, w, x_pool, y_pool, z_pool):
    """
    Convert an orbit to MIDI events using Cartesian coordinates and rules.

    For Cartesian orbits...

    Parameters
    ----------
    t : array_like
    w : array_like
    x_pool, y_pool, z_pool : array_like

    """

    x, y, z = w.T[:3]

    # quantize the periods and map on to notes
    x_cross = np.array([argrelmin(xx**2)[0] for xx in x])
    y_cross = np.array([argrelmin(yy**2)[0] for yy in y])
    z_cross = np.array([argrelmin(zz**2)[0] for zz in z])

    r_x = np.sqrt(y**2 + z**2)
    r_y = np.sqrt(x**2 + z**2)
    r_z = np.sqrt(x**2 + y**2)

    # min/max are intentionally swapped to invert the mapping
    # (smaller radius -> higher note, as in the cylindrical variant)
    q_r_x = quantize(np.sqrt(r_x),
                     nbins=len(x_pool),
                     max=np.sqrt(r_x).min(),
                     min=np.sqrt(r_x).max())
    q_r_y = quantize(np.sqrt(r_y),
                     nbins=len(y_pool),
                     max=np.sqrt(r_y).min(),
                     min=np.sqrt(r_y).max())
    q_r_z = quantize(np.sqrt(r_z),
                     nbins=len(z_pool),
                     max=np.sqrt(r_z).min(),
                     min=np.sqrt(r_z).max())

    delays = []
    notes = []
    for j in range(w.shape[0]):
        _no = []
        for i in range(w.shape[1]):
            if j in x_cross[i]:
                _no.append(x_pool[q_r_x[i, j]])

            if j in y_cross[i]:
                _no.append(y_pool[q_r_y[i, j]])

            if j in z_cross[i]:
                _no.append(z_pool[q_r_z[i, j]])

        if len(_no) > 0:
            delays.append(t[j])
            notes.append(np.unique(_no).tolist())

    delays = np.array(delays)
    notes = np.array(notes)

    return delays, notes
Example #3
def get_peaks(freq, signal, method='numpy'):

    # The three fitting back-ends share identical peak extraction, so dispatch
    # on the method name instead of duplicating the body per branch.
    fitters = {'numpy': nufit_fourier, 'lm': lmfit_fourier, 'sym': symfit_fourier}
    min_x, min_y, max_x, max_y = None, None, None, None

    try:

        fit = fitters[method](freq, signal)

        min_peaks = argrelmin(fit)[0]
        min_x = freq[min_peaks]
        min_y = fit[min_peaks]

        max_peaks = argrelmax(fit)[0]
        max_x = freq[max_peaks]
        max_y = fit[max_peaks]

    except Exception as e:

        print(e)

    return (min_x, min_y), (max_x, max_y)
Example #4
def find_average_det_vel(t, D):
    """Evaluate average detonation velocity."""
    all_minima_idx = signal.argrelmin(D, order=100)[0]
    low_minima_idx = signal.argrelmin(D[all_minima_idx])[0]

    if len(low_minima_idx) == 0 or len(low_minima_idx) == 1:
        low_minima_idx = all_minima_idx
        i_1 = all_minima_idx[0]
        i_2 = all_minima_idx[-1]
    else:
        i_1 = all_minima_idx[low_minima_idx[0]]
        i_2 = all_minima_idx[low_minima_idx[-1]]

    T = t[i_2] - t[i_1]

    plt.plot(t, D, '-')
    plt.plot([t[i_1], t[i_2]], [D[i_1], D[i_2]], 'ro')
    plt.show()

    # Using trapezoidal rule to evaluate :math:`\int D(\tau) d\tau`.
    integrand = 0.5 * (D[i_1:i_2 - 1] + D[i_1 + 1:i_2])
    integral = np.sum(integrand * np.diff(t[i_1:i_2]))

    D_avg = integral / T

    return D_avg
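The two lines under the "trapezoidal rule" comment are the manual form of NumPy's built-in helper; a standalone sanity check with made-up values:

import numpy as np

t = np.linspace(0.0, 1.0, 11)
D = 2.0 + np.sin(20 * t)
i_1, i_2 = 1, 9
integrand = 0.5 * (D[i_1:i_2 - 1] + D[i_1 + 1:i_2])
integral = np.sum(integrand * np.diff(t[i_1:i_2]))
assert np.isclose(integral, np.trapz(D[i_1:i_2], t[i_1:i_2]))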
Example #5
def WidthEstimate1D(inList, method="interpolate"):
    scales = np.zeros(len(inList))
    for idx, y in enumerate(inList):
        x = fft.fftfreq(len(y)) * len(y) / 2.0
        if method == "interpolate":
            minima = (argrelmin(y))[0]
            if minima[0] > 1:
                # invert the profile: interpolate x as a function of y and
                # evaluate at y = 1/e to read off the width
                interpolator = interp1d(y[0 : minima[0]], x[0 : minima[0]])
                scales[idx] = interpolator(np.exp(-1))
        if method == "fit":
            g = models.Gaussian1D(amplitude=y[0], mean=[0], stddev=[10], fixed={"amplitude": True, "mean": True})
            fit_g = fitting.LevMarLSQFitter()
            minima = (argrelmin(y))[0]
            if minima[0] > 1:
                xtrans = (np.abs(x) ** 0.5)[0 : minima[0]]
                yfit = y[0 : minima[0]]
            else:
                xtrans = np.abs(x) ** 0.5
                yfit = y
            output = fit_g(g, xtrans, yfit)
            scales[idx] = np.abs(output.stddev.value[0]) * (2 ** 0.5)
    #             expmod = Model(Exponential1D)
    #             pars = expmod.make_params(amp=y[0],scale=5.0)
    #             pars['amp'].vary = False
    #             result = expmod.fit(y,x=x,params = pars)
    #             scales[idx] = result.params['scale'].value
    return scales
Example #6
def cyl_orbit_to_events(t, w, midi_pool_hi, midi_pool_lo, time_resolution=None):
    """
    Convert an orbit to MIDI events using cylindrical coordinates and rules.

    For cylindrical orbits, crossing the disk midplane (x-y plane) triggers a
    high note. Crossing the x-z plane triggers a low note. The pitch of the note
    is set by the cylindrical radius at the time of either crossing. Smaller
    radius triggers a higher pitch note.

    Parameters
    ----------
    t : array_like
    w : array_like
    midi_pool_hi, midi_pool_lo : array_like

    """

    R = np.sqrt(w[:,:,0]**2 + w[:,:,1]**2)
    phi = np.arctan2(w[:,:,1], w[:,:,0]) % (2*np.pi)
    z = w[:,:,2]

    # set amplitudes from size of z oscillations
    all_amps = np.abs(z).max(axis=0) / 10.

    # variable length arrays
    phi_cross = np.array([argrelmin(pphi)[0] for pphi in phi.T])
    z_cross = np.array([argrelmin(zz**2)[0] for zz in z.T])

    # quantize R orbit
    RR = np.sqrt(R)
    nbins_hi = len(midi_pool_hi)
    q_R_hi = quantize(RR, nbins=nbins_hi, min=RR.max(), max=RR.min())
    nbins_lo = len(midi_pool_lo)
    q_R_lo = quantize(RR, nbins=nbins_lo, min=RR.max(), max=RR.min())

    delays = []
    notes = []
    amps = []
    for j in range(w.shape[0]):
        _no = []
        _amps = []
        for i in range(w.shape[1]):
            if j in z_cross[i]:
                _no.append(midi_pool_hi[q_R_hi[j,i]])
                _amps.append(all_amps[i])

            if j in phi_cross[i]:
                _no.append(midi_pool_lo[q_R_lo[j,i]])
                _amps.append(all_amps[i])

        if len(_no) > 0:
            delays.append(t[j])
            notes.append(np.unique(_no).tolist())
            amps.append(_amps)

    delays = np.array(delays)
    notes = np.array(notes)
    amps = np.array(amps)

    return delays, notes, amps
Example #7
def WidthEstimate1D(inList, method='interpolate'):
    scales = np.zeros(len(inList))
    for idx, y in enumerate(inList):
        x = fft.fftfreq(len(y)) * len(y) / 2.0
        if method == 'interpolate':
            minima = (argrelmin(y))[0]
            if minima[0] > 1:
                # invert the profile: interpolate x as a function of y and
                # evaluate at y = 1/e to read off the width
                interpolator = interp1d(y[0:minima[0]], x[0:minima[0]])
                scales[idx] = interpolator(np.exp(-1))
        if method == 'fit':
            g = models.Gaussian1D(amplitude=y[0],
                                  mean=[0],
                                  stddev=[10],
                                  fixed={
                                      'amplitude': True,
                                      'mean': True
                                  })
            fit_g = fitting.LevMarLSQFitter()
            minima = (argrelmin(y))[0]
            if minima[0] > 1:
                xtrans = (np.abs(x)**0.5)[0:minima[0]]
                yfit = y[0:minima[0]]
            else:
                xtrans = np.abs(x)**0.5
                yfit = y
            output = fit_g(g, xtrans, yfit)
            scales[idx] = np.abs(output.stddev.value[0]) * (2**0.5)


#             expmod = Model(Exponential1D)
#             pars = expmod.make_params(amp=y[0],scale=5.0)
#             pars['amp'].vary = False
#             result = expmod.fit(y,x=x,params = pars)
#             scales[idx] = result.params['scale'].value
    return scales
Example #8
    def decomposeSignal(self,sig,tims):
        #Make sure signal is of even length
        if (len(sig)%2 > 0):
            sig = sig[1:]

        # Use fft to decompose
        sigf = np.fft.fft(sig)
        signalInterval = np.mean([abs(y-x) for x,y in zip(tims[:-1],tims[1:])])
        faxis = np.fft.fftfreq(len(sig),d=signalInterval)

        # Find the maximum frequency and its associated magnitude
        sigf = 2. * np.abs(sigf[:(len(sig) // 2 - 1)] / len(sig))

        if (not np.isinf(faxis[np.argmax(sigf)])) & (faxis[np.argmax(sigf)] > 0):
            peaks1 = argrelmax(np.array(sig) - np.median(sig), order=int(np.ceil(faxis[np.argmax(sigf)] / signalInterval / 8.)))
            peaks2 = argrelmin(np.array(sig) - np.median(sig), order=int(np.ceil(faxis[np.argmax(sigf)] / signalInterval / 8.)))
        else:
            peaks1 = argrelmax(np.array(sig) - np.median(sig), order=1)
            peaks2 = argrelmin(np.array(sig) - np.median(sig), order=1)
        peaks = np.concatenate((peaks1[0], peaks2[0]))
        return((1./faxis[np.argmax(sigf)],
                np.median([abs(sig[x]) for x in peaks]),
                np.median([sig[x] for x in peaks1[0]]),
                np.median([sig[x] for x in peaks2[0]])))
Example #9
def get_cut_line(img, num_hor_pieces = 12, num_ver_pieces = 6, auto_fix = None):
    tic = time.time()
    order_hor = num_hor_pieces + 2
    order_ver = num_ver_pieces + 2
    # For 10-piece modules, crop 50 pixels from each side edge
    if num_hor_pieces == 10:
        img = img[:, 50:-50]
    # horizontal
    horizontal = np.sum(img, axis = 0)
    line_hor = signal.argrelmin(horizontal, order = int(len(horizontal) / order_hor))[0]
    # vertical
    vertical = np.sum(img, axis = 1)
    line_ver = signal.argrelmin(vertical, order = int(len(vertical) / order_ver))[0]
    toc1 = time.time()
    
    # For 10-piece modules, add the 50-pixel offset back
    if num_hor_pieces == 10:
        line_hor = line_hor + 50

    # Apply a different fix-up scheme depending on the manufacturer of the input image
    if auto_fix == 'ats_cenghou':
        line_hor, line_ver = fix_ats_cenghou(line_ver, line_hor, num_hor_pieces, num_ver_pieces)
        
    if auto_fix == 'jk_cengqian':
        line_hor, line_ver = fix_jk_cengqian(line_ver, line_hor, num_hor_pieces, num_ver_pieces)
    
    
    toc2 = time.time()
    print('first step time is %.2f secs.' % (toc1 - tic), 'auto fix time is %.2f secs.' % (toc2 - toc1))

    return line_hor, line_ver 
Example #10
def find_mins(arr):
    padded = np.pad(arr.astype(float), (1, 1),
                    'constant',
                    constant_values=np.inf)
    coords = set(zip(*argrelmin(padded)))
    coords = coords.intersection(set(zip(*argrelmin(padded, axis=1))))
    coords = [(y - 1, x - 1) for y, x in coords]
    return coords
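A quick demonstration of the inf-padding trick on a made-up array: the padding lets border cells count as minima, because every out-of-grid neighbour becomes +inf.

import numpy as np
from scipy.signal import argrelmin

a = np.array([[1, 2],
              [3, 0]])
# find_mins(a) -> [(0, 0), (1, 1)] (order may vary): both the 1 and the 0 are
# strictly smaller than all of their in-grid neighbours.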
Example #11
    def on_change2(pt):
        #maxi=sp.argrelmax(normList)[0]
        #pt=maxi[pt]
        
        fig=plt.figure(figsize=(20,10))
        
        gs = gridspec.GridSpec(2, 2)
        ax1 = plt.subplot(gs[:, 0])
        ax2 = plt.subplot(gs[0,1])
        ax3 = plt.subplot(gs[1,1])
        
        #ax=plt.subplot(1,2,1)
        ax1.plot(f,normList)
        
        
        
        ax1.plot(f[pt],normList[pt],'ko')
        #ax1.text(f[pt],normList[pt],str(f[pt])+ 'Hz')
        string='f={:.3f} Hz\nMode={:.0f}'.format(f[pt],pt)
        ax1.text(0.05, 0.95, string, transform=ax1.transAxes, fontsize=14,
            verticalalignment='top')
        
        ax1.set_xscale('log')
        ax1.set_yscale('log')
        
        #ax=plt.subplot(1,2,2)
        idxMode=myDMD_Uy.getIdxforFrq(f[pt])
        mode=myDMD_Uy.getMode(idxMode)
        ax2.imshow(np.real(mode),vmin=vmin,vmax=vmax,interpolation='nearest')
        
        uy=np.array(np.real(mode)[iRow,:])
        uy_imag=np.array(np.imag(mode)[iRow,:])
        ax3.plot(uy)
        ax3.plot(uy_imag,'r')
        maxi=sp.argrelmax(uy)[0]
        mini=sp.argrelmin(uy)[0]
        exti=np.sort(np.r_[maxi,mini])
        
        maxi_imag=sp.argrelmax(uy_imag)[0]
        mini_imag=sp.argrelmin(uy_imag)[0]
        exti_imag=np.sort(np.r_[maxi_imag,mini_imag])        
        
        print(np.diff(exti))
        ax3.scatter(maxi,uy[maxi],marker=2)
        ax3.scatter(mini,uy[mini],marker=3)
        ax3.scatter(maxi_imag,uy_imag[maxi_imag],marker=2)
        ax3.scatter(mini_imag,uy_imag[mini_imag],marker=3)       

        ax3.set_xlim([0,np.real(mode).shape[1]])
        gamma=0
        print('n=', L / (np.diff(maxi) * dx) + gamma)
        print('n=', L / (np.diff(mini) * dx) + gamma)
        print('n=', L / (np.diff(exti) * dx * 2.0) + gamma)

        print('n=', L / (np.diff(maxi_imag) * dx) + gamma)
        print('n=', L / (np.diff(mini_imag) * dx) + gamma)
        print('n=', L / (np.diff(exti_imag) * dx * 2.0) + gamma)
Example #12
    def part1(self):
        horiz = argrelmin(self.grid, axis=0)
        vert = argrelmin(self.grid, axis=1)
        horiz_pts = set([(y, x) for (y, x) in zip(*horiz)])
        vert_pts = set([(y, x) for (y, x) in zip(*vert)])
        self.local_min = horiz_pts & vert_pts
        local_min_seq = tuple(zip(*self.local_min))
        risk = (self.grid[local_min_seq] + 1).sum()
        return risk
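A minimal illustration (assuming self.grid is a 2-D NumPy height map; note that argrelmin never flags cells on the array boundary):

import numpy as np
from scipy.signal import argrelmin

grid = np.array([[9, 9, 9],
                 [9, 1, 9],
                 [9, 9, 9]])
# part1 would find the single interior low point (1, 1) and return 1 + 1 == 2.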
Example #13
def elliptical_orbit_to_events2(t, w, x_pool, y_pool, z_pool):
    """
    Convert an orbit to MIDI events using Cartesian coordinates and rules.

    For Cartesian orbits...

    Parameters
    ----------
    t : array_like
    w : array_like
    x_pool, y_pool, z_pool : array_like

    """

    x,y,z = w.T[:3]

    # quantize the periods and map on to notes
    x_cross = np.array([argrelmin(xx**2)[0] for xx in x])
    y_cross = np.array([argrelmin(yy**2)[0] for yy in y])
    z_cross = np.array([argrelmin(zz**2)[0] for zz in z])

    r_x = np.sqrt(y**2 + z**2)
    r_y = np.sqrt(x**2 + z**2)
    r_z = np.sqrt(x**2 + y**2)

    # min/max are intentionally swapped to invert the mapping
    # (smaller radius -> higher note, as in the cylindrical variant)
    q_r_x = quantize(np.sqrt(r_x), nbins=len(x_pool),
                     max=np.sqrt(r_x).min(), min=np.sqrt(r_x).max())
    q_r_y = quantize(np.sqrt(r_y), nbins=len(y_pool),
                     max=np.sqrt(r_y).min(), min=np.sqrt(r_y).max())
    q_r_z = quantize(np.sqrt(r_z), nbins=len(z_pool),
                     max=np.sqrt(r_z).min(), min=np.sqrt(r_z).max())

    delays = []
    notes = []
    for j in range(w.shape[0]):
        _no = []
        for i in range(w.shape[1]):
            if j in x_cross[i]:
                _no.append(x_pool[q_r_x[i,j]])

            if j in y_cross[i]:
                _no.append(y_pool[q_r_y[i,j]])

            if j in z_cross[i]:
                _no.append(z_pool[q_r_z[i,j]])

        if len(_no) > 0:
            delays.append(t[j])
            notes.append(np.unique(_no).tolist())

    delays = np.array(delays)
    notes = np.array(notes)

    return delays, notes
Example #14
    def find_peaks(self, xdata, ydata, domain, std_dev=11):
        from scipy.ndimage.filters import gaussian_filter
        from scipy.signal import argrelmin
        import numpy as np

        xpeaks, ypeaks = self.choose_domain(xdata, ydata, domain)

        ygauss = gaussian_filter(ypeaks, std_dev)

        x_peak_coord = xpeaks[argrelmin(ygauss)[0]]
        y_peak_coord = ypeaks[argrelmin(ygauss)[0]]

        return x_peak_coord, y_peak_coord
Example #15
    def __init__(self, p, lowpass, extrema, slp):
        lw = Lanczos(p, lowpass)
        self.lp = p.rolling(**lw.roll).apply(lw)
        self.p = p
        self.plvmin, = sig.argrelmin(self.lp['PLV'].values.flatten(),
                                     order=extrema)
        self.cnsdmin, = sig.argrelmin(self.lp['CNSD'].values.flatten(),
                                      order=extrema)
        self.cnsdmax, = sig.argrelmax(self.lp['CNSD'].values.flatten(),
                                      order=extrema)
        self.t = pd.DatetimeIndex(
            pd.Series(self.lp.index + pd.Timedelta('4h')).dt.round('D'))
        self.slp = slp.copy()
        self.slp['time'] = pd.DatetimeIndex(pd.Series(slp.time).dt.round('D'))
        self.slp = self.slp - self.slp.mean('time')
Example #16
    def minima_pairs(self, data_index):
        if isinstance(data_index, int):
            data = np.fromiter(self.output.column(data_index), float)
            minima_indices = signal.argrelmin(data, mode="wrap")[0]
            minima_x = np.fromiter(
                (self.output.independent[i] for i in minima_indices), float)
            minima_y = np.fromiter((data[i] for i in minima_indices), float)
            return minima_x, minima_y
        else:
            minima_indices = signal.argrelmin(data_index, mode="wrap")[0]
            minima_x = np.fromiter(
                (self.output.independent[i] for i in minima_indices), float)
            minima_y = np.fromiter((data_index[i] for i in minima_indices),
                                   float)
            return minima_x, minima_y
Example #17
    def thresh(self):
        img = cv2.imread('output_images/warped_color.jpg')
        img = cv2.cvtColor(img, cv2.COLOR_RGB2HSV)
        img = img[:, :, 2]
        h = np.asarray(plt.hist(img.ravel(), range=[0, 255]))
        plt.close()

        # Gets all local minima and maxima
        maxi = np.array([h[0][scs.argrelmax(h[0])], h[1][scs.argrelmax(h[0])]])
        mini = np.array([h[0][scs.argrelmin(h[0])], h[1][scs.argrelmin(h[0])]])
        try:
            # finds max peak and two surrounding valleys and removes those pixels from the image
            l1 = np.argmax(mini[1] > maxi[1, np.argmax(maxi[0])])
            # print(np.array_str(mini[1,l1]))
            if l1 != 0:
                img[(mini[1, l1 - 1] < img) & (img < mini[1, l1])] = 0
            else:
                img[img < mini[1, l1]] = 0
        except:
            if maxi.shape[1] > 0:
                # if only one peak remove all values below peak+1
                if maxi.shape[1] == 1:
                    l1 = maxi[1, 0] + 10
                elif maxi.shape[1] > 1:
                    if np.argmax(maxi[0]) < maxi.shape[1] - 1:
                        # finds max peak and two surrounding valleys and removes those pixels from the image
                        l1 = mini[1,
                                  np.argmax(
                                      mini[1] > maxi[1, np.argmax(maxi[0])])]
                        l1 = l1 + (maxi[1, maxi.shape[1] - 1] - l1) / 3
                    # print(np.array_str(mini[1,l1]))
                    else:
                        l1 = maxi[1, maxi.shape[1] - 1] + 10
                else:
                    l1 = 255
            else:
                l1 = 255
            if l1 >= 245:
                l1 = 245
            img[img < l1] = 0
        # Calculate scharr derivatives and adjust
        sx = np.absolute(cv2.Scharr(img, cv2.CV_64F, 1, 0))
        scale_factor = np.max(sx) / 255
        sx = (sx / scale_factor).astype(np.uint8)
        misc.imsave('output_images/thresh.jpg', sx)
        combined = np.zeros_like(sx)
        combined[sx > 0] = 1
        return combined
Example #18
    def pericenter(self, type=np.mean):
        """
        Estimate the pericenter(s) of the orbit. By default, this returns
        the mean pericenter. To get, e.g., the minimum pericenter,
        pass in ``type=np.min``. To get all pericenters, pass in
        ``type=None``.

        Parameters
        ----------
        type : func (optional)
            By default, this returns the mean pericenter. To return all
            pericenters, pass in ``None``. To get, e.g., the minimum
            or maximum pericenter, pass in ``np.min`` or ``np.max``.

        Returns
        -------
        peri : float, :class:`~numpy.ndarray`
            Either a single number or an array of pericenters.
        """
        r = self.r
        min_ix = argrelmin(r, mode='wrap')[0]
        min_ix = min_ix[(min_ix != 0) & (min_ix != (len(r)-1))]

        if type is not None:
            return type(r[min_ix])
        else:
            return r[min_ix]
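A usage sketch, assuming an orbit object that exposes its radius history as self.r:

import numpy as np

# orbit.pericenter()               # mean of all pericenters
# orbit.pericenter(type=np.min)    # smallest pericenter
# orbit.pericenter(type=None)      # array of every pericenter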
Example #19
def locate_crossings(s, spectrum):
    ''' '''

    N = spectrum.shape[1]   # number of traced states
    deltas = [spectrum[:,k+1]-spectrum[:,k] for k in range(N-1)]
    deltas = np.array(deltas)

    print(deltas.shape)
    inds, xs = argrelmin(deltas, axis=1, order=1)
    xings = sorted(zip(xs, inds), key=lambda x: (x[0], -x[1]))

    # causal filter
    n_active = 0
    causal_xings = []
    for x, n in xings:
        if n <= n_active:
            causal_xings.append((x, n))
            if n == n_active:
                n_active += 1

    print(causal_xings)

    for x, n in causal_xings:
        analyse_crossing(deltas[n,:], x)


    plt.figure('Deltas')
    plt.plot(s, deltas.T[:,:5], 'x')
    plt.show()
Example #20
def extr(x):
    """Extract the indices of the extrema and zero crossings.
    :param x: input signal
    :type x: array-like
    :return: indices of minima, maxima and zero crossings.
    :rtype: tuple
    """
    m = x.shape[0]

    x1 = x[:m - 1]
    x2 = x[1:m]
    indzer = find(x1 * x2 < 0)
    if np.any(x == 0):
        iz = find(x == 0)
        indz = []
        if np.any(np.diff(iz) == 1):
            # runs of consecutive zeros: mark their midpoints as the zero locations
            zer = (x == 0).astype(int)
            dz = np.diff(np.r_[0, zer, 0])
            debz = find(dz == 1)
            finz = find(dz == -1) - 1
            indz = np.round((debz + finz) / 2).astype(int)
        else:
            indz = iz
        indzer = np.sort(np.hstack([indzer, indz]))

    indmax = argrelmax(x)[0]
    indmin = argrelmin(x)[0]

    return indmin, indmax, indzer
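A usage sketch; extr relies on a find helper from its module, for which np.nonzero(...)[0] is an equivalent stand-in:

import numpy as np
from scipy.signal import argrelmax, argrelmin

def find(condition):  # stand-in for the module's helper
    return np.nonzero(condition)[0]

x = np.sin(np.linspace(0, 4 * np.pi, 200))
# indmin, indmax, indzer = extr(x)
# -> two minima, two maxima, and four zero locations for this two-period sine
#    (the exact zero at the first sample plus three sign changes).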
Example #21
def get_envelops(x, t=None):
    """ Find the upper and lower envelopes of the array `x`.
    """
    if t is None:
        t = np.arange(x.shape[0])
    maxima = argrelmax(x)[0]
    minima = argrelmin(x)[0]

    # consider the start and end to be extrema

    ext_maxima = np.zeros((maxima.shape[0] + 2,), dtype=int)
    ext_maxima[1:-1] = maxima
    ext_maxima[0] = 0
    ext_maxima[-1] = t.shape[0] - 1

    ext_minima = np.zeros((minima.shape[0] + 2,), dtype=int)
    ext_minima[1:-1] = minima
    ext_minima[0] = 0
    ext_minima[-1] = t.shape[0] - 1

    tck = interpolate.splrep(t[ext_maxima], x[ext_maxima])
    upper = interpolate.splev(t, tck)
    tck = interpolate.splrep(t[ext_minima], x[ext_minima])
    lower = interpolate.splev(t, tck)
    return upper, lower
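A usage sketch with a synthetic amplitude-modulated signal (illustrative only; interpolate above is scipy.interpolate):

import numpy as np

x = np.sin(0.2 * np.arange(500)) * np.cos(0.02 * np.arange(500))
# upper, lower = get_envelops(x)
# upper and lower are spline curves threaded through the maxima and minima
# of x, with both endpoints treated as extrema.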
Example #22
    def ground_check(self, occ, show=False):

        try:
            assert hasattr(self, 's') and hasattr(self, 'spectrum')
        except AssertionError:
            print('Spectrum has not yet been solved...')
            return None

        delta = self.spectrum[:,1]-self.spectrum[:,0]
        xs = argrelmin(delta)[0]

        # accept the minimum gap and all sufficiently small gaps
        dmin = min(delta[x] for x in xs)

        if show:
            plt.plot(delta)
            plt.show()
        xs = [x for x in xs if delta[x] < GAP_MAX or delta[x]<1.1*dmin]
        if len(xs) != 1:
            return [None]

        # only one gap, extract prob
        dx = int(self.nsteps*.05)
        popt, perr = analyse_crossing(delta, xs[0], dx, show=show)

        print(popt)

        if popt is None:
            return [None,]

        prob = occ[0]*1./np.sum(occ)

        params = [(prob, popt[1], abs(popt[2])/self.nsteps)]
        return params
Example #23
    def extractResponse(self):

        stimStarts = self.stimStartInds // self.downSamplingFactor
        stimEnds = self.stimEndInds // self.downSamplingFactor
        samplingRateDown = self.vibrationSignalDown.sampling_rate

        self.stimAmps = []
        self.stimFreqs = []
        self.responseVTraces = []
        self.stimTraces = []
        for (stD, endD, st, end) in zip(stimStarts, stimEnds, self.stimStartInds, self.stimEndInds):

            stimDown = self.vibrationSignalDown[stD:endD + 1]
            stimDownFFT = np.fft.rfft(stimDown, n=2048)
            self.stimFreqs.append(np.argmax(np.abs(stimDownFFT)) * samplingRateDown / 2 / len(stimDownFFT))

            stimAS = self.vibrationSignal[st:end + 1]
            stim = stimAS.magnitude
            allAmps = stim[np.concatenate((argrelmin(stim)[0], argrelmax(stim)[0]))]

            self.stimAmps.append(np.abs(allAmps).mean() * self.vibrationSignal.units)

            self.responseVTraces.append(self.voltageSignal[st:end + 1])

            self.stimTraces.append((stimAS - np.mean(stimAS)))
Example #24
def _get_psp_list(bins, neuron_model, di_param, timestep, simtime):
    '''
    Return the list of effective weights from a list of NEST connection
    weights.
    '''
    nest.ResetKernel()
    nest.SetKernelStatus({"resolution":timestep})
    # create neuron and recorder
    neuron = nest.Create(neuron_model, params=di_param)
    vm = nest.Create("voltmeter", params={"interval": timestep})
    nest.Connect(vm, neuron)
    # send the spikes
    times = [ timestep+n*simtime for n in range(len(bins)) ]
    sg = nest.Create("spike_generator", params={'spike_times':times,
                                                'spike_weights':bins})
    nest.Connect(sg, neuron)
    nest.Simulate((len(bins)+1)*simtime)
    # get the max and its time
    dvm = nest.GetStatus(vm)[0]
    da_voltage = dvm["events"]["V_m"]
    da_times = dvm["events"]["times"]
    da_max_psp = da_voltage[ argrelmax(da_voltage) ]
    da_min_psp = da_voltage[ argrelmin(da_voltage) ]
    da_max_psp -= da_min_psp
    if len(bins) != len(da_max_psp):
        raise InvalidArgument("simtime too short: all PSP maxima are not in "
                              "range")
    else:
        plt.plot(da_times, da_voltage)
        plt.show()
        return da_max_psp
Example #25
    def _density_est(self, xpca):
        """
        input:
            xpca: cluster projected onto its first principal component
        output:
            True if the estimated density of xpca has a local minimum, False otherwise
        """
        if xpca.shape[0] < 3:
            return False

        kde = KernelDensity()
        h = np.std(xpca) * (4 / 3 / len(xpca)) ** (1 / 5)
        kde.set_params(bandwidth=h).fit(xpca)

        mmin, mmax = np.percentile(xpca, [20, 80])
        xdensity = np.linspace(mmin, mmax, 1000)[:, np.newaxis]  # take .1, .9 quantile
        ydensity = np.exp(kde.score_samples(xdensity))

        local_minimas_idx = argrelmin(ydensity)[0]

        if local_minimas_idx.size == 0:
            flag = False
        else:
            flag = True

        return flag
Example #26
    def findMAXMIN(znach, frame, smoth, orderMAX, orderMIN):

        filtered = lowess(znach, frame, is_sorted=True, frac=smoth, it=0)  # 0.02


        # pixel, value = np.array(frame),np.array(znach)
        pixel, value = np.array(filtered[:, 0]), np.array(filtered[:, 1])

        mean_line = statistics.mean(value)
        meanLine = [mean_line for i in range(len(pixel))]

        # value=znach
        maxTemp = argrelmax(value, order=orderMAX)
        maxes = []

        for maxi in maxTemp[0]:
            # if value[maxi] > 0:
            maxes.append(maxi)


        pixel, value = np.array(filtered[:, 0]), np.array(filtered[:, 1])
        minTemp = argrelmin(value, order=orderMIN)
        mines = []
        for mini in minTemp[0]:
            # if value[mini] < 0:
            mines.append(mini)



        return mines, maxes
Example #27
    def extractResponse(self):

        stimStarts = self.stimStartInds // self.downSamplingFactor
        stimEnds = self.stimEndInds // self.downSamplingFactor
        samplingRateDown = self.vibrationSignalDown.sampling_rate

        self.stimAmps = []
        self.stimFreqs = []
        self.responseVTraces = []
        self.stimTraces = []
        for (stD, endD, st, end) in zip(stimStarts, stimEnds,
                                        self.stimStartInds, self.stimEndInds):

            stimDown = self.vibrationSignalDown[stD:endD + 1]
            stimDownFFT = np.fft.rfft(stimDown, n=2048)
            self.stimFreqs.append(
                np.argmax(np.abs(stimDownFFT)) * samplingRateDown / 2 /
                len(stimDownFFT))

            stimAS = self.vibrationSignal[st:end + 1]
            stim = stimAS.magnitude
            allAmps = stim[np.concatenate(
                (argrelmin(stim)[0], argrelmax(stim)[0]))]

            self.stimAmps.append(
                np.abs(allAmps).mean() * self.vibrationSignal.units)

            self.responseVTraces.append(self.voltageSignal[st:end + 1])

            self.stimTraces.append((stimAS - np.mean(stimAS)))
Example #28
    def find_dystolic_peak(self, signal, dev1, dev2, systolic_peak_i,
                           dychrotic_notch_i):
        dev1_after_dn = dev1[dychrotic_notch_i:]
        dev2_after_dn = dev2[dychrotic_notch_i:]

        maxs_dev1_after_sp = argrelmax(dev1_after_dn)[0]
        dev1_after_dn_before_first_max = np.array(dev1_after_dn)

        if maxs_dev1_after_sp.shape[0] > 0:
            dev1_after_dn_before_first_max = dev1_after_dn[:maxs_dev1_after_sp[
                0]]

        found_confidence = 0.0
        diastolic_peak_index = self.find_zero_crossing(
            dev1_after_dn_before_first_max, which="neg_to_pos")

        if diastolic_peak_index != 0:
            found_confidence = 1.0
        else:
            # then look at the zero crossing pos_to_neg in dev2
            diastolic_peak_index = self.find_zero_crossing(dev2_after_dn,
                                                           which="pos_to_neg")
            if diastolic_peak_index != 0:
                found_confidence = 0.75
            else:
                # the first minimum in dev2 after the dychrotic_notch
                mins = argrelmin(dev2_after_dn)[0]
                if mins.shape[0] > 0:
                    diastolic_peak_index = mins[0]
                    found_confidence = 0.5

        if diastolic_peak_index + dychrotic_notch_i >= len(signal) - 2:
            found_confidence = 0.0

        return diastolic_peak_index + dychrotic_notch_i, found_confidence
Example #29
    def _find_peaks(self, col='conv', kind='min', kappa=3.0, **kwargs):

        y = self._safe(self._t[col])
        min_idx = np.hstack(argrelmin(y, **kwargs))
        max_idx = np.hstack(argrelmax(y, **kwargs))
        ext_idx = np.sort(np.append(min_idx, max_idx))
        ext = self._copy(ext_idx)
        if len(ext.t) > 0:
            # Set xmin and xmax from adjacent extrema
            ext.xmin = np.append(self.x[0], ext.x[:-1])
            ext.xmax = np.append(ext.x[1:], self.x[-1])

            diff_y_left = (ext._t[col][:-2] - ext._t[col][1:-1])
            diff_y_right = (ext._t[col][2:] - ext._t[col][1:-1])
            if kind == 'max':
                diff_y_left = -diff_y_left
                diff_y_right = -diff_y_right

        # Check if the difference is above threshold
        #for m,M,l,r in zip(ext.xmin, ext.xmax, diff_y_left, diff_y_right):

        #    print(m,M,l,r)
        diff_y_max = np.minimum(diff_y_left, diff_y_right)

        # +1 is needed because sel is referred to the [1:-1] range of rows
        # in the spectrum
        sel = np.where(np.greater(diff_y_max, ext.dy[1:-1] * kappa))[0]+1
        lines = ext._copy(sel)

        return lines
Example #30
def find_local_minima(ar):
    # Try to find the optimal cut-off that may help determine the 19 points on
    # the go board. Start with an interval [min_val, max_val] and squeeze until
    # it hits exactly 19 points.
    # Find indices that correspond to local minima
    x = argrelmin(ar)
    idx_list = x[0]

    target = 19
    min_val, max_val = np.amin(ar), 100.0

    # Assert that above choices are good
    assert sum(ar[i] <= min_val for i in idx_list) < target
    assert sum(ar[i] <= max_val for i in idx_list) > target

    # Find the cut-off below which there are exactly 19 local minima
    while True:
        new_val = 0.5 * min_val + 0.5 * max_val
        if sum(ar[i] <= new_val for i in idx_list) < target:
            min_val = new_val
        elif sum(ar[i] <= new_val for i in idx_list) > target:
            max_val = new_val
        elif sum(ar[i] <= new_val for i in idx_list) == target:
            break

    # Find the indices
    return [i for i in idx_list if ar[i] <= new_val]
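Note that the bisection runs on the intensity threshold, not on positions. A sketch of the intended input, assuming ar is a 1-D projection profile of a go-board image with 19 deep valleys at the grid lines plus shallower noise minima:

# ar = image.sum(axis=0).astype(float)   # column-intensity profile (assumed)
# cols = find_local_minima(ar)           # indices of the 19 grid-line valleys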
Example #31
def peak_trough_untrend(data, last_trend_loc, untrend_loc, high_col, low_col):
    #
    data_lasttrend = data.loc[last_trend_loc:].copy()
    High_uptonow = data_lasttrend[high_col]
    Low_uptonow = data_lasttrend[low_col]

    # Find support and resistance using scipy
    peaks2 = argrelmax(High_uptonow.values, order=2)
    peak = pd.Series(False, index=High_uptonow.index)
    peak.iloc[peaks2] = True
    peak_only = peak[peak]
    #
    troughs2 = argrelmin(Low_uptonow.values, order=2)
    trough = pd.Series(False, index=Low_uptonow.index)
    trough.iloc[troughs2] = True
    trough_only = trough[trough]
    #
    high_peak = '{}-peak'.format(high_col)
    data_lasttrend[high_peak] = peak
    low_trough = '{}-trough'.format(low_col)
    data_lasttrend[low_trough] = trough
    #
    ohlc_col = ['Date', 'Open', 'High', 'Low', 'Close']
    ohlc_rest = list(set(data_lasttrend.columns.tolist()) - set(ohlc_col))
    ohlc_col = ohlc_col + ohlc_rest
    ohlc = data_lasttrend.loc[untrend_loc:, ohlc_col]
    #
    return ohlc, high_peak, low_trough
Example #32
def locate_crossings(s, spectrum):
    ''' '''

    N = spectrum.shape[1]  # number of traced states
    deltas = [spectrum[:, k + 1] - spectrum[:, k] for k in range(N - 1)]
    deltas = np.array(deltas)

    print(deltas.shape)
    inds, xs = argrelmin(deltas, axis=1, order=1)
    xings = sorted(zip(xs, inds), key=lambda x: (x[0], -x[1]))

    # causal filter
    n_active = 0
    causal_xings = []
    for x, n in xings:
        if n <= n_active:
            causal_xings.append((x, n))
            if n == n_active:
                n_active += 1

    print(causal_xings)

    for x, n in causal_xings:
        analyse_crossing(deltas[n, :], x)

    plt.figure('Deltas')
    plt.plot(s, deltas.T[:, :5], 'x')
    plt.show()
Example #33
    def closest_index(value):
        peak_init = np.argmin(np.abs(value - f))
        peak = argrelmin(
            magnitude[peak_init - 5:peak_init +
                      5])[0][:1]  # first dimension only first item
        peak = peak_init - 5 + peak[0] if peak else None
        return peak
Example #34
def find_extrema(signal):
    signal = np.array(signal)
    extrema_index = np.sort(
        np.unique(np.concatenate(
            (argrelmax(signal)[0], argrelmin(signal)[0]))))
    extrema = signal[extrema_index]
    return zip(extrema_index.tolist(), extrema.tolist())
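A quick check with made-up values:

import numpy as np
from scipy.signal import argrelmax, argrelmin

list(find_extrema([0, 2, 1, 3, 1]))  # -> [(1, 2), (2, 1), (3, 3)]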
Example #35
def import_idl(filename='../../../Data/K16/kic126corr_n.sav', num=400):
    idlfile = rs(filename)
    ident = np.array(idlfile['cont']).astype('int')
    flux = np.array(idlfile['flux']).astype('float64')
    cadence = np.array(idlfile['time']).astype('float64')

    for a in np.unique(ident):  # data quarter
        mean = np.average(flux[idlfile['cont'] == a])
        data = [cadence[idlfile['cont'] == a], flux[idlfile['cont'] == a]]
        print(len(data[0]), len(data[1]))
        k = movavg_final(data, num)

        flux[np.where(idlfile['cont'] == a)[0]] -= k.astype('float64')

        #gp2(np.array([cadence[idlfile['cont'] == a], flux[idlfile['cont'] == a]]), block_size = 2000)[2];
        #flux[idlfile['cont'] == a] -= mean;
        flux[idlfile['cont'] == a] /= mean

    arm = argrelmin(flux)[0]
    arm = arm[flux[arm] < -0.005]
    for u in arm:
        fluxbase = flux[max(0, u - int(num)):min(len(flux), u + int(num))]
        fluxbase_mean = np.average(fluxbase[fluxbase > 0])
        flux[max(0, u - int(num)):min(len(flux), u + int(num))] -= fluxbase_mean

    return cadence, flux
Example #36
def calc_tiers(teams, year, week, bw=0.09, order=4, show=False):
    '''Calculate 3-5 tiers using Gaussian Kernel Density'''
    logger.info('Calculating tiers for power rankings')
    # Store rankings in list
    ranks = [t.rank.power for t in teams]
    # Calculate the Kernel Density Estimation
    kde = gaussian_kde(ranks, bw_method=bw)
    # Make plot
    x_grid = np.linspace(min(ranks) - 10., max(ranks) + 10., len(ranks) * 10)
    f2 = plt.figure(figsize=(10, 6))
    plt.plot(x_grid, kde(x_grid))
    if show: plt.show()
    # Create directory if it doesn't exist to save plot
    out_dir = Path(f'output/{year}/week{week}')
    out_dir.mkdir(parents=True, exist_ok=True)
    out_name = out_dir / 'tiers.png'
    f2.savefig(out_name)
    plt.close()
    logger.info(f'Saved tiers plot to local file {out_name.resolve()}')
    # Find minima to define tiers, separated by at least +/- order
    minima = x_grid[argrelmin(kde(x_grid), order=order)[0]]
    s_min = sorted(minima, reverse=True)
    tier = 1
    # Build tiers from minima
    for t in teams:
        # lowest tier
        if tier > len(s_min):
            tier += 0
        # if rank below current minima, create new tier
        elif t.rank.power < s_min[tier - 1]:
            if tier < 5: tier += 1
        # Save tier
        t.rank.tier = tier
Example #37
def lineSegmentation(img, kernelSize=25, sigma=11, theta=7):
    """Scale space technique for lines segmentation proposed by R. Manmatha:
	http://ciir.cs.umass.edu/pubfiles/mm-27.pdf
    Args:
		img: image of the text to be segmented on lines.
        kernelSize: size of filter kernel, must be an odd integer.
		sigma: standard deviation of Gaussian function used for filter kernel.
		theta: approximated width/height ratio of words, filter function is distorted by this factor.
		minArea: ignore word candidates smaller than specified area.
	Returns:
		List of lines (segmented input img)
	"""

    img_tmp = np.transpose(
        prepareTextImg(img))  # image to be segmented (un-normalized)
    img_tmp_norm = normalize(img_tmp)
    k = createKernel(kernelSize, sigma, theta)
    imgFiltered = cv2.filter2D(img_tmp_norm,
                               -1,
                               k,
                               borderType=cv2.BORDER_REPLICATE)
    img_tmp1 = normalize(imgFiltered)
    # Sum the elements in each column to get a per-column pixel-value profile
    summ_pix = np.sum(img_tmp1, axis=0)
    smoothed = smooth(summ_pix, 35)
    mins = np.array(argrelmin(smoothed, order=2))
    found_lines = transpose_lines(crop_text_to_lines(img_tmp, mins[0]))
    return found_lines
Example #38
def _get_psp_list(bins, neuron_model, di_param, timestep, simtime):
    '''
    Return the list of effective weights from a list of NEST connection
    weights.
    '''
    nest.ResetKernel()
    nest.SetKernelStatus({"resolution": timestep})
    # create neuron and recorder
    neuron = nest.Create(neuron_model, params=di_param)
    vm = nest.Create("voltmeter", params={"interval": timestep})
    nest.Connect(vm, neuron)
    # send the spikes
    times = [timestep + n * simtime for n in range(len(bins))]
    sg = nest.Create("spike_generator",
                     params={
                         'spike_times': times,
                         'spike_weights': bins
                     })
    nest.Connect(sg, neuron)
    nest.Simulate((len(bins) + 1) * simtime)
    # get the max and its time
    dvm = nest.GetStatus(vm)[0]
    da_voltage = dvm["events"]["V_m"]
    da_times = dvm["events"]["times"]
    da_max_psp = da_voltage[argrelmax(da_voltage)]
    da_min_psp = da_voltage[argrelmin(da_voltage)]
    da_max_psp -= da_min_psp
    if len(bins) != len(da_max_psp):
        raise InvalidArgument("simtime too short: all PSP maxima are not in "
                              "range.")
    else:
        return da_max_psp
Example #39
    def pericenter(self, type=np.mean):
        """
        Estimate the pericenter(s) of the orbit. By default, this returns
        the mean pericenter. To get, e.g., the minimum pericenter,
        pass in ``type=np.min``. To get all pericenters, pass in
        ``type=None``.

        Parameters
        ----------
        type : func (optional)
            By default, this returns the mean pericenter. To return all
            pericenters, pass in ``None``. To get, e.g., the minimum
            or maximum pericenter, pass in ``np.min`` or ``np.max``.

        Returns
        -------
        peri : float, :class:`~numpy.ndarray`
            Either a single number or an array of pericenters.
        """
        r = self.r
        min_ix = argrelmin(r, mode='wrap')[0]
        min_ix = min_ix[(min_ix != 0) & (min_ix != (len(r)-1))]

        if type is not None:
            return type(r[min_ix])
        else:
            return r[min_ix]
Example #40
def function(znach, frac, orderMin, orderMax):
    import numpy as np
    frame = np.arange(0, len(znach), 1)

    from statsmodels.nonparametric.smoothers_lowess import lowess
    filtered = lowess(znach, frame, is_sorted=True, frac=frac, it=0)  # 0.02

    import matplotlib.pyplot as plt
    from scipy import loadtxt, optimize
    from scipy.signal import argrelmax
    from scipy.signal import argrelmin

    # pixel, value = np.array(frame),np.array(znach)
    pixel, value = np.array(filtered[:, 0]), np.array(filtered[:, 1])

    maxTemp = argrelmax(value, order=orderMax)
    maxes = []

    for maxi in maxTemp[0]:
        # if value[maxi] > 0:
        maxes.append(maxi)

    pixel, value = np.array(filtered[:, 0]), np.array(filtered[:, 1])
    minTemp = argrelmin(value, order=orderMin)
    mines = []
    for mini in minTemp[0]:
        # if value[mini] < 0:
        mines.append(mini)

    result = []
    result.append(list(minTemp[0]))
    result.append(list(maxTemp[0]))

    return result
Example #41
def find_relative_mins(arr, order, nb_relative_mins):
    '''
    Find the local / relative minima in a 2D array (find the per-column mins).
    
    Parameters
    ----------
    arr : np.ndarray (n x m)
        The array for which we find the local mins (find the per-column mins).
        
    order : int
        How many points on each side to use for the comparison
        to consider ``comparator(n, n+x)`` to be True.
        
    nb_relative_mins : int
        How many local minima we want to return. 
        We select the nb_relative_mins of the minima.
        
    Returns
    -------    
    relative_mins_idxs : np.ndarray (nb_relative_mins x m)
        The indices of the local minima. If there were not enough minima, we return NaNs.

    relative_mins_values : np.ndarray (nb_relative_mins x m)
        The values of the local minima. If there were not enough minima, we return NaNs.
            
    '''
    
    # Import the local min function from Scipy here as we won't 
    # use it anywhere else
    from scipy.signal import argrelmin
    
    arr_copy = arr.copy()
    
    # Get the shape and replace the NaNs with +inf
    nb_col = arr_copy.shape[1]
    arr_copy[np.isnan(arr_copy)] = np.inf
    
    # Set up placeholders for the output
    relative_mins_idxs = np.ones((nb_relative_mins, nb_col))*np.nan
    relative_mins_values = np.ones((nb_relative_mins, nb_col))*np.nan
    
    # Loop over columns
    for k in range(nb_col):
        
        # Get the indices of the local mins (all of them at a given order)        
        idxs = argrelmin(arr_copy[:, k], order=order, mode='wrap')[0]
        
        # Sort the local mins
        values = arr_copy[idxs, k]
        idxs_to_sort = np.argsort(values)
        idxs_sorted = idxs[idxs_to_sort]
        values_sorted = values[idxs_to_sort]
        
        # Only return the smallest mins 
        nb_idxs = len(idxs)        
        mask = np.arange(0, min(nb_idxs, nb_relative_mins))
        relative_mins_idxs[mask, k] = idxs_sorted[mask]
        relative_mins_values[mask, k] = values_sorted[mask]
        
    return(relative_mins_idxs, relative_mins_values)
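A usage sketch on random data (the shapes are assumptions, not from the original source):

import numpy as np

arr = np.random.rand(200, 4)
arr[::17, :] = np.nan  # NaNs are tolerated: they are replaced by +inf
# idxs, vals = find_relative_mins(arr, order=5, nb_relative_mins=3)
# idxs.shape == vals.shape == (3, 4); rows are NaN-padded for any column with
# fewer than 3 local minima.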
Example #42
    def _density_est(self, x):
        """
        input:
            x: n_objects x n_features ndarray

        output:
            xpca: cluster projected onto its first principal component
            xmin: x value of the lowest local minimum of the estimated kernel density
            ymin: y value of the lowest local minimum of the estimated kernel density
        """
        if x.shape[0] < 3: return np.nan, np.nan, np.nan

        pca = PCA(n_components=1, random_state=self.random_state)
        kde = KernelDensity()

        xpca = pca.fit_transform(x)
        h = np.std(xpca) * (4 / 3 / len(xpca)) ** (1 / 5)
        kde.set_params(bandwidth=h).fit(xpca)

        mmin, mmax = np.percentile(xpca, [5, 95])
        xdensity = np.linspace(mmin, mmax, 1000)[:, np.newaxis]
        ydensity = np.exp(kde.score_samples(xdensity))
        # the 1000-point evaluation grid is fairly arbitrary
        local_minimas_idx = argrelmin(ydensity)[0]
        if local_minimas_idx.size == 0:
            return xpca, np.nan, np.nan
        else:
            idx = ydensity[local_minimas_idx].argmin()
            xmin = xdensity[local_minimas_idx[idx]]
            ymin = ydensity[local_minimas_idx[idx]]

        return xpca, xmin, ymin
Example #43
    def spline_max_growth_rate(self, s, droplow=False):

        ### N.B.: set parameter of -2.3 for dropping low OD values from analysis - i.e., OD 0.1###
        if droplow: data = np.where(self.log_data < -2.3, 'nan', self.log_data)
        else: data = self.log_data
        interpolator = interpolate.UnivariateSpline(self.elapsed_time, data, k=4, s=s)  #k can be 3-5
        der = interpolator.derivative()

        # Get the approximation of the derivative at all points
        der_approx = der(self.elapsed_time)

        # Get the maximum
        self.maximum_index = np.argmax(der_approx)
        self.growth_rate = der_approx[self.maximum_index]
        self.doubling_time = np.log(2)/self.growth_rate
        self.time_of_max_rate = self.elapsed_time[self.maximum_index]

        # Get estimates of lag time and saturation time from 2nd derivative
        der2 = der.derivative()
        der2_approx = der2(self.elapsed_time)
        try: self.lag_index = signal.argrelmax(der2_approx)[0][0]  # find first max
        except: self.lag_index = 0
        if self.lag_index > self.maximum_index: self.lag_index = 0
        self.lag_time = self.elapsed_time[self.lag_index]
        self.lag_OD = self.raw_data[self.lag_index]
        minima = signal.argrelmin(der2_approx)[0]  # find first min after maximum_index
        which_min = bisect.bisect(minima, self.maximum_index)
        try: self.sat_index = minima[which_min]
        except: self.sat_index = len(self.elapsed_time) - 1
        self.sat_time = self.elapsed_time[self.sat_index]
        self.sat_OD = self.raw_data[self.sat_index]

        self.spline = interpolator(self.elapsed_time)
        self.intercept = self.log_data[self.maximum_index] - (self.growth_rate*self.time_of_max_rate) # b = y - ax
        self.fit_y_values = [((self.growth_rate * x) + self.intercept) for x in self.elapsed_time]  # for plotting
Example #44
def get_maximas_minimas(data_set):
    data_set_array = np.array(data_set)

    maximas = argrelmax(data_set_array, order=1)
    minimas = argrelmin(data_set_array, order=1)

    return maximas, minimas
Example #45
	def relativeExtremaSegments(self, rawData, maxMin="max", minSegSize=50):
		from scipy.signal import argrelmax, argrelmin
		PCs = pca(rawData, n_components=1)[0]
		if maxMin == 'max':
			return argrelmax(PCs[:,0], order=minSegSize)[0]
		if maxMin == 'min':
			return argrelmin(PCs[:,0], order=minSegSize)[0]
Example #46
    def get_pls_min_index(self, pls_cutoff_frequency=PLS_SUPER_CUTOFF_FREQUENCY):
        # The true minima lie within +-200 ms of the minima of the curve smoothed at 3.5 Hz
        MIN_POINTS_WINDOW = 200  # within +-200 [ms]
        length = self.data.shape[0]
        num = length // FFT_WINDOW_NUM + 1
        pls_smooth = []
        logging.debug(num)
        for i in np.arange(num):
            logging.debug(i)
            if i == num - 1:
                pls_fftfreq = np.fft.fftfreq(length - FFT_WINDOW_NUM * (num - 1), 1.0 / SAMPLING_FREQUENCY)
                pls_fft = np.fft.fft( self.data[PLS_NORMALIZED].values[FFT_WINDOW_NUM * i:])
            else:
                pls_fftfreq = np.fft.fftfreq( FFT_WINDOW_NUM, 1.0/SAMPLING_FREQUENCY)
                pls_fft = np.fft.fft( self.data[PLS_NORMALIZED].values[FFT_WINDOW_NUM * i:FFT_WINDOW_NUM * (i + 1) ] )

            pls_fft[ (pls_fftfreq > pls_cutoff_frequency) | (pls_fftfreq < -pls_cutoff_frequency) ] = 0
            pls_smooth.extend( np.real(np.fft.ifft(pls_fft)) )
        self.data['PLS(smooth)'] = pls_smooth
        min_point_indexes_of_smooth = signal.argrelmin(self.data['PLS(smooth)'].values)[0]
        min_indexes = []
        for index in min_point_indexes_of_smooth:
            if index > 100:
                min_index = self.data[PLS_NORMALIZED].iloc[index - 100:index + 100].argmin()
            else:
                min_index = self.data[PLS_NORMALIZED].iloc[0:index + 100].argmin()
            min_indexes.append(min_index)
        min_indexes = pd.unique(min_indexes)
        self.data[PLS_MIN] = self.data[PLS_NORMALIZED][min_indexes]
        logging.debug(self.data[PLS_MIN])
        return self.data
Example #47
    def argers(gamma):
        inds = list(argrelmin(absolute(-gamma/f0*(sin(2*X)-2*X)/(2*X**2)-X/(Np*pi)))[0])
        data = [f0,]
        if len(inds) > 0:
            data.extend(f[inds])
        else:
            data = [f0, f0, f0]
        return data
Example #48
def compute_mins(xlocs, yvals, window_size = 10):
    yval_locs = argrelmin(yvals, order = window_size)[0]
    if len(yval_locs) == 0:
        return [];
    minPoints = xlocs[yval_locs]
    list1 = []
    for i in minPoints:
        list1.append(i)
    return list1
Example #49
def getrelmaximas(dataMatrix, Sensor,Min = False):
    extrema = []
    if Min==False:
        for Sensors in Sensor:
            extrema.append(dataMatrix[argrelmax(dataMatrix[:,Sensors],order=10),Sensors][0])
    else:
        for Sensors in Sensor:
            extrema.append(dataMatrix[argrelmin(dataMatrix[:,Sensors],order=10),Sensors][0])
    return pd.DataFrame.transpose(pd.DataFrame(extrema))
Example #50
def getminimas(dataMatrix, Sensor=[290]):
    signal = dataMatrix[:,Sensor[0]]
    maxAbsValue, maxAbsFreq = FourierTransformation.maxAbsFreq(signal[0:13000])
    Filtered = FeatureKonstruktion.filter(dataMatrix,Sensor,maxAbsFreq)
    print(maxAbsFreq, maxAbsValue)
    plt.plot(signal)
    plt.show()

    return argrelmin(Filtered[:,Sensor],order=25)
Example #51
def surface_of_section(orbit, plane_ix, interpolate=False):
    """
    Generate and return a surface of section from the given orbit.

    .. warning::

        This is an experimental function and the API may change.

    Parameters
    ----------
    orbit : `~gary.dynamics.CartesianOrbit`
    plane_ix : int
        Integer that represents the coordinate to record crossings in. For
        example, for a 2D Hamiltonian where you want to make a SoS in
        :math:`y-p_y`, you would specify ``plane_ix=0`` (crossing the
        :math:`x` axis), and this will only record crossings for which
        :math:`p_x>0`.
    interpolate : bool (optional)
        Whether or not to interpolate on to the plane of interest. This
        makes it much slower, but will work for orbits with a coarser
        sampling.

    Returns
    -------

    Examples
    --------
    If your orbit of interest is a tube orbit, it probably conserves (at
    least approximately) some equivalent to angular momentum in the direction
    of the circulation axis. Therefore, a surface of section in R-z should
    be instructive for classifying these orbits. TODO...show how to convert
    an orbit to Cylindrical..etc...

    """

    w = orbit.w()
    if w.ndim == 2:
        w = w[..., None]

    ndim, ntimes, norbits = w.shape
    H_dim = ndim // 2
    p_ix = plane_ix + H_dim

    if interpolate:
        raise NotImplementedError("Not yet implemented, sorry!")

    # record position on specified plane when orbit crosses
    all_sos = np.zeros((ndim, norbits), dtype=object)
    for n in range(norbits):
        cross_ix = argrelmin(w[plane_ix, :, n] ** 2)[0]
        cross_ix = cross_ix[w[p_ix, cross_ix, n] > 0.0]
        sos = w[:, cross_ix, n]

        for j in range(ndim):
            all_sos[j, n] = sos[j, :]

    return all_sos
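The core crossing detection in isolation (illustrative): plane crossings are located as local minima of the squared coordinate.

import numpy as np
from scipy.signal import argrelmin

t = np.linspace(0, 4 * np.pi, 1000)
x = np.cos(t)
cross_ix = argrelmin(x ** 2)[0]  # samples where the orbit pierces x == 0
# len(cross_ix) == 4 for this two-period cosine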
Example #52
def min_data(x_data, y_data, width, no_peaks):
    # `order` must be passed by keyword; positionally the second argument of
    # argrelmin is `axis`, not `order`
    min_ind = signal.argrelmin(y_data, order=width)[0]
    #print peakind
    #plt.show()
    x_mins = map(lambda x: x_data[x], min_ind)
    #print time_peaks
    y_mins = map(lambda x: y_data[x], min_ind)
    #print p_peaks
    return x_mins, y_mins
Example #53
def detrend_star(cadence, flux, num = 20):
        arm = argrelmin(flux)[0]
        arm = arm[flux[arm] < -0.005]
        for u in arm:
                fluxbase = flux[max(0, u - int(num)):min(len(flux), u + int(num))]
                fluxbase_mean = np.average(fluxbase[fluxbase > 0])
                flux[max(0, u - int(num)):min(len(flux), u + int(num))] -= fluxbase_mean

        return cadence, flux
Example #54
    def test_residue(self):
        """Test the residue of the emd output."""
        signal = np.sum([self.trend, self.mode1, self.mode2], axis=0)
        decomposer = EMD(signal, t=self.ts)
        imfs = decomposer.decompose()
        n_imfs = imfs.shape[0]
        n_maxima = argrelmax(imfs[n_imfs - 1, :])[0].shape[0]
        n_minima = argrelmin(imfs[n_imfs - 1, :])[0].shape[0]
        self.assertTrue(max(n_maxima, n_minima) <= 2)
Example #55
def getnormallength(xs, dxs, span=3):
    """
    Calculate the length of the normal vector starting from each point on the edge of a closed curve and terminating at the first intersection with another point on the curve.

    args:
        xs (ndarray): coordinates along the curve
        dxs (ndarray): derivatives (of parametric spatial coordinates)

    kwargs:
        span (int): size of moving average filter to apply to final result

    returns:
        ds (ndarray): distances of each line connecting two points on the curve

    """

    # Remove repeated values in the (periodic) input array
    if np.all(np.abs(xs[0] - xs[-1]) < 1e-10):
        xs = xs[:-1]
        dxs = dxs[:-1]

    # Index of all coordinates to loop over
    n = len(xs)
    j = np.arange(n)

    # Remove immediately-adjacent points
    a = 25
    h = -a + j[2 * a + 1:]

    # Average over small window of adjacent points to calculate the distance
    r = np.arange(-4, 5)

    ds = np.ones(n) * np.nan
    for i, (x, dx) in enumerate(zip(xs, dxs)):
        # Calculate the slope of the normal vector
        m = -dx[0] / dx[1]

        # Remove neighboring points from the calculation
        k = np.take(j, i + h, mode='wrap')

        # Find the point that comes closest to intersecting the normal vector
        z = np.sum([-m, 1] * (xs[k] - x), axis=1)
        y = np.abs(z - 0.)
        p = argrelmin(y, order=10, mode='wrap')
        if np.any(p):
            q = k[p][y[p] < 5]
            if np.any(q):
                if len(q) == 1:
                    q = q[0]
                else:
                    q = q[np.argmin(np.sqrt(np.sum((xs[q] - x) ** 2, axis=1)))]

                # Calculate the distance between this point and its neighbors
                v = (xs[np.mod(q + r, n)] - x) ** 2
                ds[i] = np.mean(np.sqrt(np.sum(v, axis=1)))
    return movingaverage(ds, span)
Example #56
def l2_metric(l2, response, sample_size):
    l2 = np.convolve(l2, np.ones(10)/10, mode='same')
    peaks = argrelmin(l2)[0]
    peak_values = l2[peaks]
    samples = np.argsort(peak_values)[:sample_size]
    response = response[peaks[samples]]
    weights = l2[peaks[samples]]
    weights = np.exp(-weights + min(weights))
    weights = weights / np.sum(weights)
    return np.sum(response * weights), np.std(response)
Example #57
def DetectVisPup(im, avgPup, initPupPos, binNum = 2**6-1, kHistMed = 3, convHullTH = 0.7,
                 closePtsTH = 5):
  ellCenter, ellVertices, deg = None, None, None
  negIm = 1 - im
  ## The approach from the article doesn't work very well
  ##
  #hist = cv2.calcHist([negIm],[0],None,[binNum],[0,binNum])
  hist,bins = np.histogram(negIm.ravel(),binNum,[0,1])
  medHist = signal.medfilt(hist, kHistMed)
  #locExtrm = signal.argrelextrema(medHist, np.less)
  locExtrm = signal.argrelmin(hist)
  if locExtrm[0].tolist() == []:
    return None, None, None
  pupTH = np.max(locExtrm[0][-2]/np.float64(binNum)) - 0.05
  ##

  #pupTH = avgPup
  pupBW = np.zeros((im.shape[0], im.shape[1]), 'uint8')
  pupBW[im<avgPup]=255
  _, contours, hierarchy = cv2.findContours(pupBW, cv2.RETR_LIST, cv2.CHAIN_APPROX_NONE)
  if contours == []:
    print("No contours")
    return None, None, None
  contours = [cv2.convexHull(contour) for contour in contours]
  contoursArea = np.asarray([cv2.contourArea(contour) for contour in contours])
  maxContourArea = np.max(contoursArea)
  maxContourInd = np.nonzero(maxContourArea==contoursArea)[0][0]
  maxContour = contours[maxContourInd]
  if maxContourArea < np.sum(contoursArea) * convHullTH:
    mmnts = [cv2.moments(c) for c in contours]
    CM = [(int(M['m10']/M['m00']), int(M['m01']/M['m00'])) if M['m00']!=0 else (np.nan, np.nan) for M in mmnts ]
    maxCM = CM[maxContourInd]
    distCM = [np.sqrt((maxCM[0] - otherCM[0])**2 + (maxCM[1] - otherCM[1])**2) for otherCM in CM]
    _, radius = maxExtentLargstBlob = cv2.minEnclosingCircle(contours[maxContourInd])
    nearByContInd = np.nonzero([dCM < radius/2 for dCM in distCM])[0]
    maxContour = cv2.convexHull(np.vstack(np.asarray([contours[ii] for ii in nearByContInd]))).squeeze()
    #newContours = [contours[ii] for ii in range(len(contours)) if not ii in nearByContInd]
    #newContours.append(largeContour)
  ## sparse up contour points
  #contPntDist =  distance.squareform(distance.pdist(np.asarray(maxContour).squeeze()))
  #tooClosePntsInd = np.where(contPntDist < closePtsTH)
  ##contDist = distance.squareform(distance.pdist(np.asarray(maxContour).squeeze()))
  #for i in range(len(contPntDist)):
  #  for j in range(len(contPntDist)):
  #    if i < j and :
  ##
  ## remove collinear points
  minRow = np.min(maxContour[:, 0])
  maxRow = np.max(maxContour[:, 0])
  ##
  ## Elipse fit
  if len(maxContour)>5:
    ellCenter, ellVertices, deg = cv2.fitEllipse(maxContour)

  return ellCenter, ellVertices, deg
Example #58
def callValleys(lane, gain=7, hamming=5, filt=0.2, order=9):
    """ Identify vallies in the lane 
    
    :Args:
        :param lane: The lane which to call valleys.
        :type lane: tapeAnalyst.gel_processing.GelLane

        :param gain: The gain value to use for increasing contrast (see
            skimage.exposure.adjust_sigmoid)
        :type gain: int

        :param hamming: The window length for the Hamming convolution (see
            scipy.signal.hamming)
        :type hamming: int

        :param filt: Remove all pixels whose intensity is below this value.
        :type filt: float

        :param order: The distance allowed for finding maxima (see scipy.signal.argrelmax)
        :type order: int

    """
    # Increase contrast to help with peak calling
    ladj = exposure.adjust_sigmoid(lane.lane, cutoff=0.5, gain=gain)

    # Take the max pixel intensity for each row in the lane's gel image.
    laneDist = ladj.max(axis=1)

    # Smooth the distribution
    laneDist = signal.convolve(laneDist, signal.hamming(hamming))

    # Get the locations of the dye front and dye end. Peak calling is difficult
    # here because dyes tend to plateau. To aid peak calling, add an artificial
    # spike in dye regions. Also remove all peaks outside of the dyes
    try:
        dyeFrontPeak = int(np.ceil(np.mean([lane.dyeFrontStart, lane.dyeFrontEnd])))
        laneDist[dyeFrontPeak] = laneDist[dyeFrontPeak] + 2
        laneDist[dyeFrontPeak + 1 :] = 0
    except:
        logger.warn("No Dye Front - Lane {}: {}".format(lane.index, lane.wellID))

    try:
        dyeEndPeak = int(np.ceil(np.mean([lane.dyeEndStart, lane.dyeEndEnd])))
        laneDist[dyeEndPeak] = laneDist[dyeEndPeak] + 2
        laneDist[: dyeEndPeak - 1] = 0
    except:
        logger.warn("No Dye End - Lane {}: {}".format(lane.index, lane.wellID))

    # Filter out low levels
    laneDist[laneDist < filt] = 0

    # Find local minima (the valleys)
    valleys = signal.argrelmin(laneDist)[0]

    return valleys
Example #59
 def test_mexhat(self):
     ideal = np.array([-4.36444274e-09, -4.29488427e-04, -1.47862882e-01,
                       4.43113463e-01, -1.47862882e-01, -4.29488427e-04,
                       -4.36444274e-09])
     actual = misc.mexhat(0.5)
     np.testing.assert_allclose(ideal, actual, atol=1e-9, rtol=1e-9)
     maxima = argrelmax(actual)
     self.assertEqual(maxima[0].shape[0], 1)
     self.assertEqual(maxima[0][0], 3)
     minima = argrelmin(actual)
     self.assertCountEqual(minima[0], (2, 4))
Example #60
    def _get_smoothed_extremas(self):
        min_x = spsignal.argrelmin(self._smoothed_spectrum)[0]
        max_x = spsignal.argrelmax(self._smoothed_spectrum)[0]
        min_y = self._smoothed_spectrum[min_x]
        max_y = self._smoothed_spectrum[max_x]
        merged = list(zip(min_x, min_y)) + list(zip(max_x, max_y))
        merged.sort(key=lambda pair: pair[0])

        merged.insert(0, (0, self._smoothed_spectrum[0]))
        last = len(self._smoothed_spectrum) - 1
        merged.append((last, self._smoothed_spectrum[last]))
        return merged