Example #1
    def smooth(self, width, name='boxcar'):
        """
        Smooth a spectrum.

        width = total number of pixels in the kernel 

        name = a string used by sp.signal.get_window.  Things like
        ('gaussian', 1.5) are OK; see the scipy documentation.

        Because even-width kernels are asymmetric, only odd widths
        are allowed.
        """
        if width % 2 == 0:
            raise ValueError(
                "Only odd widths are allowed (even kernels are asymmetric)")
        W = get_window(name, width)
        W /= abs(sp.sum(W))

        fsmooth = sp.convolve(self.f, W, mode='same')
        # treat edge effects by replacing with the original spectrum
        s1 = slice(0, width)
        s2 = slice(-(width), None)

        fsmooth[s1] = self.f[s1]
        fsmooth[s2] = self.f[s2]

        if self.ef is not None:
            efsmooth = sp.sqrt(sp.convolve(self.ef**2, W**2, mode='same'))
            efsmooth[s1] = self.ef[s1]
            efsmooth[s2] = self.ef[s2]
            self.ef = efsmooth

        self.f = fsmooth
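A note on the error propagation above: for a smoothed value f_s = sum_i(W_i * f_i), independent pixel errors propagate as Var(f_s) = sum_i(W_i**2 * sigma_i**2), which is exactly the convolve(ef**2, W**2) line. A minimal standalone sketch of the same scheme, using hypothetical flux and error arrays rather than the class above:

import numpy as np
from scipy.signal import get_window

# hypothetical data: flat spectrum with constant 1-sigma errors
f = np.random.default_rng(0).normal(1.0, 0.1, 200)
ef = np.full_like(f, 0.1)

W = get_window('boxcar', 5)
W /= abs(W.sum())
fsmooth = np.convolve(f, W, mode='same')
# independent-error propagation: var_smooth = convolve(var, W**2)
efsmooth = np.sqrt(np.convolve(ef**2, W**2, mode='same'))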
Example #2
    def smooth(self, width, name='boxcar'):
        """
        Smooth a spectrum.

        width = total number of pixels in the kernel

        name = a string used by sp.signal.get_window.  Things like
        ('gaussian', 1.5) are OK; see the scipy documentation.

        Because even-width kernels are asymmetric, only odd widths
        are allowed.
        """
        if width % 2 == 0:
            raise ValueError(
                "Only odd widths are allowed (even kernels are asymmetric)")
        W = get_window(name, width)
        W /= abs(sp.sum(W))

        fsmooth = sp.convolve(self.f, W, mode='same')
        # treat edge effects by replacing with the original spectrum
        s1 = slice(0, width)
        s2 = slice(-width, None)

        fsmooth[s1] = self.f[s1]
        fsmooth[s2] = self.f[s2]

        if self.ef is not None:
            efsmooth = sp.sqrt(sp.convolve(self.ef**2, W**2, mode='same'))
            efsmooth[s1] = self.ef[s1]
            efsmooth[s2] = self.ef[s2]
            self.ef = efsmooth

        self.f = fsmooth
Example #3
    def SmoothPitchHistogram(self, Histogram=[-1], Variance=15):
        """This function performs low pass filtering (using a window of the shape normal distribution) of the histogram
        Input parameters:
        Variance = variance of the normal distribution used for smoothening of the histogram (default ==15) IN CENTS!!!
        """

        variance = float(
            Variance
        ) / self.hResolution  #variance of the normal distribution, in bins
        wind_range = (np.array([-50, 50]) / self.hResolution).astype(int)
        # window from the index corresponding to -50 cents up to +50 cents
        norm_win = sps.norm(0, variance).pdf(
            np.linspace(wind_range[0],
                        wind_range[1],
                        num=1 + wind_range[1] - wind_range[0]))
        norm_win = norm_win / sum(norm_win)

        # convolve the histogram with this window
        if (len(Histogram) == 1):
            hist_smooth = sp.convolve(self.hist_Yval, norm_win, 'same')

        else:
            hist_smooth = sp.convolve(Histogram, norm_win, 'same')

        #normalizing both histograms
        hist_smooth = hist_smooth / max(hist_smooth)

        if (len(Histogram) == 1):
            self.hist_Yval = hist_smooth
        else:
            return hist_smooth
Example #4
    def _get_yz(self, L, getcovar=True):
        """
        Gets flux and error (y and z) near the line wavelengths, makes
        sure everything is aligned.
        """

        l = deepcopy(L)

        #shift
        l.wv -= self.p['shift']
        m = (self.Lref.wv >= l.wv.min()) * (self.Lref.wv <= l.wv.max())
        y, z = l.interp(self.Lref.wv[m])
        #convolve
        k = self._get_kernel(self.Lref.wv[m])

        #have to make covar before smoothing z
        if getcovar:
            if self.kernelname == 'Delta':
                covar = get_covarmatrix(l.wv, self.Lref.wv[m], l.ef, k, 2)
            else:
                covar = get_covarmatrix(l.wv, self.Lref.wv[m], l.ef, k,
                                        5 * self.p['width'])

        z = sp.sqrt(sp.convolve(z**2, k**2, mode='same'))
        y = sp.convolve(y, k, mode='same')

        #scale
        z *= self.p['scale']
        y *= self.p['scale']
        if getcovar:
            covar *= self.p['scale'] * self.p['scale']
            #add reference errors now to simplify the chi2----these don't
            #seem to matter much
            covar[sp.diag_indices_from(covar)] += self.Lref.ef[m]**2

        #trim 10% of data to help with edge effects and shifting the
        #data.  This number is hard-coded so that the degrees of
        #freedom are fixed during the fit.
        trim = int(round(0.05 * (self.Lref.wv[m].size)))

        z = z[trim:-trim]
        y = y[trim:-trim]
        if getcovar:
            covar = covar[:, trim:-trim]
            covar = covar[trim:-trim, :]

        #need a mask for reference when calculating chi^2
        m2 = (self.Lref.wv >= self.Lref.wv[m][trim]) * (self.Lref.wv <
                                                        self.Lref.wv[m][-trim])

        if getcovar:
            # Note that z**2 (error spectrum) is slightly different
            # than the diagonal of covar, because covariance was
            # ignored for z
            return y, z**2, m2, covar
        else:
            return y, z**2, m2
Example #5
    def velocity_smooth(self, v_width):
        """ 
        Smooths with constant velcoity dispersion by resampling on
        even log intervals, smooth with gaussian of specific width,
        and then resampling to original wavelengths.

        Because log spacing is uneven, cannot use sinc interpolation
        (will use bsplines instead).

        Gaussian width is specified in km/s.
        """
        #get y for evenly spaced in log x
        xorig = deepcopy(self.wv)
        lwv = sp.log(self.wv)
        lognew = sp.r_[lwv.min():lwv.max():1j * lwv.size]
        xnew = sp.exp(lognew)
        xnew[0] = xorig[0]
        xnew[-1] = xorig[-1]

        #since bins will be uneven, cannot use sinc
        flag = 0
        if self.style == 'sinc':
            flag = 1
            print(
                'Warning:  Setting interp style to "bspline" since log spacing is uneven'
            )
            self.set_interp(style='bspline')
        # resample onto the even log grid (needed regardless of interp style)
        self.rebin(xnew)

        #assumes v_width in km/s
        dpix = v_width / 2.998e5 / (lognew[1] - lognew[0])
        #kernel width goes out to 5 sigma
        kw = round(dpix * 10)
        if kw % 2 == 0:
            kw += 1
        W = get_window(('gaussian', dpix), kw)
        W /= abs(sp.sum(W))

        fsmooth = sp.convolve(self.f, W, mode='same')

        s1 = slice(0, kw)
        s2 = slice(-(kw), None)
        fsmooth[s1] = self.f[s1]
        fsmooth[s2] = self.f[s2]

        if self.ef is not None:
            efsmooth = sp.sqrt(sp.convolve(self.ef**2, W**2, mode='same'))
            efsmooth[s1] = self.ef[s1]
            efsmooth[s2] = self.ef[s2]
            self.ef = efsmooth

        self.f = fsmooth

        self.rebin(xorig)

        if flag == 1:
            self.set_interp(style='sinc')
Example #6
    def velocity_smooth(self, v_width):
        """
        Smooths with a constant velocity dispersion by resampling onto
        even logarithmic intervals, smoothing with a Gaussian of the
        specified width, and then resampling back to the original
        wavelengths.

        Because the log-spaced bins are uneven in wavelength, sinc
        interpolation cannot be used (bsplines are used instead).

        The Gaussian width is specified in km/s.
        """
        #get y for evenly spaced in log x
        xorig = deepcopy(self.wv)
        lwv = sp.log(self.wv)
        lognew = sp.r_[lwv.min():lwv.max():1j * lwv.size]
        xnew = sp.exp(lognew)
        xnew[0] = xorig[0]
        xnew[-1] = xorig[-1]

        #since bins will be uneven, cannot use sinc
        flag = 0
        if self.style == 'sinc':
            flag = 1
            print('Warning:  Setting interp style to "bspline" since log spacing is uneven')
            self.set_interp(style='bspline')
        # resample onto the even log grid (needed regardless of interp style)
        self.rebin(xnew)

        #assumes v_width in km/s
        dpix = v_width / 2.998e5 / (lognew[1] - lognew[0])
        #kernel width goes out to 5 sigma
        kw = round(dpix * 10)
        if kw % 2 == 0:
            kw += 1
        W = get_window(('gaussian', dpix), kw)
        W /= abs(sp.sum(W))

        fsmooth = sp.convolve(self.f, W, mode='same')

        s1 = slice(0, kw)
        s2 = slice(-kw, None)
        fsmooth[s1] = self.f[s1]
        fsmooth[s2] = self.f[s2]

        if self.ef is not None:
            efsmooth = sp.sqrt(sp.convolve(self.ef**2, W**2, mode='same'))
            efsmooth[s1] = self.ef[s1]
            efsmooth[s2] = self.ef[s2]
            self.ef = efsmooth

        self.f = fsmooth

        self.rebin(xorig)

        if flag == 1:
            self.set_interp(style='sinc')
Example #7
def getVmax(path):
    path = functions.cubicSplineInterp(path, nfs=2)
    path[:,0] = functions.butter_lowpass_filter(path[:,0], highcut=10, fs=100*2, order=3)
    path[:,1] = functions.butter_lowpass_filter(path[:,1], highcut=10, fs=100*2, order=3)
    # path = numpy.pad(path, pad_width=((pad,pad),(0,0)), mode="edge")
    speed_x = convolve(path[:,0], velocKernel, mode="valid")
    speed_y = convolve(path[:,1], velocKernel, mode="valid")
    velocity = numpy.sqrt(speed_x**2 + speed_y**2)
    vm = numpy.max(velocity) 
    return vm
Example #8
def ttl(d, n, m=10**7):
    """
    /u/ttl's solution. Works by repeated self-convolution of the single-die
    count vector (convolving two count vectors gives the counts for the sum
    of the two dice), doubling the number of dice per squaring step as in
    exponentiation by squaring, with coefficients kept mod m. Returns the
    solution and bonus solution very quickly.
    """
    p = q = [1]*6+[0]
    i = 1
    while i < d:
        q, i = (convolve(q, q) % m, i*2) if 2*i < d else (convolve(q, p) % m, i+1)
    return q[-n-1]
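A quick sanity check of the convolution idea (not part of the original snippet): convolving a count vector with itself gives the number of ways to reach each sum with two dice.

import numpy as np

# ways to roll sums 2..12 with two six-sided dice: 1,2,3,4,5,6,5,4,3,2,1
print(np.convolve([1] * 6, [1] * 6))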
Example #9
def _boxcar(data, nbefore, nafter):
    """ Inside boxcar averaging function doing real works.
        
    Parameters
    -----------
    
    data: array
        data samples at regular intervals.
                  
    nbefore: int
        number of samples before the point to be averaged
        (not including this point).
        
    nafter: int
        number of samples after the point to be averaged 
        (not including this point).
        
    Returns
    --------
    
    results: array
        the new averaged data samples.
            
    """
    ntotal = nbefore + nafter + 1
    b = [1.0 / ntotal] * ntotal
    a = 1.0
    size_data = data.size

    ## Using linear filter doing averaging
    ## of ntotal samples.
    ##dd=lfilter(b,a,data)
    ##dd=[add.reduce(data[i-nbefore:i+nafter+1]) for i in range(nbefore,len_data-nafter)]
    if data.ndim == 1:
        dim2_size = 1
        dd = convolve(data, b, mode="valid")
    elif data.ndim == 2:  ## for multi-dimension data,convolve can't handle it directly
        dim2_size = data.shape[1]
        dd = [convolve(data[:, i], b, mode="valid") for i in range(dim2_size)]
        dd = sciarray(dd)
        dd = transpose(dd)
    else:
        raise ValueError(
            "_boxcar function can't process data with dimension more than 2")

    ## convolved first-dimension length.
    dd_len = dd.shape[0]
    ## Based on the nbefore and nafter, do a
    ## sample result shifting.
    dt = sciarray([nan] * size_data)
    dt = dt.reshape(data.shape)

    #dt[nbefore:len_data-nafter]=dd[nbefore+nafter:len_data]
    dt[nbefore:dd_len + nbefore, ] = dd[0:dd_len, ]
    return dt
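A hypothetical usage sketch (assuming the module-level scipy imports the snippet relies on: convolve, sciarray, nan, transpose): a centered five-point average with nbefore=2 and nafter=2 leaves the first and last two samples as NaN, since the full window does not fit there.

import numpy as np

data = np.arange(10, dtype=float)
# -> [nan, nan, 2., 3., 4., 5., 6., 7., nan, nan]
print(_boxcar(data, nbefore=2, nafter=2))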
Example #10
 def __init__(self, time, path, smoothing, pad=2, zeroInit=True):
     super(pathOps, self).__init__()
     self.pad = pad
     self.len = path.shape[0]
     self.STARTIDX = pad
     self.ENDIDX = path.shape[0] - 1 + pad
     self.zeroInit = zeroInit
     self.time, self.path = self.init(time, path, smoothing, pad)
     self.speed_x = convolve(self.path[:,0], velocKernel, mode="valid")
     self.speed_y = convolve(self.path[:,1], velocKernel, mode="valid")
     self.velocity = numpy.sqrt(self.speed_x**2 + self.speed_y**2)
Example #11
def calculateSNRvt(time, path, param, tinf1idx, tinf2idx):
    vxn = convolve(path[tinf1idx-pad:tinf2idx+pad+1, 0], velocKernel, mode="valid")
    vyn = convolve(path[tinf1idx-pad:tinf2idx+pad+1, 1], velocKernel, mode="valid")
    velocity_n = numpy.sqrt(vxn**2 + vyn**2)

    D, t0, mu, sigma, theta_s, theta_e = param
    ln = functions.lognormal(mu, sigma, loc=t0)
    T = time[tinf1idx-pad:tinf2idx-pad+1]
    velocity_a = ln.eval(T) * D
    
    N = numpy.sum(numpy.square(velocity_a - velocity_n))
    S = numpy.sum(numpy.square(velocity_n))
    return 10 * math.log10(S / (N + 1e-8)), S, N
Example #12
def calculateSNRvxy(time, path, param, tinf1idx, tinf2idx):
    vxn = convolve(path[tinf1idx-pad:tinf2idx+pad+1, 0], velocKernel, mode="valid")
    vyn = convolve(path[tinf1idx-pad:tinf2idx+pad+1, 1], velocKernel, mode="valid")
    velocity_n = numpy.sqrt(vxn**2 + vyn**2)

    D, t0, mu, sigma, theta_s, theta_e = param
    ln = functions.lognormal(mu, sigma, loc=t0)
    T = time[tinf1idx-pad:tinf2idx-pad+1]
    velocity_a = ln.eval(T) * D
    vxa, vya = angleEst.estimateVxy(velocity_a, T, *param)

    N = numpy.sum((numpy.square(vxn - vxa) + numpy.square(vyn - vya)))
    S = numpy.sum((numpy.square(vxn) + numpy.square(vyn)))
    return 10. * math.log10(S / (N + 1e-8)), S, N
Example #13
def d_average_multinom(l, n, sequence_type, score_type):
    """ Helper function for the analytic calculation of parsimony or pSim scores.

    Called by ``d_average_multiple_max_multinom`` or ``d_average_multiple_pars_multinom``.

    Return the ``l``-times self-convolved pdf of the score on random sequence_type data
    of length ``n``. The order of the pdf is kept from ``column_pdf()``.

    For details see:

    Schaper, E., Kajava, A., Hauser, A., & Anisimova, M. Repeat or not repeat?
    --Statistical validation of tandem repeat prediction in genomic sequences.
    Nucleic Acids Research (2012).

    Args:
        l (int): The length of the repeat unit.
        n (int): The number of repeat units.
        sequence_type (str): The type of the sequence: either "AA" or "DNA".
        score_type: The model of repeat evolution used. Either 'psim' or 'parsimony'.

    Returns:
        (description missing)

    .. warning:: if precision higher than max(uint32) use uint64 instead.
            CHECK: http://docs.scipy.org/doc/numpy/user/basics.types.html

    .. todo:: Describe return value.
    """

    complete_pdf = column_pdf(n=n, score_type=score_type, sequence_type=sequence_type)
    if l != 1:
        single_column_pdf = complete_pdf
        for i in range(2, l + 1):
            complete_pdf = sp.convolve(complete_pdf, single_column_pdf)
    return complete_pdf
Example #14
def convolve_with_profile(pulsar_object, input_array):
    """
    General convolution function. Takes an input array made in other functions
    to convolve with the pulse profile.
    
    Parameters
    ----------
    pulsar_object: VersionZeroPointZero.pulsar.Pulsar object
        The pulsar object
    input_array: array
        Any array the user wants to convolve with the pulse profile
    """

    width = pulsar_object.nBinsPeriod
    for ii, freq in enumerate(pulsar_object.Signal_in.freq_Array):
        #Normalizing the pulse profile
        pulsar_prof_sum = np.sum(pulsar_object.profile[ii, :])
        pulsar_prof_norm = pulsar_object.profile[ii, :] / pulsar_prof_sum

        #Normalizing the input array
        input_array_sum = np.sum(input_array[ii, :])
        input_array_norm = input_array[ii, :] / input_array_sum

        #Convolving the input array with the pulse profile
        convolved_prof = sp.convolve(pulsar_prof_norm, input_array_norm,
                                     "full")

        #Renormalizing the convolved pulse profile
        pulsar_object.profile[
            ii, :] = (pulsar_prof_sum) * (convolved_prof[:width])
Example #15
def callback_LaserScan(new_LaserScan=LaserScan()):
    print(len(new_LaserScan.ranges), new_LaserScan.angle_min,
          new_LaserScan.angle_max)
    data = np.array(new_LaserScan.ranges)
    cost = np.zeros_like(data)

    # calculate cost
    cost[data > 10] = 0
    cost[data < 10] = 1.0 * (10 - data[data < 10]) / 10
    cost[data < 2] = 1.

    # smooth sharp edges
    kernel = np.zeros(15)
    kernel[5:-5] = 1
    kernel /= np.sum(kernel)

    convolved_cost = scipy.convolve(cost, kernel)

    cost[data < 5] = convolved_cost[data < 5]

    cost *= 5
    new_LaserScan.ranges = cost
    pub_cost_range.publish(new_LaserScan)

    print('published data 2', np.mean(cost))
Example #16
def f_Refl_Thick_Film():
    a = P4Rm()

    wl = a.AllDataDict['wavelength']
    t = a.AllDataDict['damaged_depth']
    N = a.AllDataDict['number_slices']
    t_film = a.AllDataDict['film_thick']

    G = a.ParamDict['G']
    thB_S = a.ParamDict['thB_S']
    resol = a.ParamDict['resol']
    phi = a.ConstDict['phi']
    t_l = a.ParamDict['t_l']
    z = a.ParamDict['z']
    FH = a.ParamDict['FH']
    FmH = a.ParamDict['FmH']
    F0 = a.ParamDict['F0']
    sp = a.ParamDict['sp']
    dwp = a.ParamDict['dwp']
    th = a.ParamDict['th']
    spline_DW = a.splinenumber[1]
    spline_strain = a.splinenumber[0]
    param = a.ParamDict['par']
    delta_t = t_film - t

    strain = f_strain(z, param[:len(sp):], t, spline_strain)
    DW = f_DW(z, param[len(sp):len(sp) + len(dwp):], t, spline_DW)
    thB = thB_S - strain * tan(thB_S)  # Bragg angle in each lamella

    eta = 0
    res = 0

    g0 = sin(thB[0] - phi)  # gamma 0
    gH = -sin(thB[0] + phi)  # gamma H
    b = g0 / gH
    T = pi * G * ((FH[0] * FmH[0])**0.5) * delta_t / (wl * (abs(g0 * gH)**0.5))
    eta = (-b * (th - thB[0]) * sin(2 * thB_S) - 0.5 * G * F0[0] *
           (1 - b)) / ((abs(b)**0.5) * G * (FH[0] * FmH[0])**0.5)
    S1 = (res - eta + (eta * eta - 1)**0.5) * exp(-1j * T *
                                                  (eta * eta - 1)**0.5)
    S2 = (res - eta - (eta * eta - 1)**0.5) * exp(1j * T *
                                                  (eta * eta - 1)**0.5)
    res = (eta + ((eta * eta - 1)**0.5) * ((S1 + S2) / (S1 - S2)))

    n = 1
    while (n <= N):
        g0 = sin(thB[n] - phi)  # gamma 0
        gH = -sin(thB[n] + phi)  # gamma H
        b = g0 / gH
        T = pi * G * (
            (FH[n] * FmH[n])**0.5) * t_l * DW[n] / (wl * (abs(g0 * gH)**0.5))
        eta = (-b * (th - thB[n]) * sin(2 * thB_S) - 0.5 * G * F0[n] *
               (1 - b)) / ((abs(b)**0.5) * G * DW[n] * (FH[n] * FmH[n])**0.5)
        S1 = (res - eta + (eta * eta - 1)**0.5) * exp(-1j * T *
                                                      (eta * eta - 1)**0.5)
        S2 = (res - eta - (eta * eta - 1)**0.5) * exp(1j * T *
                                                      (eta * eta - 1)**0.5)
        res = (eta + ((eta * eta - 1)**0.5) * ((S1 + S2) / (S1 - S2)))
        n += 1
    return convolve(abs(res)**2, resol, mode='same')
Example #17
def generate_data(Kernels, Events, Weights, t_stop, noise=0):
    """

    """

    dt = Kernels.sampling_period

    nSamples = sp.rint((t_stop / dt).magnitude).astype('int32')
    signals = sp.zeros((nSamples, len(Events)))

    tvec = sp.arange(0, t_stop.magnitude, dt.magnitude) * pq.s

    # convolution
    for i, event in enumerate(Events):
        inds = times2inds(tvec, event.times)
        binds = sp.zeros(tvec.shape)
        binds[inds] = 1
        kernel = Kernels[:, i].magnitude.flatten()
        signals[:, i] = sp.convolve(binds, kernel, mode='same')

    # distributing weights
    signals = signals @ Weights

    # adding noise
    signals = signals + sp.randn(*signals.shape) * noise

    # cast to neo asig
    Asigs = []
    for i in range(signals.shape[1]):
        asig = neo.core.AnalogSignal(signals[:, i],
                                     units=pq.dimensionless,
                                     sampling_period=dt)
        Asigs.append(asig)
    return Asigs
Example #18
def skeletize_latecki(img,gaussian_filter):
    # http://www.cis.temple.edu/~latecki/Papers/icip__SSM07.pdf
    dt = numpy.float32
    if img.ndim == 3:
        img = img.mean(axis=2)
    img = img.astype(dt)
    img /= img.max()
    gimg = scipy.ndimage.gaussian_filter(img, 2)
    ximg = img.max() - img
    dimg = scipy.ndimage.distance_transform_edt(ximg)
    #
    # now let's compute f
    fimg = 1 - scipy.abs(scipy.convolve(cgradient(gimg), dimg))
    #
    # and now compute its gradient
    u0,v0=scipy.gradient(fimg)
    #
    # now we do diffusion

    
    ##
    # now we apply SSM map
    gvf=diffuse_gradient_vector
    
    #
    # then we do particular point detection
    mimg1 = scipy.ndimage.maximum_filter(dimg, size=(3, 3))
    img = ((dimg - mimg1) == 0).astype(dt)
    img *= ximg
    return img
Example #19
 def residual_lmfit(self, pars, x, y):
     a = P4Rm()
     self.strain_DW(pars)
     res = f_Refl_fit(a.AllDataDict["geometry"], self.Data4f_Refl)
     y_cal = convolve(abs(res) ** 2, a.ParamDict["resol"], mode="same")
     y_cal = y_cal / y_cal.max() + a.AllDataDict["background"]
     return log10(y) - log10(y_cal)
Example #20
def smooth(array, binwidth):
    """
    Takes a list where every element is some value and performs a sliding
    window average around that coordinate, creating a new list whose length
    is the same as the input.
    """
    array = scipy.convolve(array, scipy.ones(binwidth) / binwidth, mode='same')
    return array
Example #21
def discrete_gauss(n, g=[0.5, 0.5]):
    """
    discrete_gauss(n, g=[0.5, 0.5])

    Estimates the discrete Gaussian distribution (probability mass function)
    by multiple convolutions with a minimal kernel g.

    :param n: scalar.
           the number of elements of the result (n = 2..1000).
           the functions performs n-2 convolutions to create the result.

    :param g: 1-D array.
           the minimal kernel. Default value is [0.5, 0.5].
           Other kernels of the form [a, 1-a],
           where 0 < a < 1.0, are possible, but they are less effective:
           1. a larger n is needed to be as similar to a Gaussian.
           2. the peak of the result is not centered.

    :return: 1-D array.
          f, the discrete estimate of Gaussian distribution.
         f has n elements.
     """
    if g[0] <= 0 or g[1] >= 1:
        return None
    elif n not in range(2, 1001):
        return None
    elif sum(g) != 1.0:
        return None
    else:
        f = g
    for i in range(n - 2):
        f = sp.convolve(f, g)

    return f
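Since discrete_gauss(n) performs n-2 self-convolutions of [0.5, 0.5], the result is exactly the binomial pmf with n-1 trials and p=0.5, which gives a quick sanity check:

from scipy.stats import binom

print(discrete_gauss(5))            # [0.0625 0.25 0.375 0.25 0.0625]
print(binom.pmf(range(5), 4, 0.5))  # the same values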
Example #22
def NumRectifier(dt=-1, ydata=[]):
    if dt <= 0:
        return []

    if len(ydata) == 0:
        return []

    C = 1e-12
    R = 1e+10

    kernelSize = 5
    kernel = np.zeros(kernelSize)
    halflen, rem = divmod(kernelSize, 2)

    kernel[0] = 1 * C / (12 * dt)
    kernel[1] = -2 * C / (3 * dt)
    kernel[2] = -1 / R
    kernel[3] = 2 * C / (3 * dt)
    kernel[4] = -1 * C / (12 * dt)
    kernel = kernel * 0.25

    I_b = sci.convolve(ydata, kernel, 'same')
    I_b[0:halflen] = I_b[halflen + 1]
    I_b[len(I_b) - halflen:len(I_b)] = I_b[len(I_b) - halflen - 1]

    return I_b
Example #23
def dAverageMultinom(l, n, sequence_type, score):
    """ Helper function for the analytic calculation of parsimony or pSim scores.

    Called by ``dAverageMultipleMaxMultinom`` or ``dAverageMultipleParsMultinom``.

    Return the ``l``-times self-convolved pdf of the score on random sequence_type data
    of length ``n``. The order of the pdf is kept from ``columnPDF()``.

    For details see:

    Schaper, E., Kajava, A., Hauser, A., & Anisimova, M. Repeat or not repeat?
    --Statistical validation of tandem repeat prediction in genomic sequences.
    Nucleic Acids Research (2012).

    Args:
        l (int): The length of the repeat unit.
        n (int): The number of repeat units.
        sequence_type (str): The type of the sequence: either "AA" or "DNA".
        score: The model of repeat evolution used. Either 'psim' or 'parsimony'.

    Returns:
        (description missing)

    .. warning:: if precision higher than max(uint32) use uint64 instead.
            CHECK: http://docs.scipy.org/doc/numpy/user/basics.types.html

    .. todo:: Describe return value.
    """

    completePDF = columnPDF(n=n, score=score, sequence_type=sequence_type)
    if l != 1:
        singleColumnPDF = completePDF
        for i in range(2, l + 1):
            completePDF = sp.convolve(completePDF, singleColumnPDF)
    return completePDF
Example #24
    def smooth(self, mol, smooth):
        """
        Takes a list where every element is some value and performs a sliding
        window average around that coordinate, creating a new list whose length
        is the same as the input.
        """
        mol = scipy.convolve(mol, scipy.ones(smooth) / smooth, mode='same')
        return mol
Example #25
def smooth(x, window_len=11):
    ''' Remove fast oscillations from results '''
    # reflect-pad the signal so the window can run over the edges; with
    # mode='valid' the output has len(x) + window_len - 1 samples
    s = r_[x[window_len - 1:0:-1], x, x[-1:-window_len:-1]]

    w = ones(window_len, 'd')
    y = convolve(w / w.sum(), s, mode='valid')
    return y
Example #26
 def residual_lmfit(self, pars, x, y):
     a = P4Rm()
     self.strain_DW(pars)
     res = f_Refl_fit(a.AllDataDict['geometry'], self.Data4f_Refl)
     y_cal = convolve(abs(res) ** 2, a.ParamDict['resol'], mode='same')
     y_cal = y_cal / y_cal.max() + a.AllDataDict['background']
     return (log10(y) - log10(y_cal))
Example #27
    def area(self, mz=None, method='shoelace'):
        data = self.as_poly(mz)

        # filter out any points that have a nan
        fdata = data[~np.isnan(data).any(1)]

        if method == 'shoelace':
            # up to 5e-10 diff from shoelace-slow
            csum = np.sum(np.fliplr(np.roll(fdata, 1, axis=0)) * fdata, axis=0)
            return 0.5 * np.abs(csum[0] - csum[1])
        elif method == 'shoelace-slow':
            csum = 0
            x, y = fdata[-1, :]
            for i in fdata:
                csum += i[0] * y - i[1] * x
                x, y = i
            return abs(csum / 2.)
        elif method == 'trapezoid':
            # http://en.wikipedia.org/wiki/trapezoidal_rule#non-uniform_grid
            # todo: this essentially ignores baseline data?
            # fdata[:, 1][fdata[:, 1] < 0] = 0
            # y = convolve(fdata[:, 1], [0.5, 0.5], mode='valid')

            # y = convolve(np.abs(fdata[:, 1]), [0.5, 0.5], mode='valid')

            y = convolve(fdata[:, 1], [0.5, 0.5], mode='valid')
            if y.shape[0] != fdata.shape[0] - 1:
                return 0
            return np.sum(np.diff(fdata[:, 0]) * y)
        elif method == 'sum':
            return np.sum(fdata[:, 1])
Example #28
    def area(self, mz=None, method='shoelace'):
        data = self.as_poly(mz)

        # filter out any points that have a nan
        fdata = data[~np.isnan(data).any(1)]

        if method == 'shoelace':
            # up to 5e-10 diff from shoelace-slow
            csum = np.sum(np.fliplr(np.roll(fdata, 1, axis=0)) * fdata, axis=0)
            return 0.5 * np.abs(csum[0] - csum[1])
        elif method == 'shoelace-slow':
            csum = 0
            x, y = fdata[-1, :]
            for i in fdata:
                csum += i[0] * y - i[1] * x
                x, y = i
            return abs(csum / 2.)
        elif method == 'trapezoid':
            #http://en.wikipedia.org/wiki/trapezoidal_rule#non-uniform_grid
            #todo: this essentially ignores baseline data?
            #fdata[:, 1][fdata[:, 1] < 0] = 0
            #y = convolve(fdata[:, 1], [0.5, 0.5], mode='valid')

            #y = convolve(np.abs(fdata[:, 1]), [0.5, 0.5], mode='valid')

            y = convolve(fdata[:, 1], [0.5, 0.5], mode='valid')
            if y.shape[0] != fdata.shape[0] - 1:
                return 0
            return np.sum(np.diff(fdata[:, 0]) * y)
        elif method == 'sum':
            return np.sum(fdata[:, 1])
Example #29
 def filter(self, s):
     '''Accepts an input source - assumed to be from temporal ICA - and filters it using the
     supplied irf. Returns the convolved signal.'''
     # compute filter points out to about 25% of the maximum time (~0.25*dt*matchSamp[-1])
     maxt = 0.25 * self.dt * self.x[-1]
     t = np.arange(0, maxt, self.dt)
     return sp.convolve(self.compute_irf(t), s, mode='full')[0:len(s)]
Example #30
def Filtering(dt=-1, ydata=[], ktype='boxcar', kwidth=1):
    if (dt <= 0):
        return []

    if len(ydata) == 0:
        return []

    if kwidth < dt:
        kwidth = dt
    elif kwidth > 3:
        kwidth = 3

    winsize = int(np.floor(kwidth / dt))
    if winsize != 1:
        if (ktype == 'norm' or ktype == 'Gaussian'):
            window = signal.windows.gaussian(winsize, winsize / 6)
        else:
            # boxcar, triang, blackman, hamming, hann, bartlett, flattop, parzen...
            # https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.get_window.html#scipy.signal.get_window
            window = signal.get_window(ktype, winsize)

        window = window / sum(window)
        ydata = sci.convolve(ydata, window, 'same')
        halflen, rem = divmod(winsize, 2)
        if rem == 0:
            halflen = halflen + 1
        ydata[0:halflen] = ydata[halflen + 1]
        ydata[len(ydata) - halflen:len(ydata)] = ydata[len(ydata) - halflen -
                                                       1]

    return ydata
Example #31
def nudge_dataset(X, Y):
    """
    This produces a dataset 5 times bigger than the original one,
    by moving the 64x64 images in X around by 1px to left, right, down, up
    """
    direction_vectors = [
        [[0, 1, 0],
         [0, 0, 0],
         [0, 0, 0]],

        [[0, 0, 0],
         [1, 0, 0],
         [0, 0, 0]],

        [[0, 0, 0],
         [0, 0, 1],
         [0, 0, 0]],

        [[0, 0, 0],
         [0, 0, 0],
         [0, 1, 0]]]

    # note: convolve here is scipy.ndimage.convolve (it takes a weights
    # keyword and mode='constant'), not scipy.convolve
    shift = lambda x, w: convolve(x.reshape((64, 64)), mode='constant', weights=w).ravel()
    X = np.concatenate([X] + [np.apply_along_axis(shift, 1, X, vector) for vector in direction_vectors])
    Y = np.concatenate([Y for _ in range(5)], axis=0)
    return X, Y
Example #32
def pdf_conv(D1, D2):
    # Works for continuous PDFs if both are defined over the same grid.
    # TODO: allow different grids with the same grid interval.
    int1 = D1.interval(.99999)
    int2 = D2.interval(.99999)
    supp = [min(int1[0], int2[0]), max(int1[1], int2[1])]
    grid = np.linspace(supp[0], supp[1], 1000)

    return grid, scipy.convolve(D1.pdf(grid), D2.pdf(grid), 'same')
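One caveat worth noting (not in the original snippet): the discrete convolution approximates the convolution integral of the two densities only up to the grid step, so the returned values should be rescaled by the spacing to remain a normalized density. A minimal check, assuming pdf_conv as defined above:

import numpy as np
from scipy.stats import norm

grid, pdf = pdf_conv(norm(0, 1), norm(0, 1))
pdf = pdf * (grid[1] - grid[0])  # rescale by the grid step
# the sum of two standard normals is N(0, sqrt(2)); the rescaled result
# integrates to ~1 (up to tails clipped by mode='same')
print(np.trapz(pdf, grid))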
Example #33
def movingaverage(val, filt=np.r_[1., 1., 3., 1., 1.]):
    filt = filt / filt.sum()
    #     print filt
    temp = np.r_[val[0].repeat(filt.size - 1), val,
                 val[-1].repeat(filt.size - 1)]
    out = scipy.convolve(temp, filt)
    # use integer offsets (a float offset is not a valid index)
    off = filt.size - 1 + (filt.size - 1) // 2
    return out[off:off + val.size]
Example #34
def updateL(val):
    global hw, da, cx, cl
    hw = fwhmL.val
    lorenz = Lorenz(cx - xmed, hw) * step
    cl = scipy.convolve(da, lorenz, mode=1)  # mode=1 is 'same'
    lpl.set_ydata(cl)    
    #print integral(cx,da), integral(cx,cl)
    draw()
Example #35
    def compute(
        self,
        anaSigList,
        sign='-',
        left_sweep=0.001,
        right_sweep=0.002,
        baseline_time=.05,
        rise_time=.001,
        peak_time=.001,
        threshold=20.,
        window=.0001,
    ):
        anaSig = anaSigList[0]
        sr = anaSig.sampling_rate

        nb = int(baseline_time * sr)
        nb = int(nb / 2.) * 2 + 1
        nr = int(rise_time * sr)
        np = int(peak_time * sr)
        print(peak_time, np, sr)
        nw = int(window * sr)

        sigBase = convolve(anaSig.signal, ones(nb, dtype='f') / nb, 'same')
        #~ sigBase = signal.medfilt(anaSig.signal , kernel_size = nb)

        sigPeak = convolve(anaSig.signal, ones(np, dtype='f') / np, 'same')

        if sign == '-':
            aboves = sigBase[:-(nr + nb / 2 + np / 2
                                )] - sigPeak[nr + nb / 2 + np / 2:] > threshold
        elif sign == '+':
            aboves = sigPeak[nr + nb / 2 + np / 2:] - sigBase[:-(
                nr + nb / 2 + np / 2)] > threshold

        print(aboves)
        # detection when n point consecutive more than window
        aboves = aboves.astype('f')
        aboves[0] = 0
        aboves[-1] = 0
        indup, = where(diff(aboves) > .5)

        return indup + nr + nb / 2 + np / 2

        #~ inddown, = where( diff(aboves)<-.5)
        #~ print indup
        #~ print inddown
Example #36
def updateG(val):
    global hw, da, cx, cg, xmed, gauss
    hw = fwhmG.val
    gauss = Gauss(cx - xmed, hw) * step
    cg = scipy.convolve(da, gauss, mode=1)  # mode=1 is 'same'
    gpl.set_ydata(cg)
    #print integral(cx,da), integral(cx,cg)
    draw()
Example #37
def compensate(v, i, ke):
    '''
    Active Electrode Compensation, done offline.
    v = recorded potential
    i = injected current
    ke = electrode kernel
    Returns the compensated potential.
    '''
    return v - convolve(ke, i)[:-(len(ke) - 1)]
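A hypothetical round-trip check (assuming the snippet's module-level convolve import from scipy): if the recorded trace is a true potential plus the electrode response convolve(ke, i), compensation recovers the true potential exactly.

import numpy as np

rng = np.random.default_rng(1)
i = rng.standard_normal(1000)                # injected current
ke = np.exp(-np.arange(20) / 5.0)            # toy electrode kernel
v_true = np.sin(np.linspace(0, 10, 1000))
v_rec = v_true + np.convolve(ke, i)[:len(i)]
print(np.allclose(compensate(v_rec, i, ke), v_true))  # True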
Example #38
    def __init__(self):
        report = Report('id', caption='env1d')
        self.N = 1000
        self.res = 10
        
        x = np.linspace(0, 10, self.N * self.res)
        self.E = scipy.convolve(np.random.ranf(len(x)),
                                np.ones(self.res * 20) / self.res * 20,
                                mode='same')
        plot_env(report, x, self.E)
        
        self.commands = [-2.5, 2.0]
        self.n_sampels = [0, 0]
        self.sensels = [30, 31]
        self.state = self.N // 2  # integer division so state can index arrays
        
        self.plot_y = False
        self.plot_e = False
        
        self.size = 60
        self.area = 9
        self.s = range(self.size)
        
        self.clean()
        lsize = 20
        sensor_noise = 0
        actuator_noise = 0
        self.run_learning(lsize, actuator_noise=actuator_noise, sensor_noise=sensor_noise)
        report.text('info0', ('Learning size: \t\t%g \nActuator noise: \t%g ' + 
                             '\nSensor noise: \t\t%g') % (lsize, actuator_noise, sensor_noise))
        
        report.text('commands', str(self.commands))
        self.summarize(report, 0)
        
        
        self.state = self.N // 2
        self.clean()
        lsize = 100
        sensor_noise = 0
        actuator_noise = 2
        self.run_learning(lsize, actuator_noise=actuator_noise, sensor_noise=sensor_noise)
        report.text('info1', ('Learning size: \t\t%g \nActuator noise: \t%g ' + 
                             '\nSensor noise: \t\t%g') % (lsize, actuator_noise, sensor_noise))
        self.summarize(report, 1)
        
        
        self.state = self.N // 2
        self.clean()
#        lsize = 1000
        sensor_noise = 2
        actuator_noise = 0
        self.run_learning(lsize, actuator_noise=actuator_noise, sensor_noise=sensor_noise)
        report.text('info2', ('Learning size: \t\t%g \nActuator noise: \t%g ' + 
                             '\nSensor noise: \t\t%g') % (lsize, actuator_noise, sensor_noise))
        self.summarize(report, 2)
        
        report.to_html('env1d.html')
Example #39
def cost_graph(model):
    """Plot the cost as a function a constant u (single shooting) based on the
       model model.
    
    This function was mainly used for testing.
    
    Notes:
     * Currently written specifically for VDP.
     * Currently only supports inputs.
    """
    start_time = model.opt_interval_get_start_time()
    end_time = model.opt_interval_get_final_time()

    u = model.u
    print "Initial u:", u

    costs = []
    Us = []

    simulator = SundialsOdeSimulator(model,
                                     start_time=start_time,
                                     final_time=end_time)

    for u_elmnt in N.arange(-0.5, 1, 0.02):
        print "u is", u
        model.reset()
        u[0] = u_elmnt

        simulator.run()
        T, ys = simulator.get_solution()

        model.set_x_p(ys[-1], 0)
        model.set_dx_p(model.dx, 0)
        model.set_u_p(model.u, 0)

        cost = model.opt_eval_J()
        print "Cost:", cost

        # Saved for plotting
        costs.append(cost)
        Us.append(u_elmnt)

        cost_jac = model.opt_eval_jac_J(pyjmi.JMI_DER_X_P)

    p.subplot('121')
    p.plot(Us, costs)
    p.title("Costs as a function of different constant Us (VDP model)")

    from scipy import convolve
    p.subplot('122')
    dUs = Us
    dcost = convolve(costs, N.array([1, -1]) / 0.02)[0:-1]
    assert len(dUs) == len(dcost)
    p.plot(dUs, dcost)
    p.title('Forward derivatives')

    p.show()
Example #40
def AEC_compensate(v, i, ke):
    '''
    Active Electrode Compensation, done offline.
    v = recorded potential
    i = injected current
    ke = electrode kernel
    Returns the compensated potential.
    '''
    return v - convolve(ke, i)[:-(len(ke) - 1)]
Example #41
 def remove_km(RawK, Km):
     '''
     Solves Ke = RawK - Km * Ke/Re for a dendritic Km.
     '''
     Kel = RawK - Km
     for _ in range(5): # Iterative solution
         Kel = RawK - convolve(Km, Kel)[:len(Km)] / sum(Kel)
         # NB: Re=sum(Kel) increases after every iteration
     return Kel
Example #42
def smooth(x,window_len=11,window='hanning'):
    """smooth the data using a window with requested size.
    
    This method is based on the convolution of a scaled window with the signal.
    The signal is prepared by introducing reflected copies of the signal 
    (with the window size) at both ends so that transient parts are minimized
    in the beginning and end part of the output signal.
    
    input:
        x: the input signal 
        window_len: the dimension of the smoothing window; should be an odd integer
        window: the type of window from 'flat', 'hanning', 'hamming', 'bartlett', 'blackman'
            flat window will produce a moving average smoothing.

    output:
        the smoothed signal
        
    example:

    t=linspace(-2,2,41)
    x=sin(t)+randn(len(t))*0.1
    y=smooth(x)
    
    see also: 
    
    numpy.hanning, numpy.hamming, numpy.bartlett, numpy.blackman, numpy.convolve
    scipy.signal.lfilter
 
    TODO: the window parameter could be the window itself if an array instead of a string
    NOTE: length(output) != length(input), to correct this: return y[(window_len/2-1):-(window_len/2)] instead of just y.
    """ 
     
    if x.ndim != 1:
        raise ValueError("smooth only accepts 1 dimension arrays.")

    if x.size < window_len:
        raise ValueError("Input vector needs to be bigger than window size.")

    if window_len < 3:
        return x

    if window not in ['flat', 'hanning', 'hamming', 'bartlett', 'blackman']:
        raise ValueError("Window must be one of 'flat', 'hanning', 'hamming',"
                         " 'bartlett', 'blackman'")
    

    s = scipy.r_[x[window_len - 1:0:-1], x, x[-1:-window_len:-1]]
    #print(len(s))
    if window == 'flat':  # moving average
        w = scipy.ones(window_len, 'd')
    else:
        w = getattr(scipy, window)(window_len)

    y = scipy.convolve(w / w.sum(), s, mode='same')
    return y
Example #43
 def residual_square(self, p, E_min, nb_minima):
     a = P4Rm()
     P4Rm.ParamDict["_fp_min"] = p
     self.strain_DW()
     res = f_Refl_fit(a.AllDataDict["geometry"], self.Data4f_Refl)
     y_cal = convolve(abs(res) ** 2, a.ParamDict["resol"], mode="same")
     y_cal = y_cal / y_cal.max() + a.AllDataDict["background"]
     y_obs = a.ParamDict["Iobs"]
     self.on_pass_data_to_thread(y_cal, p, E_min, nb_minima)
     return ((log10(y_obs) - log10(y_cal)) ** 2).sum() / len(y_cal)
Example #44
    def compute(self, anaSigList, sign = '-', left_sweep = 0.001 , right_sweep = 0.002, 
                                                                        baseline_time = .05,
                                                                        rise_time = .001,
                                                                        peak_time = .001,
                                                                        threshold =20.,
                                                                        window = .0001,

                                                ):
        anaSig = anaSigList[0]
        sr = anaSig.sampling_rate
        
        nb = int(baseline_time*sr)
        nb = int(nb/2.)*2+1
        nr = int(rise_time*sr)
        np = int(peak_time*sr)
        print(peak_time, np, sr)
        nw =  int(window*sr)
        
        sigBase = convolve(anaSig.signal , ones(nb, dtype='f')/nb , 'same')
        #~ sigBase = signal.medfilt(anaSig.signal , kernel_size = nb)
        
        sigPeak = convolve(anaSig.signal , ones(np, dtype='f')/np , 'same')
        
        if sign=='-':
            aboves=  sigBase[:-(nr+nb/2+np/2)]  - sigPeak[nr+nb/2+np/2:] >  threshold 
        elif sign=='+':
            aboves =    sigPeak[nr+nb/2+np/2:] - sigBase[:-(nr+nb/2+np/2)] >  threshold 
        
        
        print(aboves)
        # detection when n point consecutive more than window
        aboves = aboves.astype('f')
        aboves[0]=0
        aboves[-1]=0
        indup, = where( diff(aboves)>.5)
        
        return indup+nr+nb/2+np/2
        
        #~ inddown, = where( diff(aboves)<-.5)
        #~ print indup
        #~ print inddown
Example #45
def f_Refl_Thick_Film():
    a = P4Rm()

    wl = a.AllDataDict['wavelength']
    t = a.AllDataDict['damaged_depth']
    N = a.AllDataDict['number_slices']
    t_film = a.AllDataDict['film_thick']

    G = a.ParamDict['G']
    thB_S = a.ParamDict['thB_S']
    resol = a.ParamDict['resol']
    phi = a.ConstDict['phi']
    t_l = a.ParamDict['t_l']
    z = a.ParamDict['z']
    FH = a.ParamDict['FH']
    FmH = a.ParamDict['FmH']
    F0 = a.ParamDict['F0']
    sp = a.ParamDict['sp']
    dwp = a.ParamDict['dwp']
    th = a.ParamDict['th']
    spline_DW = a.splinenumber[1]
    spline_strain = a.splinenumber[0]
    param = a.ParamDict['par']
    delta_t = t_film - t

    strain = f_strain(z, param[:len(sp):], t, spline_strain)
    DW = f_DW(z, param[len(sp):len(sp)+len(dwp):], t, spline_DW)
    thB = thB_S - strain * tan(thB_S)  # Bragg angle in each lamella

    eta = 0
    res = 0

    g0 = sin(thB[0] - phi)  # gamma 0
    gH = -sin(thB[0] + phi)  # gamma H
    b = g0 / gH
    T = pi * G * ((FH[0]*FmH[0])**0.5) * delta_t / (wl * (abs(g0*gH)**0.5))
    eta = (-b*(th-thB[0])*sin(2*thB_S) - 0.5*G*F0[0]*(1-b)) / ((abs(b)**0.5) * G  * (FH[0]*FmH[0])**0.5)
    S1 = (res - eta + (eta*eta-1)**0.5)*exp(-1j*T*(eta*eta-1)**0.5)
    S2 = (res - eta - (eta*eta-1)**0.5)*exp(1j*T*(eta*eta-1)**0.5)
    res = (eta + ((eta*eta-1)**0.5) * ((S1+S2)/(S1-S2)))

    n = 1
    while (n <= N):
        g0 = sin(thB[n] - phi)  # gamma 0
        gH = -sin(thB[n] + phi)  # gamma H
        b = g0 / gH
        T = pi * G * ((FH[n]*FmH[n])**0.5) * t_l * DW[n] / (wl * (abs(g0*gH)**0.5))
        eta = (-b*(th-thB[n])*sin(2*thB_S) - 0.5*G*F0[n]*(1-b)) / ((abs(b)**0.5) * G * DW[n] * (FH[n]*FmH[n])**0.5)
        S1 = (res - eta + (eta*eta-1)**0.5)*exp(-1j*T*(eta*eta-1)**0.5)
        S2 = (res - eta - (eta*eta-1)**0.5)*exp(1j*T*(eta*eta-1)**0.5)
        res = (eta + ((eta*eta-1)**0.5) * ((S1+S2)/(S1-S2)))
        n += 1
    return convolve(abs(res)**2, resol, mode='same')
Example #46
def metrique_pheno_derivative(ndvi=sp.empty):
    """
    This method uses the second derivative of the NDVI to identify the dates
    of the beginning of season, the end of season, and more.
     Parameters:
     ----------
        
        ndvi : the evolution of the NDVI for one pixel over one year
    
    """
    
    try:
        ndviMin = ndvi.min()  # minimum value
        ndviMax = ndvi.max()  # maximum value
        #ndviMean = ndvi.mean()  # mean value
        
        indMin = int(sp.median(sp.where(ndvi == ndviMin)))  # index of the minimum
        indMax = int(sp.median(sp.where(ndvi == ndviMax)))  # index of the maximum
        d1 = sp.convolve(ndvi, [1, -1], 'same')  # first derivative approximation
        d2 = sp.convolve(ndvi, [1, -2, 1], 'same')  # second derivative approximation
        
        k1=d1[:-1]*d1[1:] #to find inflection point
        k2=d2[:-1]*d2[1:] #to find inflection point
        
        ind0d1=(sp.where(k1[:indMax]<0))[0][-1]
        ind0d2=(sp.where(k2[:indMax]<0))[0][-1]

        sos=ind0d2
        sos=sp.nanmax([ind0d1,ind0d2])
        
        ind0d22=(sp.where(k2[indMax+1:]<0))[0][0]

        
        eos=ind0d22+indMax
        
        los=eos-sos
        out = [sos, eos+1, los, indMin+1, indMax+1, ndviMin, ndviMax]  # +1 because indices start at 0
    except Exception:
        out = [-1, -1, -1, -1, -1, -1, -1]
    
    return out
Example #47
    def __call__(self,xnew):
        xnew.sort()
        # no extrapolation
        if (xnew < self.x[0]).any() or (xnew > self.x[-1]).any():
            raise ValueError("new abscissas are outside of the original domain")

        self.out = sp.zeros(xnew.size)
        for ex in xnew:
            k = self._get_kernel(ex,self.window)
            yi = sp.convolve(self.y,k,mode='same')
            self._tidy(ex,xnew,yi)

        return self.out
Example #48
def smooth(args):

    args = prsr.parse_args(args)

    # Create the average table
    hshmp = {}
    fls = ['measure.csv']
    tbls = [ np.genfromtxt(fl,skip_header=1) for fl in fls ]
    # First we build up a big intensity to luminance map
    for tbl in tbls:
        for rw in tbl:
            if rw[0] in hshmp:
                hshmp[rw[0]] = np.concatenate([hshmp[rw[0]],rw[1:]])
            else:
                hshmp[rw[0]] = rw[1:]

    # Now we average the values, clearing all nans from the picture.
    for ky in hshmp.keys():
        values = hshmp[ky][~np.isnan(hshmp[ky])]
        # set outliers to NaN. Outliers are values that are more than 0.01 cd
        # or more than 0.1% of their value from the closest measurement at the
        # same intensity.
        min_diff = np.empty_like(values)
        for i in range(len(values)):
            idx = np.ones(len(values), dtype=bool)
            idx[i] = False
            min_diff[i] = np.min(np.abs(values[idx] - values[i]))
        values[(min_diff > 0.01) & (min_diff / values > 0.001)] = np.NaN
        hshmp[ky] = np.mean(values[~np.isnan(values)])
        if np.isnan(hshmp[ky]):
            raise RuntimeError('no valid measurement for %f' % ky)
    tbl = np.array([list(hshmp.keys()), list(hshmp.values())]).transpose()
    tbl = tbl[tbl[:,0].argsort()]

    # And smooth it
    krn = [0.2, 0.2, 0.2, 0.2, 0.2]
    wfl = 'smooth.csv'

    smthd = tbl[:,1]

    for i in range(args.ordr):
        smthd = np.hstack((np.ones(2) * smthd[0], smthd, np.ones(2) * smthd[-1]))
        smthd = sp.convolve(smthd, krn, 'valid')

    print('Saving to File...')
    tbl[:, 1] = smthd
    ofl = open(wfl,'w')
    ofl.write('Input Luminance\r\n')
    np.savetxt(ofl,tbl)
    ofl.close()
Example #49
def f_Refl_Substrate_fit(Data):
    a = P4Rm()

    G = a.ParamDict['G']
    thB_S = a.ParamDict['thB_S']
    resol = a.ParamDict['resol']
    b_S = a.ParamDict['b_S']
    FH = a.ParamDict['FH']
    FmH = a.ParamDict['FmH']
    F0 = a.ParamDict['F0']
    th = a.ParamDict['th']	
    eta = (-b_S*(th-thB_S)*sin(2*thB_S) - 0.5*G*F0[0]*(1-b_S)) / ((abs(b_S)**0.5) * G * (FH[0]*FmH[0])**0.5)
    res = (eta - signe(eta.real)*((eta*eta - 1)**0.5)) * (FH[0] / FmH[0])**0.5
    return convolve(abs(res)**2, resol, mode='same')
Example #50
def mteo(data, kvalues=[1, 3, 5], condense=True):
    """multiresolution Teager energy operator using given k-values [MTEO]

    The multi-resolution Teager energy operator (MTEO) applies TEO operators
    of varying k-values and returns the reduced maximum response TEO for each
    input sample.

    To assure a constant noise power over all kteo channels, we convolve the
    individual kteo responses with a window:
    h_k = hamming(4k+1) / sqrt(3*sum(hamming(4k+1)**2) + sum(hamming(4k+1))**2),
    as suggested in Choi et al., 2006.

    :type data: ndarray
    :param data: The signal to operate on. ndim=1
    :type kvalues: list
    :param kvalues: List of k-values to run the kteo for. If you want to give
        a single k-value, either use the kteo directly or put it in a list
        like [2].
    :type condense: bool
    :param condense: if True, use max operator condensing onto one time series,
        else return a multichannel version with one channel per kvalue.
        Default=True
    :return: ndarray- Array of same shape as the input signal, holding the
        response of the kteo which response was maximum after smoothing for
        each sample in the input signal.
    """

    # inits
    rval = sp.zeros((data.size, len(kvalues)))

    # evaluate the kteos
    for i, k in enumerate(kvalues):
        try:
            rval[:, i] = kteo(data, k)
            win = sp.hamming(4 * k + 1)
            win /= sp.sqrt(3 * (win ** 2).sum() + win.sum() ** 2)
            rval[:, i] = sp.convolve(rval[:, i], win, 'same')
        except Exception:
            rval[:, i] = 0.0
            log.warning('MTEO: could not calculate kteo for k=%s, '
                        'data-length=%s',
                        k, data.size)
    # zero the edge samples, where the kteo windows do not fully fit, in
    # every channel (the original indexed only the last loop column i)
    rval[:max(kvalues), :] = rval[-max(kvalues):, :] = 0.0

    # return
    if condense is True:
        rval = rval.max(axis=1)
    return rval
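The helper kteo is not shown in this snippet. A minimal sketch of the standard k-Teager energy operator it presumably implements, psi_k[n] = x[n]**2 - x[n-k]*x[n+k], with the k edge samples on each side left at zero:

import scipy as sp

def kteo(data, k=1):
    """k-Teager energy operator: psi_k[n] = x[n]**2 - x[n-k]*x[n+k]"""
    rval = sp.zeros_like(data)
    rval[k:-k] = data[k:-k] ** 2 - data[:-2 * k] * data[2 * k:]
    return rval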
Example #51
def _basic_smooth(signal, kernel):
    """basic smoothing using the explicit kernel given

    :Parameters:
        signal : ndarray
            multichanneled signal [data,channel]
        kernel : ndarray
            kernel used for smoothing
    """

    if kernel.size >= signal.shape[0]:
        raise ValueError('kernel window size larger than signal length')
    rval = N.zeros_like(signal)
    for i in range(signal.shape[1]):
        rval[:, i] = N.convolve(signal[:, i], kernel, 'same')
    return rval / kernel.sum()
Example #52
	def filter(self,lowcutoff, highcutoff):
		self.lowcutoff = lowcutoff
		self.highcutoff = highcutoff
		nyq = int(self.samplingFrequency / 2)  # integer: used as tap count and index
		low = float(lowcutoff)
		high = float(highcutoff)
		#Lowpass filter
		a = signal.firwin(nyq, cutoff = low/nyq, 
									window = 'blackmanharris')
		#Highpass filter with spectral inversion
		b = - signal.firwin(nyq, cutoff = high/nyq, 
									window = 'blackmanharris') 
		b[nyq // 2] = b[nyq // 2] + 1
		#Combine into a bandpass filter
		self._d = -(a + b)
		self._d[nyq // 2] = self._d[nyq // 2] + 1
		self.signal = convolve(self._d, self.signal)
Example #53
def plotFishPosition(jsonData, startState=1, endState=0):
	state = jsonData['stateinfo']

	#HACK
	for nS in range(len(state)):
		if state[nS][1] == startState:
			break
	
	for nE in reversed(range(len(state))):
		if state[nE][1] == endState:
			break
				   
	st = state[nS][0]
	et = state[nE][0]
	tracking = getTracking(jsonData)
	tracking = tracking[np.logical_and(tracking[:,0] > st, tracking[:,0] < et),:].copy()
	frametime = tracking[:,0] - st
	positionx = tracking[:,1] 
	positiony = tracking[:,2] 
	for i in range(nS,nE):
		c1 = state[i][2]
		c2 = state[i][3]
		c1 = 'Yellow' if c1=='On' else c1
		c1 = 'White' if c1=='Off' else c1
		c2 = 'Yellow' if c2=='On' else c2
		c2 = 'White' if c2=='Off' else c2

		p1 = mpl.patches.Rectangle((state[i][0]-st,0),
					   width=state[i+1][0]-state[i][0],
					   height=40,alpha=0.5,
					   color=c1)
		p2 = mpl.patches.Rectangle((state[i][0]-st,40),
					   width=state[i+1][0]-state[i][0],
					   height=40,alpha=0.5,
					   color=c2)						
		pyplot.gca().add_patch(p1)
		pyplot.gca().add_patch(p2)
	pyplot.plot(frametime, positionx, 'k.')
	import scipy
	pyplot.plot(frametime, scipy.convolve(positionx,np.ones(400)/400, mode='same'), 'r-')
	pyplot.ylim([0,80])
	pyplot.xlim([0,et-st])
	pyplot.plot([0,et-st],[40,40],'y-')
	pyplot.ylabel('Fish position')
	pyplot.xlabel('Frame Time')
Example #54
 def residual_leastsq(self, p, y, x):
     a = P4Rm()
     P4Rm.ParamDict["_fp_min"] = p
     self.strain_DW()
     res = f_Refl_fit(a.AllDataDict["geometry"], self.Data4f_Refl)
     y_cal = convolve(abs(res) ** 2, a.ParamDict["resol"], mode="same")
     y_cal = y_cal / y_cal.max() + a.AllDataDict["background"]
     self.count += 1
     if self.count % 50 == 0:
         self.f_strain_DW()
         sleep(0.2)
         deformation = [p]
         evt = LiveEvent(S4R.Live_COUNT, -1, y_cal, None, deformation)
         wx.PostEvent(self.parent, evt)
     if self.need_abort == 1:
         return log10(y_cal) - log10(y_cal)
     else:
         return log10(y) - log10(y_cal)
Example #55
def sgolayderiv(myarray, F):
    """Take the Savitsky-Golay derivative, F must be 5,7 or 9
    need to make this better
    """
    array_size = myarray.shape
    if F == 5:
        conv = scipy.array([-1, 8, 0, -8, 1])
        numb = 12
    elif F == 7:
        conv = scipy.array([-22, 67, 58, 0, -58, -67, 22])
        numb = 252
    elif F == 9:
        conv = scipy.array([-86, 142, 193, 126, 0, -126, -193, -142, 86])
        numb = 1188

    conv_array = scipy.convolve(myarray, conv, 1) / numb  # mode=1 is 'same'

    return conv_array
Example #56
def plotFishPositionVsTime(jsonData, startState=1, endState=0, smooth=5, axis=0, fmt='b-',wid=1):
    state = jsonData['stateinfo']
    midLine = (jsonData['tankSize_mm'][axis])/2.0

    st,_,nS,_ = aba.state_to_time(jsonData,startState)
    _,et,_,nE = aba.state_to_time(jsonData,endState)

    tracking = aba.getTracking(jsonData)
    tracking = tracking[np.logical_and(tracking[:,0] > st, tracking[:,0] < et),:].copy()
    frametime = tracking[:,0] - st
    position = tracking[:,axis+1]
    
    if 'OMRinfo' in jsonData.keys():
        results = aba.getOMRinfo(jsonData, tankLength =midLine*2)
        color = {-1:'red', 1:'blue'}
        hatch = {-1:'\\', 1:'/'}
        os = results['omrResults']['st']
        oe = results['omrResults']['et']
        od = results['omrResults']['dir']
        for n in range(len(os)):
            p1 = mpl.patches.Rectangle((os[n]-st,0),
                                       width=oe[n]-os[n],
                                       height=midLine*2,alpha=0.5,
                                       color=[.5,1,.5],hatch=hatch[od[n]])
            pyplot.gca().add_patch(p1)
            #pyplot.text(os[n]-st+(oe[n]-os[n])/3, 45, '%0.2f'%results['omrResults']['maxdist'][n])

    pyplot.plot(frametime, position, fmt, lw=1)
    if smooth>0:
        import scipy
        pyplot.plot(frametime, scipy.convolve(position,np.ones(smooth)/smooth, mode='same'))
    pyplot.ylim([0,midLine*2])
    pyplot.xlim([0,et-st])
    if axis==0:
        pyplot.ylabel('')
    else:
        pyplot.ylabel('')
    pyplot.xlabel('Time (s)')
Example #57
def main():
    parser = common.get_cmd_line_parser(description=__doc__)
    common.ParserArguments.filename(parser)
    common.ParserArguments.plot(parser)
    common.ParserArguments.set_defaults(parser, type='constant')
    args = parser.parse_args()

    sg = FFTGenerator(framerate=args.framerate,
                      verbose=args.debug)

    if args.plot:
        length = 0.05
    else:
        length = 2.0
    main_freqs = [440.0, 660.0]  # Hz
    harmony_freqs = [550.0]  # Hz (major third)

    main_track = sg.generate(main_freqs, length=length * 3)
    print("440 and 660HZ main track is %s seconds or %s frames" %
          (length * 3, len(main_track)))

    harmony_note = sg.generate(harmony_freqs, length=length)
    print("%sHz Harmony is audible for %s seconds or %s frames" %
          (harmony_freqs, length, len(harmony_note)))

    print("Insert the harmony note at the right place a zeroed track.")
    note_frames = seconds_to_frame(length)
    harmony_track = Waveform(scipy.zeros(len(main_track)))
    for index in range(note_frames, note_frames * 2):
        harmony_track._wavedata[index] = (
                harmony_note._wavedata[index - note_frames - 1])

    preprocessed_harmony_track = Waveform(harmony_track._wavedata.copy())
    for index, frame in enumerate(preprocessed_harmony_track.frames):
        preprocessed_harmony_track._wavedata[index] = 0.5 - (0.5 * frame)

    convolution_data = scipy.convolve(main_track.frames,
                                      preprocessed_harmony_track.frames,
                                      mode="full")
    print("Convolution (including mirrored data) is %s seconds or %s frames" %
          (frame_to_seconds(len(convolution_data), framerate=sg.framerate),
           len(convolution_data)))

    print("Normalizing Convolution Amplitude")
    convolution = Waveform(normalize(
        convolution_data[:int(len(convolution_data)/2)]))

    if args.plot:
        _, subplots = pyplot.subplots(4, 1)
        subplots[0].plot(main_track.frames)
        subplots[0].set_title('Main Track 440.0 and 660.0 Hz - Root and Fifth')
        subplots[1].plot(harmony_track.frames)
        subplots[1].set_title('Harmony Major Third (550.0 Hz)')
        subplots[2].plot(preprocessed_harmony_track.frames)
        subplots[2].set_title('Preprocessed Harmony Track')
        subplots[3].plot(convolution.frames)
        subplots[3].set_title('Convolved track of the two waveforms.')
        pyplot.show()
    else:
        from potty_oh.wav_file import wav_file_context
        with wav_file_context(args.filename) as fout:
            fout.write_frames(convolution.frames)

    return 0