Example #1
def ddwt(x, num_scales):
    """
    Дискретное вейвлет-преобразование без прореживания
    :param x: входной сигнал
    :param num_scales: число уровней разложения
    :return:
    """
    h = np.array([1, 3, 3, 1], float) / 8
    g = np.array([2, -2], float)
    signal_len = len(x)

    detail = []
    approx = []
    ap = x.copy()
    detail.append(ap.copy())  # level zero stores the original signal
    approx.append([])

    for s in range(num_scales):
        dly = 2**s
        hif = convolve(ap, g, mode="full")[dly:dly+signal_len]
        detail.append(hif)
        approx.append(ap)
        if s < num_scales-1:
            ap = convolve(ap, h, mode="full")[dly:dly+signal_len]
            dly_lo = len(h)-1
            ap[:dly_lo] = ap[dly_lo]  # hack
            # instead of decimating the signal (Mallat's scheme) we expand the
            # filter impulse responses
            h = fexpand(h)
            g = fexpand(g)

    return approx, detail
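# Note: fexpand is not defined above. A minimal sketch, assuming it performs the
# "algorithme a trous" upsampling (a zero inserted between adjacent filter taps),
# followed by a small usage example:
import numpy as np
from scipy.signal import convolve

def fexpand(f):
    out = np.zeros(2 * len(f) - 1)
    out[::2] = f  # original taps, with zeros in between
    return out

x = np.sin(2 * np.pi * 5 * np.linspace(0, 1, 256))
approx, detail = ddwt(x, num_scales=4)
print(len(detail), len(detail[1]))  # 5 stored levels; every level keeps all 256 samples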
Example #2
def makePriorGrid(zs,rs,dr,outfile,rmin=15,rmax=30):
    rMids = np.arange(16.,28.,0.1)
    drs = 0.5*np.ones(len(rMids))
    drs[rMids<23]=0.5
    drs[rMids<20]=1.0
    drs[rMids<19]=2.0
    zEdges=np.arange(-0.075,5.075,0.1)
    zMids = (zEdges[1:]+zEdges[:-1])/2.
    allH = []
    for i in range(len(rMids)):
        #print(rMids[i])
        rMsk = np.ma.masked_outside(rs,rMids[i]-dr,rMids[i]+dr)
        zMsk = np.ma.masked_array(zs,mask=np.ma.getmask(rMsk)).compressed()

        h = np.histogram(zMsk,bins=zEdges)[0]
        kernel=np.ones(5)*(1./5.)
        h2=sig.convolve(h,kernel,mode='same')
        h3=sig.convolve(h2,kernel,mode='same')
        g = interp1d(zMids,h3,bounds_error=False, fill_value=0.0)
        tot = integrate.quad(g,0.,7.)
        h3 = h3/tot[0]
        
        if i%5==0:
            plt.plot(zMids,h3,lw=3,alpha=0.75,color=cm.jet(i/len(rMids)),label='r='+str(rMids[i]))
        else:
            plt.plot(zMids,h3,lw=3,alpha=0.5,color=cm.jet(i/len(rMids)))
        allH.append(h3)
    return([rMids,zMids,np.array(allH)])
Example #3
def compute_harris_response(image):
    """ compute the Harris corner detector response function 
        for each pixel in the image"""

    #derivatives
    imx, imy = filtertools.gauss_derivatives(image, 3)

    #kernel for blurring
    gauss = filtertools.gauss_kernel(3)

    #compute components of the structure tensor
    Wxx = signal.convolve(imx*imx, gauss, mode='same')
    Wxy = signal.convolve(imx*imy, gauss, mode='same')
    Wyy = signal.convolve(imy*imy, gauss, mode='same')

    #determinant and trace
    Wdet = Wxx*Wyy - Wxy**2
    Wtr = Wxx + Wyy

    if numpy.count_nonzero(Wtr) == 0:
        return
    print(Wtr.shape)
    print(numpy.count_nonzero(Wtr))

    return Wdet / Wtr
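# The helpers in `filtertools` are not shown here (compare the Gaussian-derivative
# code in Example #30). A self-contained sketch of the same det/trace response,
# substituting scipy.ndimage.gaussian_filter for the derivatives and the blurring:
import numpy as np
from scipy.ndimage import gaussian_filter

def harris_response_sketch(image, sigma=3):
    imx = gaussian_filter(image, sigma, order=(0, 1))  # derivative along x
    imy = gaussian_filter(image, sigma, order=(1, 0))  # derivative along y
    Wxx = gaussian_filter(imx * imx, sigma)            # blurred structure tensor
    Wxy = gaussian_filter(imx * imy, sigma)
    Wyy = gaussian_filter(imy * imy, sigma)
    return (Wxx * Wyy - Wxy ** 2) / (Wxx + Wyy + 1e-12)

img = np.zeros((64, 64))
img[24:40, 24:40] = 1.0  # a bright square: strong response at its corners
resp = harris_response_sketch(img)
print(np.unravel_index(np.argmax(resp), resp.shape))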
Example #4
def test3DConvolution():
    kernel = array([[[-1, 1], [1, 0]], [[1, 0], [0, 0]]])
    kernel_cube = array([
        zeros((3,3)),
        [[0, 0, 0], [0, -1, 1], [0, 1, 0]],
        [[0, 0, 0], [0,  1, 0], [0, 0, 0]]])

    A = reshape(linspace(1,9,9), (3,3), order = 'C')
    B = array((A, A, A))

    C = operators.convolve(kernel, (3, 3, 3), order = 'F')
    adjointTest(C)

    np.testing.assert_allclose(
        reshape(C._forward(ravel(B, order = 'F')), (3, 3, 3), order = 'F'),
        signal.convolve(B, kernel, 'same'))

    np.testing.assert_allclose(
        reshape(C._forward(ravel(B, order = 'F')), (3, 3, 3), order = 'F'),
        signal.convolve(B, kernel_cube, 'same'))

    np.testing.assert_allclose(
        reshape(C._adjoint(ravel(B, order = 'F')), (3, 3, 3), order = 'F'),
        signal.convolve(B,
            np.flipud(np.fliplr(kernel_cube[:,:,::-1])), 'same'))
Example #5
def computeTensor(im, sigmaG=1, factorSigma=4):

    # returns 3d array of the size of the image
    # yx stores xx, yx, and yy components of the tensor

    # get the luminance of the image, use [0.3, 0.6, 0.1]
    # use numpy's dot
    # blur the image
    imLum = lum(im)
    
    imLumBlurred = zeros( ( height(im), width(im) ) )

    ndimage.filters.gaussian_filter( imLum, sigmaG, 0, imLumBlurred )

    gradX = signal.convolve(imLumBlurred, Sobel, mode='same')
    gradY = signal.convolve(imLumBlurred, transpose(Sobel), mode='same')
    
    # construct 3 2d arrays of the elements of the tensor
    gradXX = gradX*gradX
    gradYY = gradY*gradY
    gradXY = gradX*gradY

    ndimage.filters.gaussian_filter( gradXX, sigmaG * factorSigma, 0, gradXX )
    ndimage.filters.gaussian_filter( gradXY, sigmaG * factorSigma, 0, gradXY )
    ndimage.filters.gaussian_filter( gradYY, sigmaG * factorSigma, 0, gradYY )


    # construct RGB image based on these vals
    out = constantIm(height(im), width(im), 0.0)

    out[:,:,0] = gradXX
    out[:,:,1] = gradXY
    out[:,:,2] = gradYY

    return out
Example #6
def basefreq(audiofile):
    """
    This function reads in the audio file and does the hann windowed fft of 
    the right input. It then smooths the output using a gaussian filter and
    then finds the peaks. It returns the peaks in the right audio channel since
    testing showed there was no significant difference in the two.
    """
    #read the data into an ndarray using scikits-audiolab        
    data, rate, enc = al.aiffread(audiofile)
    #split the left and right channel
    datar = data[:,1]
    datal = data[:,0]
    #take the fft of both of the channels with the hann window applied
    #the hann window reduces spectral leakage in the FFT     
    dftr = abs(fft.fft(datar*signal.hann(len(datar))))
    dftl = abs(fft.fft(datal*signal.hann(len(datal))))
    #compute the frequencies in the FFT
    freq = float(rate)/float(len(datar))
    freqs = np.arange(len(dftr)//2 + 99)*freq  # +99: the 'full' convolution below adds len(c)-1 samples
    dftr = dftr[0:np.size(dftr)//2]
    dftl = dftl[0:np.size(dftl)//2]
    #smooth the fft with a gaussian
    c = signal.gaussian(100,20)
    dftr = signal.convolve(dftr,c)
    dftl = signal.convolve(dftl,c)
    #find the significant peaks in each channel
    peaksr = findpeaks(dftr,freqs)
    peaksl = findpeaks(dftl,freqs)
    #plot the output fft for testing
    #plt.plot(freqs,dftr)
    #plt.show()
    #print peaksr
    return peaksr
Example #7
def filtervertical(H, minleafDomain):
	"""
	This function applies the matched filter to the horizontal 2D histogram.
	Returns the filter response for both directions, as stairs can face both ways.
	https://www.youtube.com/watch?v=S7qbelm_4Y8 --> explains the matched filter well; couldn't make the spectralpython matched filter work
	"""
	# from skimage.feature import canny
	# edges = canny(H)
	# plt.imshow(edges,cmap=plt.cm.gray)
	# plt.show()

	filt = create_matched_filter(minleafDomain)
	# Note that convolving with the time-reversed wavelet is identical to cross-correlating with the wavelet itself (matched filtering) --> http://crewes.org/ForOurSponsors/ResearchReports/2002/2002-46.pdf
	
	filty=np.transpose(filt) # transpose to also get stairs in other direction
	fr1=signal.convolve(H,filt, mode='same')

	# plt.subplot(1,3,1)
	# plt.imshow(fr1,cmap='spectral',interpolation='none')
	# plt.title('vert matched filter')
	# plt.colorbar()

	fr2=signal.convolve(H,filty, mode='same')
	# fr3=signal.convolve2d(H,filty, mode='same') # should give the same result as fr2
	# fr3 = fr3**2 # doesn't really work as there are very high 'outliers' that just take everything away

	# plt.subplot(1,3,2)
	# plt.imshow(fr2,cmap='spectral',interpolation='none')
	# plt.title('vert matched filter transpose')
	# plt.colorbar()

	return fr1, fr2
Example #8
    def test_input_swapping(self):
        small = arange(8).reshape(2, 2, 2)
        big = 1j * arange(27).reshape(3, 3, 3)
        big += arange(27)[::-1].reshape(3, 3, 3)

        out_array = array(
            [[[0 + 0j, 26 + 0j, 25 + 1j, 24 + 2j],
              [52 + 0j, 151 + 5j, 145 + 11j, 93 + 11j],
              [46 + 6j, 133 + 23j, 127 + 29j, 81 + 23j],
              [40 + 12j, 98 + 32j, 93 + 37j, 54 + 24j]],

             [[104 + 0j, 247 + 13j, 237 + 23j, 135 + 21j],
              [282 + 30j, 632 + 96j, 604 + 124j, 330 + 86j],
              [246 + 66j, 548 + 180j, 520 + 208j, 282 + 134j],
              [142 + 66j, 307 + 161j, 289 + 179j, 153 + 107j]],

             [[68 + 36j, 157 + 103j, 147 + 113j, 81 + 75j],
              [174 + 138j, 380 + 348j, 352 + 376j, 186 + 230j],
              [138 + 174j, 296 + 432j, 268 + 460j, 138 + 278j],
              [70 + 138j, 145 + 323j, 127 + 341j, 63 + 197j]],

             [[32 + 72j, 68 + 166j, 59 + 175j, 30 + 100j],
              [68 + 192j, 139 + 433j, 117 + 455j, 57 + 255j],
              [38 + 222j, 73 + 499j, 51 + 521j, 21 + 291j],
              [12 + 144j, 20 + 318j, 7 + 331j, 0 + 182j]]])

        assert_array_equal(convolve(small, big, 'full'), out_array)
        assert_array_equal(convolve(big, small, 'full'), out_array)
        assert_array_equal(convolve(small, big, 'same'),
                           out_array[1:3, 1:3, 1:3])
        assert_array_equal(convolve(big, small, 'same'),
                           out_array[0:3, 0:3, 0:3])
        assert_raises(ValueError, convolve, small, big, 'valid')
        assert_array_equal(convolve(big, small, 'valid'),
                           out_array[1:3, 1:3, 1:3])
Example #9
    def __init__(self, shapein, kernel, mode="full", fft=False, **kwargs):
        if fft:
            from scipy.signal import fftconvolve as convolve
            # check kernel shape parity
            if np.any(np.asarray(kernel.shape) % 2 != 1):
                raise ValueError("Kernels with even shapes are not handled for now.")
        else:
            from scipy.signal import convolve

        self.kernel = kernel
        self.mode = mode

        # reverse kernel
        s = (slice(None, None, -1), ) * kernel.ndim
        self.rkernel = kernel[s]
        # reverse mode
        if mode == 'full':
            self.rmode = 'valid'
        elif mode == 'valid':
            self.rmode = 'full'
        elif mode == 'same':
            self.rmode = 'same'
        # shapeout
        if mode == 'full':
            shapeout = [s + ks - 1 for s, ks in zip(shapein, kernel.shape)]
        if mode == 'valid':
            shapeout = [s - ks + 1 for s, ks in zip(shapein, kernel.shape)]
        if mode == 'same':
            shapeout = shapein

        matvec = lambda x: convolve(x, self.kernel, mode=self.mode)
        rmatvec = lambda x: convolve(x, self.rkernel, mode=self.rmode)
        NDOperator.__init__(self, shapein, shapeout, matvec, rmatvec, **kwargs)
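# The mode pairing above (full <-> valid, plus the reversed kernel) makes rmatvec
# the adjoint of matvec. A quick 1-D numerical check of <A x, y> == <x, A^T y>:
import numpy as np
from scipy.signal import convolve

x = np.random.randn(16)
k = np.random.randn(5)
Ax = convolve(x, k, mode='full')          # forward operator
y = np.random.randn(Ax.size)
ATy = convolve(y, k[::-1], mode='valid')  # reversed kernel, reversed mode
print(np.allclose(np.dot(Ax, y), np.dot(x, ATy)))  # True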
Example #10
    def __init__(self, shapein, kernel, mode='reflect', cval=0.0,
                 origin=0, **kwargs):
        """
        Generate a convolution operator wrapping
        scipy.ndimage.convolve function.
        The kernel is reversed for the transposition.
        Note that kernels with even shapes are not handled.
        """
        if kernel.ndim == 1:
            from scipy.ndimage import convolve1d as convolve
        else:
            from scipy.ndimage import convolve

        self.kernel = kernel
        self.mode = mode
        self.cval = cval
        self.origin = origin

        # check kernel shape parity
        if np.any(np.asarray(self.kernel.shape) % 2 != 1):
            raise ValueError("kernel should have odd shape.")

        # reverse kernel
        s = (slice(None, None, -1), ) * kernel.ndim
        self.rkernel = kernel[s]

        shapeout = shapein

        matvec = lambda x: convolve(x, self.kernel, mode=mode, cval=cval,
                                    origin=origin)
        rmatvec = lambda x: convolve(x, self.rkernel, mode=mode, cval=cval,
                                     origin=origin)
        NDOperator.__init__(self, shapein, shapeout, matvec, rmatvec, **kwargs)
Example #11
def lsf_to_lpc(all_lsf):
    if len(all_lsf.shape) < 2:
        all_lsf = all_lsf[None]
    order = all_lsf.shape[1]
    all_lpc = np.zeros((len(all_lsf), order + 1))
    for i in range(len(all_lsf)):
        lsf = all_lsf[i]
        zeros = np.exp(1j * lsf)
        sum_zeros = zeros[::2]
        diff_zeros = zeros[1::2]
        sum_zeros = np.hstack((sum_zeros, np.conj(sum_zeros)))
        diff_zeros = np.hstack((diff_zeros, np.conj(diff_zeros)))
        sum_filt = np.poly(sum_zeros)
        diff_filt = np.poly(diff_zeros)

        if order % 2 != 0:
            deconv_diff = sg.convolve(diff_filt, [1, 0, -1])
            deconv_sum = sum_filt
        else:
            deconv_diff = sg.convolve(diff_filt, [1, -1])
            deconv_sum = sg.convolve(sum_filt, [1, 1])

        lpc = .5 * (deconv_sum + deconv_diff)
        # Last coefficient is 0 and not returned
        all_lpc[i] = lpc[:-1]
    return np.squeeze(all_lpc)
Example #12
    def convolve(self, f):
        from scipy.signal import convolve

        e_field = self.E_field_as_numpy()
        f_data = np.zeros((self.dim_x(), self.dim_y()))
        result = np.zeros((self.numberEnergies(), self.dim_x(), self.dim_y(), 2), dtype=np.complex128)

        print("Convolve I_X ", end="")
        for i_x, x_coordinate in enumerate(self.absolute_x_coordinates()):
            if i_x % 100 == 0:
                print(" ", i_x, end="")
            for i_y, y_coordinate in enumerate(self.absolute_y_coordinates()):
                f_data[i_x, i_y] = f(x_coordinate, y_coordinate)

        for index_energy in range(self.numberEnergies()):
            for pol in (0,1):
                print("Convolving pol", pol)
                #r = convolve(f_data, f_data,mode='same')
                r = convolve(e_field[index_energy,:,:,pol].real, f_data,mode='same')
                r = r + 1j *convolve(e_field[index_energy,:,:,pol].imag, f_data,mode='same')

                for i_x, x_coordinate in enumerate(self.absolute_x_coordinates()):
                    for i_y, y_coordinate in enumerate(self.absolute_y_coordinates()):
                        result[index_energy, i_x, i_y, pol] = r[i_x, i_y]

        convolved_wavefront = NumpyWavefront(result, self.x_start(), self.x_end(), self.y_start(), self.y_end(), self.z())

        return convolved_wavefront
Example #13
def get_corr (im, pars):

    # transform 2-d image to 1-d total signal vectors
    totsig, totsigla = get_totsig(im,pars.la)

    ln1 = im.npix // 2
    ln2 = im.npix
    quad_len = ln1*ln1

    # compute staypuft signal for each pixel
    mask = totsig > 40.0
    p0 = np.fabs(pars.ampscale * mask * totsig)
    p1 = np.fabs(pars.ampscale * mask * totsigla)

    ekern = np.exp(-np.arange(ln1*pars.tx)/pars.hh)
    qkern = pars.a1*np.arange(ln1*pars.tx) + pars.a2

    e = convolve (p0, ekern, mode='full')
    q = convolve (p1, qkern, mode='full')
    b = e[0:quad_len] + q[0:quad_len]

    # transform the correction vector back into a 2-d image quad
    b = b[::-1]
    b = np.reshape (b, (ln1,ln1))
    b = np.transpose(b)

    # replicate the correction into all 4 full image quads
    im.data[0:ln1,0:ln1] = b
    im.data[0:ln1,ln1:ln2] = b
    im.data[ln1:ln2,0:ln1] = b
    im.data[ln1:ln2,ln1:ln2] = b

    return im
Example #14
 def test_consistency_convolve_funcs(self):
     # Compare np.convolve, signal.convolve, signal.convolve2d
     a = np.arange(5)
     b = np.array([3.2, 1.4, 3])
     for mode in ["full", "valid", "same"]:
         assert_almost_equal(np.convolve(a, b, mode=mode), signal.convolve(a, b, mode=mode))
         assert_almost_equal(np.squeeze(signal.convolve2d([a], [b], mode=mode)), signal.convolve(a, b, mode=mode))
Example #15
def degrades(V, W, H, rate, s, niter=20):
    """
    Deconvolution by Gradient Descent with Sparsification.
    """
    V, W, H = normalize(V), normalize(W), normalize(H)
    for iter in range(niter):
        convolved_pieces = np.vstack([signal.convolve(W[r], H[r], mode='full') for r in range(W.shape[0])])
        convolved = np.sum(convolved_pieces, axis=0)
        delta = V - convolved
        projected_H = fft_deconvolve(delta, W)
        H = magical_entropy_hammer(H, H + projected_H * rate, 0.01, axis=1)
        
        
        convolved_pieces = np.vstack([signal.convolve(W[r], H[r], mode='full') for r in range(W.shape[0])])
        convolved = np.sum(convolved_pieces, axis=0)
        delta = V - convolved
        projected_W = fft_deconvolve(delta, H)
        W = magical_entropy_hammer(W, W + projected_W * rate, 0.001, axis=1)
        
        print(np.sum(np.abs(delta)))
        print(H)
        
    pylab.clf()
    pylab.subplot(311)
    pylab.plot(W.T)
    pylab.subplot(312)
    pylab.plot(H.T)
    pylab.subplot(313)
    pylab.plot(V)
    pylab.plot(convolved)
    return W, H
Example #16
def kfilter_coeffs(fs):
	# Pre filter (high shelf)
	f0 = 1500.0
	w0 = 2*np.pi*f0/fs
	Q = 0.5
	gain = 4.0
	A = 10**(gain/40)
	S = 1.0
	alpha = np.sin(w0)/2 * np.sqrt((A + 1/A)*(1/S - 1) + 2)
	b0 =    A*( (A+1) + (A-1)*np.cos(w0) + 2*np.sqrt(A)*alpha )
	b1 = -2*A*( (A-1) + (A+1)*np.cos(w0)                      )
	b2 =    A*( (A+1) + (A-1)*np.cos(w0) - 2*np.sqrt(A)*alpha )
	a0 =        (A+1) - (A-1)*np.cos(w0) + 2*np.sqrt(A)*alpha
	a1 =    2*( (A-1) - (A+1)*np.cos(w0)                      )
	a2 =        (A+1) - (A-1)*np.cos(w0) - 2*np.sqrt(A)*alpha
	b_pre = np.array([b0, b1, b2])/a0
	a_pre = np.array([a0, a1, a2])/a0

	# Highpass
	f0 = 38.0
	w0 = 2*np.pi*f0/fs
	Q = 0.5
	alpha = np.sin(w0)/(2*Q)
	b0 =  (1 + np.cos(w0))/2
	b1 = -(1 + np.cos(w0))
	b2 =  (1 + np.cos(w0))/2
	a0 =  1 + alpha
	a1 = -2*np.cos(w0)
	a2 =  1 - alpha
	b_hp = np.array([b0, b1, b2])/a0
	a_hp = np.array([a0, a1, a2])/a0

	b = signal.convolve(b_pre, b_hp)
	a = signal.convolve(a_pre, a_hp)
	return (b, a)
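# Convolving the two sets of biquad coefficients cascades the filters into a
# single 5-tap transfer function. A minimal usage sketch (48 kHz assumed):
import numpy as np
from scipy import signal

fs = 48000
b, a = kfilter_coeffs(fs)
print(len(b), len(a))             # 5 and 5: two cascaded biquads

t = np.arange(fs) / fs
x = np.sin(2 * np.pi * 1000 * t)  # 1 kHz test tone
y = signal.lfilter(b, a, x)       # apply the K-weighting filter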
Example #17
def gaussian( u, v, size) :
    """Smooths the velocity field with a Gaussian kernel.
    
    Parameters
    ----------
    u : 2d np.ndarray
        the u velocity component field
        
    v : 2d np.ndarray
        the v velocity component field
        
    size : int
        the half width of the kernel. Kernel
        has shape 2*size+1
        
    Returns
    -------
    uf : 2d np.ndarray
        the smoothed u velocity component field
        
    vf : 2d np.ndarray
        the smoothed v velocity component field    
        
    """
    g = _gaussian_kernel( size=size )
    uf = convolve( u, g, mode='same')
    vf = convolve( v, g, mode='same')
    return uf, vf
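# _gaussian_kernel is not shown. A plausible sketch: a normalized 2-D Gaussian
# of shape (2*size+1, 2*size+1), plus a tiny usage example.
import numpy as np
from scipy.signal import convolve

def _gaussian_kernel(size, sigma=None):
    sigma = sigma or size / 2.0
    ax = np.arange(-size, size + 1)
    xx, yy = np.meshgrid(ax, ax)
    g = np.exp(-(xx ** 2 + yy ** 2) / (2.0 * sigma ** 2))
    return g / g.sum()  # normalized so smoothing preserves the mean

u = np.random.randn(32, 32)
v = np.random.randn(32, 32)
uf, vf = gaussian(u, v, size=2)
print(uf.shape, vf.shape)  # same shape as the inputs ('same' mode)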
Example #18
  def __init__ (self, var, saxis, kernel, fft):
  # {{{
    ''' __init__()'''
    import numpy as np

    assert len(kernel) <= var.shape[saxis], 'Kernel must not be longer than dimension being smoothed.'

    # Construct new variable
    self.saxis = saxis
    self.var = var
    self.kernel = kernel
    self.fft = fft
    self.klen = len(kernel)

    # Normalize and reshape kernel
    self.kernel /= np.sum(self.kernel)
    self.kernel.shape = [self.klen if i == saxis else 1 for i in range(var.naxes)]

    # Determine which convolution function to use
    from scipy import signal as sg
    tdata = np.ones(len(kernel), 'd')
    if self.fft:
      try:
        sg.fftconvolve(tdata, kernel, 'same', old_behaviour=False)
        self._convolve = lambda x, y, z: sg.fftconvolve(x, y, z, old_behaviour=False)
      except TypeError:
        self._convolve = sg.fftconvolve
    else:
      try:
        sg.convolve(tdata, kernel, 'same', old_behaviour=False)
        self._convolve = lambda x, y, z: sg.convolve(x, y, z, old_behaviour=False)
      except TypeError:
        self._convolve = sg.convolve

    Var.__init__(self, var.axes, var.dtype, name=var.name, atts=var.atts, plotatts=var.plotatts)
Example #19
def gaussian_differentiation_kernel(sigma, num_stds, order, delta, scale):
    """
    http://en.wikipedia.org/wiki/Scale_space#Gaussian_derivatives
    :param sigma: standard deviation of the Gaussian kernel
    :param num_stds: kernel half-width in standard deviations
    :param order: derivative order per axis
    :param delta: sample spacing per axis
    :param scale: scale-normalization factor per axis
    :return: the composed Gaussian derivative kernel
    """
    delta = list(delta)
    g = gaussian_kernel(sigma, num_stds)
    d = np.ones((1,) * len(order))
    for i in range(len(order)):
        _d = differentiation_kernel(order[i]).astype(float)
        if len(_d) % 2 != 1:
            _d = (np.pad(_d, ((0, 1),), 'constant') + np.pad(_d, ((1, 0),), 'constant')) / 2.
            delta[i] *= 2
        _d /= delta[i] ** order[i]
        _d *= np.sqrt(scale[i]) ** order[i]
        shp = np.ones(len(order), int)
        shp[i] = _d.shape[0]
        _d.shape = shp
        d = spsig.convolve(d, _d)
    d = spsig.convolve(g, d)
    return d
Example #20
def find_peaks(fft_set, sign="+", alpha=0.15, threshold=2.0):
    """
    Method to find peaks on a fft set.
    
    :param fft_set: spectrum to search
    :param sign: "+" to look for maxima, "-" for minima
    :param alpha: noise threshold
    :param threshold: threshold passed to clean_adjacent_points
    :return: indices of the detected peaks
    """
    """

    if sign == "-":
        fft_set = -fft_set

    # Get derivative
    derivation_vector = [1, 0, -1]
    d_fft_set = convolve(fft_set, derivation_vector, "same")

    # Checking for sign-flipping and derivative
    _sign = np.sign(d_fft_set)
    d_sign = convolve(_sign, derivation_vector, "valid")

    candidates = np.where(d_fft_set > 0)[0] + (len(derivation_vector) - 1)

    peaks = sorted(set(candidates).intersection(np.where(d_sign == -2)[0] + 1))

    # Noise remover
    peaks = np.array(peaks)[fft_set[peaks] > alpha]

    return clean_adjacent_points(peaks, fft_set, float(threshold))
Example #21
def deriv2D(data,axis=-1,dx=1.0,noise_suppression=True):
  """ Takes 1D or 2D Derivative of 2D array using convolution
	
	result = deriv2D(data)
	result = deriv2D(data, dx)
	
	output is 2D (if only one axis specified)
	output is 3D if no axis specified [nx,ny,2] with the third dimension being [dfdx, dfdy]
	
	keywords:
	axis = 0/1  If no axis specified 2D derivative will be returned
	dx = 1.0    axis spacing, must be 2D if 2D deriv is taken - default is [1.0,1.0]
	noise_suppression = True   noise suppressing coefficients used to take derivative - default = True
  """
  s = data.shape
  if axis > len(s)-1:
    raise RuntimeError("ERROR: axis out of bounds for derivative")
 
  if noise_suppression:  
    if s[axis] < 11:
      raise RuntimeError("Data too small to use 11th order method")
    tmp = array([old_div(-1.0,512.0),old_div(-8.0,512.0),old_div(-27.0,512.0),old_div(-48.0,512.0),old_div(-42.0,512.0),0.0,old_div(42.0,512.0),old_div(48.0,512.0),old_div(27.0,512.0),old_div(8.0,512.0),old_div(1.0,512.0)])
  else:
    if s[axis] < 9:
      raise RuntimeError("Data too small to use 9th order method")
    tmp = array([old_div(1.0,280.0),old_div(-4.0,105.0),old_div(1.0,5.0),old_div(-4.0,5.0),0.0,old_div(4.0,5.0),old_div(-1.0,5.0),old_div(4.0,105.0),old_div(-1.0,280.0)])       
  
  N = old_div((tmp.size-1),2)
  if axis==1:
    W = transpose(tmp[:,None])
    data_deriv = convolve(data,W,mode='same')/dx*-1.0
    for i in range(s[0]):
      data_deriv[i,0:N-1] = old_div(deriv(data[i,0:N-1]),dx)
      data_deriv[i,s[1]-N:] = old_div(deriv(data[i,s[1]-N:]),dx)
    
  elif axis==0:
    W = tmp[:,None]
    data_deriv = convolve(data,W,mode='same')/dx*-1.0
    for i in range(s[1]):
      data_deriv[0:N-1,i] = old_div(deriv(data[0:N-1,i]),dx)
      data_deriv[s[0]-N:,i] = old_div(deriv(data[s[0]-N:,i]),dx)
  else:
    data_deriv = zeros((s[0],s[1],2))
    if np.size(dx) == 1:
      dx = array([dx,dx])
    
    W = tmp[:,None]#transpose(multiply(tmp,ones((s[1],tmp.size))))
    data_deriv[:,:,0] = convolve(data,W,mode='same')/dx[0]*-1.0
    for i in range(s[1]):
      data_deriv[0:N-1,i,0]  =  old_div(deriv(data[0:N-1,i]),dx[0])
      data_deriv[s[0]-N:s[0]+1,i,0] = old_div(deriv(data[s[0]-N:s[0]+1,i]),dx[0])
    
    W = transpose(tmp[:,None])#multiply(tmp,ones((s[0],tmp.size)))
    data_deriv[:,:,1] = convolve(data,W,mode='same')/dx[1]*-1.0
    for i in range(s[0]):
      data_deriv[i,0:N-1,1] = old_div(deriv(data[i,0:N-1]),dx[1])
      data_deriv[i,s[1]-N:s[1]+1,1] = old_div(deriv(data[i,s[1]-N:s[1]+1]),dx[1])

  return data_deriv
Example #22
def kernel_func(X, Y):
    r"""Smooth histogram Y with kernel Y

    """
    if Y is None:
        return X

    return (convolve(X, Y, mode="same")
            / convolve(np.ones_like(X), Y, mode="same"))
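# Dividing by convolve(ones, Y) renormalizes where the kernel only partially
# overlaps the data, so edges are not damped. A quick demonstration:
import numpy as np
from scipy.signal import convolve

X = np.ones(8)
Y = np.ones(3)
print(convolve(X, Y, mode="same"))  # [2. 3. 3. 3. 3. 3. 3. 2.] -- damped edges
print(kernel_func(X, Y))            # all ones -- edges renormalized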
Example #23
def dconv2d_k(dout,input,k,type):
    dk=None
    if type=='valid':
        dk=sg.convolve(input, numpy.rot90(dout,2),'valid')
    elif type=='full':
        dk=sg.convolve(dout, numpy.rot90(input,2),'valid')
    else:
        raise ValueError("type must be 'valid' or 'full'")
    return dk
Example #24
def ABAB(A,B,f,t):
    rate = 1/(t[1]-t[0])
    y = np.zeros(len(t))
    c1 = dirac_comb(f,t)
    c2 = half_period_shift(dirac_comb(f,t),f,rate)
    y1 = convolve(A,c1)
    y2 = convolve(B,c2)
    y = y1+ y2
    return y[0:len(t)]
Example #25
    def calculate_flow(self, tactile_array, rows, columns):
        """
        Calculates the optical flow in a tactile array using convolution as described
        in [1].


        :param tactile_array: A list (one-dimensional) containing the intensity values
                              of a tactile sensor.
        :type tactile_array: []

        :param rows: The vertical size, in tactels, of the tactile sensor.
        :type rows: int

        :param columns: The horizontal size, in tactels, of the tactile sensor.
        :type columns: int

        :return: The slip vectors in the horizontal (X) and vertical direction (Y).
        :rtype: (float, float)

        [1] Alcazar, J. A., and Barajas, L. G. "Estimating object grasp sliding via
            pressure array sensing." In Robotics and Automation (ICRA), 2012 IEEE
            International Conference on, pp. 1740-1746. IEEE, 2012.

        """
        assert (rows * columns) == len(tactile_array), "Rows and columns do not match array size!"
        tactile_matrix = numpy.array([tactile_array])
        tactile_matrix = tactile_matrix.reshape((rows, columns))

        # convolve the current tactile matrix with the previous tactile matrix
        if self.previous_tactile_matrix is None:
            convolved_matrix = signal.convolve(tactile_matrix, tactile_matrix)
        else:
            convolved_matrix = signal.convolve(
                self.previous_tactile_matrix, tactile_matrix
            )

        # compute slip indices
        [slip_index_x, slip_index_y] = slip_utils.calculate_slip_indices(convolved_matrix)

        # create slip vectors
        if self.previous_slip_index_x:
            slip_vector_x = slip_index_x - self.previous_slip_index_x
        else:
            slip_vector_x = slip_index_x

        if self.previous_slip_index_y:
            slip_vector_y = slip_index_y - self.previous_slip_index_y
        else:
            slip_vector_y = slip_index_y

        # update variables
        self.previous_tactile_matrix = tactile_matrix
        self.previous_slip_index_x = slip_index_x
        self.previous_slip_index_y = slip_index_y

        return slip_vector_x, slip_vector_y
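# The core idea above: convolving one frame with the reversed other frame is a
# cross-correlation, and its peak offset tracks the displacement. A standalone
# sketch (hypothetical 9x9 frames, not the class's tactile data):
import numpy as np
from scipy import signal

frame1 = np.zeros((9, 9))
frame1[3, 3] = 1.0
frame2 = np.roll(frame1, (1, 2), axis=(0, 1))       # pattern moved by (1, 2)
corr = signal.convolve(frame1, frame2[::-1, ::-1])  # cross-correlation
peak = np.unravel_index(np.argmax(corr), corr.shape)
center = (np.array(corr.shape) - 1) // 2
print(np.array(peak) - center)  # [-1 -2]: minus the displacement, with this argument order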
Example #26
def apply_filter(im, myfilter, dim):
    H, W = im.shape
    if dim == 'col':
        im_filt = sgn.convolve(im.flatten(), myfilter, 'same')
        im_filt = np.reshape(im_filt, [H, W])
    elif dim == 'row':
        im_filt = sgn.convolve(im.T.flatten(), myfilter, 'same')
        im_filt = np.reshape(im_filt, [W, H]).T
    else:
        raise ValueError("dim must be 'col' or 'row'")

    return im_filt
Example #27
    def get_mask(self, ar_flux):
        # detect low flux regions using threshold
        # convolve and divide by box_size to keep the same scale
        spec1_smooth = signal.convolve(ar_flux > -0.8, self.detect_boxcar, mode='same') / self.detect_box_size

        # expand the mask to nearby pixels by smoothing
        mask_thresh = np.array(spec1_smooth < 0.2)
        mask_smooth = signal.convolve(mask_thresh, self.mask_boxcar, mode='same') > 1

        return mask_smooth
Example #28
def peak_finder(arr, smoothing=5, ddy_thresh=-300, dy0_thresh=5):
    array        = medfilt(arr, smoothing) 
    x            = arange(len(array))
    kernel       = [4, 0, -4]
    dY           = convolve(array, kernel, 'same') 
    ddy          = convolve(dY, kernel, 'same')
    falloff      = -15000*exp(-0.003*x) #This has to be worked on
    masked_array = ma.masked_where(logical_or(ddy>falloff+ddy_thresh, abs(dY) > dy0_thresh) , arr) 
    x_masked     = ma.array(x, mask=masked_array.mask)
    return ma.compressed(x_masked)
Example #29
def computeRawAAL(rawPosVector, cMass=False, groverFormula=False):
    """
    Computation of angle-arc-length signature of a trajectory
    
    """
    
    rpv = rawPosVector
    kernel = np.array([-1, 1])
    if cMass:
        pcMass = np.sum(rpv, axis=0) / float(rpv.shape[0])  # centroid over all points
    else:
        M_ref = np.array([1,0, 0]).transpose()
#     M_ref = np.array([1,0])
    
    dx = signal.convolve(rpv[:, 0], kernel, 'same')
    dy = signal.convolve(rpv[:, 1], kernel, 'same')
    # dirty fix of large values in the first elements
    dx[0] = dx[1]
    dy[0] = dy[1]
    
    M_t = np.array([dx, dy]).transpose()
    
    alpha = []
    sign = 0
    for i in range(M_t.shape[0]):    
        if cMass:
            if i == 0:                
                M_ref = np.append(rpv[i, :] - pcMass, 0)
            else:
                M_ref = np.append(rpv[i-1, :] - pcMass, 0)
            
        m_t = np.append(M_t[i, :], 0)
        if groverFormula:
            inner = np.linalg.norm(np.cross(M_t[i, :], M_ref))
            outer = np.dot(m_t, M_ref)
            alpha += [np.arctan2(inner, outer)]
        else:
            dot = np.dot(m_t, M_ref)
            deNom = np.linalg.norm(m_t) * np.linalg.norm(M_ref)
            sign = 0
            if np.cross(m_t, M_ref)[2] >= 0:
                sign = 1
            else:
                sign = -1
            
            if deNom == 0:
                # no movement
                alpha += [0]
            else:
                alpha += [sign * np.arccos(dot / deNom)]

    M = np.add.accumulate(np.sqrt(M_t[:, 0]**2 + M_t[:, 1]**2))  # cumulative arc length
    
    
    return np.array([alpha, M]).transpose()
Example #30
def gauss_derivatives(im, n, ny=None):
    """ returns x and y derivatives of an image using gaussian 
        derivative filters of size n. The optional argument 
        ny allows for a different size in the y direction."""

    gx,gy = gauss_derivative_kernels(n, sizey=ny)

    imx = signal.convolve(im,gx, mode='same')
    imy = signal.convolve(im,gy, mode='same')

    return imx,imy
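# gauss_derivative_kernels is not shown. A plausible sketch returning the x- and
# y-derivative-of-Gaussian kernels on a (2n+1)-point grid:
import numpy as np

def gauss_derivative_kernels(size, sizey=None):
    sizey = sizey or size
    y, x = np.mgrid[-sizey:sizey + 1, -size:size + 1]
    sx, sy = size / 2.0, sizey / 2.0
    g = np.exp(-(x ** 2 / (2.0 * sx ** 2) + y ** 2 / (2.0 * sy ** 2)))
    gx = -x / sx ** 2 * g  # derivative of the Gaussian along x
    gy = -y / sy ** 2 * g  # derivative of the Gaussian along y
    return gx, gy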
Example #31
import numpy as np
from scipy import signal 
from matplotlib import pyplot as plt
import mysignals as sigs

corr_output_signal = signal.correlate(sigs.InputSignal_1kHz_15kHz, sigs.Impulse_response, mode="same")
conv_output_signal = signal.convolve(sigs.InputSignal_1kHz_15kHz, sigs.Impulse_response, mode="same")

plt.plot(corr_output_signal)
plt.show()

# correlation looks the same as convolution because the signal is symmetrical in this case


Example #32
import numpy as np
from matplotlib import pyplot as plt
from pwtools import signal
from scipy.signal import convolve, gaussian, correlate
from scipy.fftpack import fft

nn = 200
nadd = 5 * nn
t = np.linspace(0.123, 0.567, nn)
x = np.sin(2 * np.pi * 10 * t) + np.cos(2 * np.pi * 3 * t) + np.sin(2 * np.pi * 30 * t)
dt = t[1] - t[0]

pad_x = signal.pad_zeros(x, nadd=nadd)
pad_welch_x = signal.pad_zeros(x * signal.welch(nn), nadd=nadd)
kern = gaussian(M=20, std=2)  # width M must be 6..10 x std
smooth_pad_x = convolve(signal.pad_zeros(x, nadd=nadd), kern, 'same') / 10.0
##mirr_x = signal.mirror(x)
##welch_mirr_x = signal.mirror(x)*signal.welch(2*nn-1)
##pad_welch_mirr_x = signal.pad_zeros(signal.mirror(x)*signal.welch(2*nn-1),
##                                    nadd=2*nn-1)

plt.figure()
plt.plot(pad_x, label='pad_x (padded signal)')
plt.plot(pad_welch_x, label='pad_welch_x')
plt.plot(smooth_pad_x, label='smooth_pad_x')
plt.xlabel('time [s]')
plt.xlim(0, 300)
plt.legend()

plt.figure()
f, d = signal.ezfft(x, dt)
Example #33
 def test_complex(self):
     x = array([1 + 1j, 2 + 1j, 3 + 1j])
     y = array([1 + 1j, 2 + 1j])
     z = convolve(x, y)
     assert_array_equal(z, array([2j, 2 + 6j, 5 + 8j, 5 + 5j]))
Example #34
 def test_same_mode(self):
     a = [1, 2, 3, 3, 1, 2]
     b = [1, 4, 3, 4, 5, 6, 7, 4, 3, 2, 1, 1, 3]
     c = convolve(a, b, 'same')
     d = array([57, 61, 63, 57, 45, 36])
     assert_array_equal(c, d)
Example #35
 def test_valid_mode(self):
     a = [1, 2, 3, 6, 5, 3]
     b = [2, 3, 4, 5, 3, 4, 2, 2, 1]
     c = convolve(a, b, 'valid')
     assert_array_equal(c, array([70, 78, 73, 65]))
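# The expected 'valid' length follows max(len) - min(len) + 1, the positions
# where the shorter sequence fully overlaps the longer one. A quick check:
import numpy as np
from scipy.signal import convolve

a = np.ones(6)
b = np.ones(9)
print(len(convolve(b, a, 'valid')))  # 9 - 6 + 1 = 4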
Example #36
        raise ValueError("a must be 2-d")
    code = r"""
    int i,j;
    for(i=1;i<Na[0]-1;i++) {
        for(j=1;j<Na[1]-1;j++) {
            B2(i,j) = A2(i,j) + A2(i-1,j)*0.5 +
                      A2(i+1,j)*0.5 + A2(i,j-1)*0.5
                      + A2(i,j+1)*0.5
                      + A2(i-1,j-1)*0.25
                      + A2(i-1,j+1)*0.25
                      + A2(i+1,j-1)*0.25
                      + A2(i+1,j+1)*0.25;
        }
    }
    """
    b = zeros_like(a)
    weave.inline(code, ['a', 'b'])
    return b


a = [None] * 10
print(example1(a))
print(a)

a = rand(512, 512)
b = arr(a)

h = [[0.25, 0.5, 0.25], [0.5, 1, 0.5], [0.25, 0.5, 0.25]]
import scipy.signal as ss
b2 = ss.convolve(a, h, 'same')  # image first, so 'same' keeps the image shape
Example #37
tinnom = 'Sin(x)/(x)'
s = np.linspace(-10, 10, 50)
sin = np.sin(s) / (s)

winnom = 'Step'
es = np.arange(0, 10, 0.5)
esc = np.piecewise(es, es >= 5, [1, 0])

signom = 'Impulse'
i = np.linspace(0, 10, 50)
imp = np.zeros(len(i))
imp[25] = 1

sig = imp
win = esc
filtered = signal.convolve(sig, win, mode='same') / sum(win)
import matplotlib.pyplot as plt
fig, (ax_orig, ax_win, ax_filt) = plt.subplots(3, 1, sharex=True)
ax_orig.plot(sig)
ax_orig.set_title(signom)
ax_orig.margins(0, 0.1)
ax_win.plot(win)
ax_win.set_title(winnom)
ax_win.margins(0, 0.1)
ax_filt.plot(filtered)
ax_filt.set_title('Convolved Signals')
ax_filt.margins(0, 0.1)
fig.tight_layout()
plt.show()
Example #38
 def smooth(self, a, filter_n=51, width=0.5):
     W = signal.slepian(filter_n, width=width)
     W = W/np.sum(W)
     s = signal.convolve(a, W, mode='valid')
     return np.hstack([[s[0]]*(filter_n//2), s, [s[-1]]*(filter_n//2)])
Example #39
def moving_average(sig, r=5):
    from numpy import ones
    from scipy.signal import convolve
    return convolve(sig, ones((r, )) / r, mode='valid')
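# In 'valid' mode the window never hangs over the edges, so n samples yield
# n - r + 1 averages:
import numpy as np
print(moving_average(np.arange(10.0)))  # [2. 3. 4. 5. 6. 7.] -- length 10 - 5 + 1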
Example #40
def add_color_patches_rand_gt(data,
                              opt,
                              prev,
                              p=.125,
                              num_points=None,
                              use_avg=True,
                              samp='normal'):
    # Add random color points sampled from ground truth based on:
    #   Number of points
    #   - if num_points is 0, then sample from geometric distribution, drawn from probability p
    #   - if num_points > 0, then sample that number of points
    #   Location of points
    #   - if samp is 'normal', draw from N(0.5, 0.25) of image
    #   - otherwise, draw from U[0, 1] of image

    N, C, H, W = data['ab'].shape
    data['hint_B'] = torch.zeros_like(data['ab'])
    data['mask_B'] = torch.zeros_like(data['gray'])
    if prev is not None:
        l2_dist = torch.sum(torch.pow(data['ab'] - prev, 2),
                            dim=1).detach().numpy()
        k = 1 / opt.mean_kernel**2 * np.ones(
            [opt.mean_kernel, opt.mean_kernel])
        l2_mean = np.zeros([
            l2_dist.shape[0],
            int(opt.fineSize / opt.mean_kernel),
            int(opt.fineSize / opt.mean_kernel)
        ])
        for i in range(l2_dist.shape[0]):
            l2_mean[i, :, :] = signal.convolve(
                l2_dist[i, :, :], k,
                mode="valid")[::opt.mean_kernel, ::opt.mean_kernel]  # 7x7 mean

    for nn in range(N):
        pp = 0
        cont_cond = True
        while cont_cond:
            if num_points is None:  # draw from geometric
                # embed()
                cont_cond = np.random.rand() < (1 - p)
            else:  # add certain number of points
                cont_cond = pp < num_points
            if not cont_cond:  # skip out of loop if condition not met
                continue

            P = np.random.choice(opt.sample_Ps)  # patch size

            # sample location
            if samp == 'normal':  # geometric distribution
                h = int(
                    np.clip(
                        np.random.normal((H - P + 1) / 2., (H - P + 1) / 4.),
                        0, H - P))
                w = int(
                    np.clip(
                        np.random.normal((W - P + 1) / 2., (W - P + 1) / 4.),
                        0, W - P))
            elif samp == 'uniform':  # uniform distribution
                h = np.random.randint(H - P + 1)
                w = np.random.randint(W - P + 1)
            # sample location - L2 distance method
            else:
                area_h_, area_w_ = np.where(
                    l2_mean[nn, :, :] == np.max(l2_mean[nn, :, :]))
                area_h, area_w = area_h_[0] * opt.mean_kernel, area_w_[
                    0] * opt.mean_kernel
                # set to 0 in case of repeating
                l2_mean[nn, area_h_[0], area_w_[0]] = 0
                max_area = l2_dist[nn, area_h:area_h + opt.mean_kernel,
                                   area_w:area_w + opt.mean_kernel]
                h_, w_ = np.where(max_area == np.max(max_area))
                h, w = h_[0] + area_h, w_[0] + area_w

            # add color point
            if use_avg:
                # embed()
                data['hint_B'][nn, :, h:h + P, w:w + P] = torch.mean(
                    torch.mean(data['ab'][nn, :, h:h + P, w:w + P],
                               dim=2,
                               keepdim=True),
                    dim=1,
                    keepdim=True).view(1, C, 1, 1)
            else:
                data['hint_B'][nn, :, h:h + P,
                               w:w + P] = data['ab'][nn, :, h:h + P, w:w + P]

            data['mask_B'][nn, :, h:h + P, w:w + P] = 1

            # increment counter
            pp += 1

    data['mask_B'] -= opt.mask_cent
    data['clicks'] = torch.cat((data['mask_B'], data['hint_B']), dim=1)

    return data
Example #41
def vad(x, framelen=None, sr=None, frameshift=None):
    if sr is None:
        sr = 16000
    if framelen is None:
        framelen = 256
    if frameshift is None:
        frameshift = 128
    amp_th1 = 8
    amp_th2 = 20
    zcr_th = 5

    maxsilence = 8
    minlen = 15
    status = 0
    count = 0
    silence = 0

    x = x / np.absolute(x).max()

    tmp1 = enframe(x[0:(len(x) - 1)], framelen, frameshift)
    tmp2 = enframe(x[1:len(x)], framelen, frameshift)
    signs = (tmp1 * tmp2) < 0
    diffs = (tmp1 - tmp2) > 0.05
    zcr = np.sum(signs * diffs, axis=1)

    filter_coeff = np.array([1, -0.9375])
    pre_emphasis = signal.convolve(x, filter_coeff)[0:len(x)]
    amp = np.sum(np.absolute(enframe(pre_emphasis, framelen, frameshift)), axis=1)

    amp_th1 = min(amp_th1, amp.max() / 3)
    amp_th2 = min(amp_th2, amp.max() / 8)

    x1 = []
    x2 = []
    t = 0

    for n in range(len(zcr)):
        if status == 0 or status == 1:
            if amp[n] > amp_th1:
                x1.append(max(n - count - 1, 1))
                status = 2
                silence = 0
                count = count + 1
            elif amp[n] > amp_th2 or zcr[n] > zcr_th:
                status = 1
                count = count + 1
            else:
                status = 0
                count = 0
            continue
        if status == 2:
            if amp[n] > amp_th2 or zcr[n] > zcr_th:
                count = count + 1
            else:
                silence = silence + 1
                if silence < maxsilence:
                    count = count + 1
                elif count < minlen:
                    status = 0
                    silence = 0
                    count = 0
                else:
                    status = 0
                    count = count - silence // 2
                    x2.append(x1[t] + count - 1)
                    t = t + 1

    return x1, x2
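# enframe is not shown. A minimal sketch, assuming it slices the signal into
# frames of framelen samples advanced by frameshift samples:
import numpy as np

def enframe(x, framelen, frameshift):
    n_frames = 1 + (len(x) - framelen) // frameshift
    idx = np.arange(framelen)[None, :] + frameshift * np.arange(n_frames)[:, None]
    return x[idx]  # shape (n_frames, framelen)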
Example #42
def smoothing(response, response_time):
    '''
    Smoothing response
    :param response: numpy array (n_trial, n_time)
    :param response_time: numpy array (n_time,)
    :return: smoothed response, numpy array (n_trial, n_time)
    '''
    dt = response_time[1] - response_time[0]

    filter_type = 'gauss'
    filter_width = 0.04

    # Make filter
    if filter_type == 'gauss':

        # Filter parameters
        fw = filter_width

        # Time axis
        nt = round(fw*3.75/dt)
        tf = np.arange(-nt, nt+1)*dt

        # Make filter
        rf = np.exp(-tf**2/(2.*fw**2))

    elif filter_type == 'box':

        # Filter parameters
        fw = filter_width

        # Box width
        nt = round(fw/2/dt)

        # Make filter
        rf = np.ones(2*nt+1)

    rf /= sum(rf)

    # Initialize
    response_smth = response.copy()  # copy so the input is not modified in place

    # if len(rf) < 2:
    #     return response_smth

    # Temporal smoothing
    # Dimensions
    n_trial, n_time = response.shape

    from scipy.signal import convolve

    # Loop over trials
    for i_trial in range(n_trial):
        # Pad extremes
        rpad = np.concatenate((np.repeat(response[i_trial,0], len(rf)),
                               response[i_trial,:],
                               np.repeat(response[i_trial,-1], len(rf))))

        filtered = convolve(rpad, rf, mode='same')

        # Shift
        rfil = filtered[len(rf):len(rf)+n_time]

        response_smth[i_trial] = rfil

    return response_smth
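# Minimal usage sketch: smooth random "responses" sampled at 1 kHz.
import numpy as np

response_time = np.arange(0.0, 1.0, 0.001)        # 1 ms steps
response = np.random.rand(3, response_time.size)  # (n_trial, n_time)
smoothed = smoothing(response, response_time)
print(smoothed.shape)                             # (3, 1000)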
Example #43
def minimize_junction_Y(amplitude, peaks, peak_type, dx):
    print("Analyzing Y jaws...")

    amp_prev = 0
    amp_filt_prev = 0

    fig = plt.figure(figsize=(10, 6))  # create the plot

    kk = 0  # counter for figure generation
    for j in range(0, amplitude.shape[1] - 1):
        for k in range(j + 1,
                       amplitude.shape[1]):  # looping through remaining images
            # print('k=',k)
            amp_base_res = signal.convolve(amplitude[:, j],
                                           amplitude[:, j],
                                           mode="full")
            amp_base_res = signal.resample(
                amp_base_res / np.amax(amp_base_res),
                int(np.ceil(len(amp_base_res) / 2)),
            )

            amp_overlay_res = signal.convolve(amplitude[:, k],
                                              amplitude[:, k],
                                              mode="full")
            amp_overlay_res = signal.resample(
                amp_overlay_res / np.amax(amp_overlay_res),
                int(np.ceil(len(amp_overlay_res) / 2)),
            )
            peak1, _ = find_peaks(amp_base_res, prominence=0.5)
            peak2, _ = find_peaks(amp_overlay_res, prominence=0.5)

            if (abs(peak2 - peak1) < 2500
                ):  # if the two peaks are close together proceed to analysis
                kk = kk + 1  # incrementing the figure generator
                cumsum_prev = 1e7
                if peak2 < peak1:
                    amp_base_res = amplitude[:, k]
                    amp_overlay_res = amplitude[:, j]
                else:
                    amp_base_res = amplitude[:, j]
                    amp_overlay_res = amplitude[:, k]

                if (peak_type[kk - 1] == 0
                    ):  # kk-1 to start from the null element of the array
                    inc = -1
                else:
                    inc = 1

                for i in range(0, inc * 80, inc * 1):
                    # x = np.linspace(0, 0 + (len(amp_base_res) * dx), len(amplitude),
                    #                 endpoint=False)  # definition of the distance axis
                    amp_overlay_res_roll = np.roll(amp_overlay_res, i)

                    # amplitude is the vector to analyze +-500 samples from the center
                    amp_tot = (
                        amp_base_res[peaks[kk - 1] - 1000:peaks[kk - 1] + 1000]
                        + amp_overlay_res_roll[peaks[kk - 1] -
                                               1000:peaks[kk - 1] + 1000]
                    )  # divided by 2 to normalize

                    # print('Analyzing peak=',peaks[kk - 1])
                    # xsel = x[peaks[kk-1] - 1000:peaks[kk-1] + 1000]

                    amp_filt = rm.running_mean(amp_tot, 281)
                    cumsum = np.sum(np.abs(amp_tot - amp_filt))

                    if (  # pylint: disable = no-else-break
                            cumsum > cumsum_prev):  # then we went too far
                        break
                    else:
                        amp_prev = amp_tot
                        amp_filt_prev = amp_filt
                        cumsum_prev = cumsum

                ax = fig.add_subplot(amplitude.shape[1] - 1, 1, kk)
                ax.plot(amp_prev)
                ax.plot(amp_filt_prev)
                if kk == 1:
                    ax.set_title("Minimization result", fontsize=16)
                if (kk == amplitude.shape[1] - 1
                    ):  # if we reach the final plot the add the x axis label
                    ax.set_xlabel("distance [mm]")
                ax.set_ylabel("amplitude")
                if peaks[kk - 1] != 0:
                    ax.annotate(
                        "delta=" + str(abs(i - inc * 1) * dx) + " mm",
                        xy=(2, 1),
                        xycoords="axes fraction",
                        xytext=(0.35, 0.10),
                    )
                else:
                    ax.annotate(
                        "delta= 0 mm (NO PEAK FOUND)",
                        xy=(2, 1),
                        xycoords="axes fraction",
                        xytext=(0.35, 0.10),
                    )

            # else:
            #     print(j, k, 'the data is not contiguous finding another curve in dataset')

    return fig
Example #44
    def mix_spc_noi_0401(self, mode):
        """
        (1) load multi-channel speech
        (2) calculate noise rir (=speech rir + 90/180/270 degree)
        (3) convolve noise (random selected) and noise rir
        (4) mix multi-channel speech and multi-channel noise with SNR (random selected)
        :param mode: tr/cv/tt
        :return: save noisy(mix), clean(s1), noise(s2) files / save 'output.csv' file
        """

        # path set
        spc_path = os.getcwd() + '/multi_channel_speech/' + mode
        noi_path = os.getcwd() + '/Data/' + mode + '/noise'
        snr_list = [-5, 0, 5, 10, 15, 20]
        save_path = os.getcwd() + '/output/' + mode
        Path(save_path + '/mix').mkdir(parents=True, exist_ok=True)
        Path(save_path + '/s1').mkdir(parents=True, exist_ok=True)
        Path(save_path + '/s2').mkdir(parents=True, exist_ok=True)

        # multi-channel speech list
        s_list = glob(spc_path + '/*.wav')
        # single-channel noise list
        n_list = glob(noi_path + '/*.wav')

        # make 'output.csv'
        f = open(f'output/{mode}/output.csv', 'w', newline='')
        wr = csv.writer(f)
        wr.writerow([
            'order', 'speech', 'room', 'speech_rir', 'noise', 'noise_rir',
            'azimuth', 'snr'
        ])

        for i, s in enumerate(s_list):
            multi_ch_aud, fs = librosa.core.load(s, sr=None, mono=False)
            multi_ch_aud_na = os.path.splitext(os.path.basename(s))[0]

            # select noise azimuth
            split = multi_ch_aud_na.split('_')
            spc_na = f'{split[-2]}_{split[-1]}'
            spc_rir_na = f'{split[1]}_{split[2]}_{split[3]}'
            # print(time.time() - start)

            # noise rir = speech rir + 90/180/270 degree
            az = int(split[1][2:])
            room = split[0]
            n = np.random.randint(1, 4)
            noi_az = (az + 90 * n) % 360  # +90 +180 +270 degree
            split = multi_ch_aud_na.split('_')
            noi_rir_na = f'az{noi_az}_{split[2]}_{split[3]}'
            noi_rir = os.getcwd() + f'/rir/{mode}/{split[0]}/{noi_rir_na}.npz'
            if mode == 'cv':
                noi_rir = os.getcwd() + f'/rir/tr/{split[0]}/{noi_rir_na}.npz'

            # select and load random noise
            idx_n = np.random.randint(0, len(n_list))
            noi, fs2 = librosa.core.load(n_list[idx_n], sr=None)
            noi_na = os.path.splitext(os.path.basename(n_list[idx_n]))[0]
            assert fs == fs2

            # convolve noise with RIR
            npz = np.load(noi_rir, allow_pickle=True)
            rir = npz['rir']
            rand_start = np.random.randint(
                0, noi.shape[0] - multi_ch_aud.shape[1] - 8191)
            multi_ch_noi_tmp = ss.convolve(
                rir, noi[rand_start:rand_start + multi_ch_aud.shape[1] + 8191,
                         np.newaxis])

            multi_ch_noi = multi_ch_noi_tmp[8191:-8191, :].transpose()

            # mix speech and noise with SNR
            idx_snr = np.random.randint(0, len(snr_list))
            snr = snr_list[idx_snr]
            noisy, clean, noise = self.snr_mix(multi_ch_aud, multi_ch_noi, snr)

            audiowrite(
                save_path + f'/mix/noisy_{i + 1:#05d}_{noi_na}_{snr}.wav',
                noisy.transpose(), fs)
            audiowrite(save_path + f'/s1/clean_{i + 1:#05d}.wav',
                       clean.transpose(), fs)
            audiowrite(save_path + f'/s2/noise_{i + 1:#05d}.wav',
                       noise.transpose(), fs)

            wr.writerow(
                [i, spc_na, room, spc_rir_na, noi_na, noi_rir_na, 90 * n, snr])

        f.close()
Example #45
zeroframe = np.mean(bigzero, axis=2)

print(np.mean(zerolist - zeroframe))
print(np.std(zerolist - zeroframe))
#quit()
hdulist = fits.open('Dark40.fit')
darkarr = hdulist[0].data
redarr = darkarr - zeroframe
bins = range(-20, 40)
plt.hist(np.ravel(redarr), bins, density=True)
#plt.show()

#print(np.mean(bigzero(100,100,:)),np.std(bigzero(100,100,:)))
#hdulist = fits.open('Dark120.fit')
#dark40arr = hdulist[0].data
#hdulist.close()

#redim= np.abs(dark40arr - zeroarr)

#bins= range(0,40)
#plt.hist(np.ravel(redim), bins,normed='True')

xlist = np.arange(-20, 50, 0.01)
ylist = Poisson(xlist, 2.4)
ylist[ylist == np.inf] = 0
print(np.mean(redarr))
zlist = gaussian(np.arange(0, 40, 0.01), 20, 8)
conlist = convolve(ylist, zlist, mode='same') / 100
plt.plot(xlist, conlist)
plt.show()
Example #46
    def analysis(self):
        """
        For each session and channel, summarize spiking activity per cluster.
        """

        if self.subject_data is None:
            print('%s: compute or load data first with .load_data()!' % self.subject)

        # open a parallel pool using joblib
        # with Parallel(n_jobs=int(NUM_CORES/2) if NUM_CORES != 1 else 1,  verbose=5) as parallel:

        # loop over sessions
        for session_name, session_grp in self.subject_data.items():
            print('{} processing.'.format(session_grp.name))

            # and channels
            for channel_num, channel_grp in tqdm(session_grp.items()):
                self.res[channel_grp.name] = {}
                self.res[channel_grp.name]['firing_rates'] = {}

                # load behavioral events
                events = pd.read_hdf(self.subject_data.filename, channel_grp.name + '/event')
                events['item_name'] = events.name.apply(lambda x: x.split('_')[1])

                # load eeg for this channel.
                eeg_channel = self._create_eeg_timeseries(channel_grp, events)

                # length of buffer in samples. Used below for extracting smoothed spikes
                # samples = int(np.ceil(float(eeg_channel['samplerate']) * self.buffer))

                # also store region and hemisphere for easy reference. and time
                self.res[channel_grp.name]['region'] = eeg_channel.event.data['region'][0]
                self.res[channel_grp.name]['hemi'] = eeg_channel.event.data['hemi'][0]

                # for each cluster in the channel, summarize the spike activity
                for cluster_num, cluster_grp in channel_grp['spike_times'].items():
                    clust_str = cluster_grp.name.split('/')[-1]

                    # first compute number of spikes at each timepoint and the time in samples when each occurred
                    spike_counts, spike_rel_times = self._create_spiking_counts(cluster_grp, events,
                                                                                eeg_channel.shape[1])

                    # based on the spiking, compute firing rate and normalized firing rate for the default
                    # presentation interval
                    firing_df = self._make_firing_df(eeg_channel.time.data, spike_counts, events)

                    # also smooth firing rate and compute the above info for binned chunks of time
                    kern_width_samples = int(eeg_channel.samplerate.data / (1000/self.kern_width))
                    kern = signal.gaussian(kern_width_samples, self.kern_sd)
                    kern /= kern.sum()
                    smoothed_spike_counts = np.stack([signal.convolve(x, kern, mode='same')
                                                      for x in spike_counts], 0)

                    # now compute the firing rate data for the binned intervals
                    df_binned = []
                    for this_bin in self.binned_intervals:
                        this_df = self._make_firing_df(eeg_channel.time.data, smoothed_spike_counts, events,
                                                       item_interval=this_bin)
                        this_df = pd.concat([this_df], keys=['{}-{}'.format(*this_bin)])
                        df_binned.append(this_df)
                    df_binned = pd.concat(df_binned, axis=0)

                    self.res[channel_grp.name]['firing_rates'][clust_str] = {}
                    self.res[channel_grp.name]['firing_rates'][clust_str]['paired_firing_df'] = firing_df
                    self.res[channel_grp.name]['firing_rates'][clust_str]['binned_paired_firing_df'] = df_binned
Example #47
    def mix_spc_noi_tt(self):
        """
        (1) load single-channel speech and single-channel noise
        (2) select speech rir and noise rir
        (3) convolve speech(noise) and speech(noise) rir
        (4) mix multi-channel speech and multi-channel noise
        (5) room(2) * noise(4) * SNR(5)
        :return: save noisy(mix), clean(s1), noise(s2) files / save 'output.csv' file
        """

        # path set
        spc_path = '/home/dail/PycharmProjects/DCCRN/datasets/tr/clean'
        noi_path = '/home/dail/PycharmProjects/DCCRN/datasets/tr/noise'
        snr_list = [-5, 0, 5, 10, 15]
        save_path = os.getcwd() + '/output/tt'
        Path(save_path + '/mix').mkdir(parents=True, exist_ok=True)
        Path(save_path + '/s1').mkdir(parents=True, exist_ok=True)
        Path(save_path + '/s2').mkdir(parents=True, exist_ok=True)

        # multi-channel speech list
        s_list = glob(spc_path + '/*.wav')
        # single-channel noise list
        n_list = glob(noi_path + '/*.wav')

        # make 'output.csv'
        f = open(f'output/tt/output.csv', 'w', newline='')
        wr = csv.writer(f)
        wr.writerow([
            'order', 'speech', 'room', 'speech_rir', 'noise', 'noise_rir',
            'snr'
        ])
        cnt = 0

        for i, s in enumerate(s_list):
            # multi_ch_aud, fs = librosa.core.load(s, sr=None, mono=False)
            # multi_ch_aud_na = os.path.splitext(os.path.basename(s))[0]
            spc, fs = audioread(s)
            spc_na = os.path.splitext(os.path.basename(s))[0]

            # select speech/noise rir
            # np.random.seed(1)
            rand_azi_s = np.random.choice(
                np.concatenate((np.arange(31), np.arange(330, 360)), axis=0))
            # np.random.seed(1)
            rand_azi_n = np.random.choice(np.arange(180, 271))
            rand_r = np.round(np.random.choice(np.linspace(1, 2.2, 5)), 1)
            spc_rir_na = f'az{rand_azi_s}_el0_r{rand_r}'
            noi_rir_na = f'az{rand_azi_n}_el0_r{rand_r}'

            room = ['R4', 'R5']

            # room
            for n in range(2):
                spc_rir = os.getcwd() + f'/rir/tt/{room[n]}/{spc_rir_na}.npz'
                npz_s = np.load(spc_rir, allow_pickle=True)
                rir_s = npz_s['rir']
                multi_ch_spc = ss.convolve(rir_s, spc[:, np.newaxis])
                multi_ch_spc = multi_ch_spc.transpose()

                noi_rir = os.getcwd() + f'/rir/tt/{room[n]}/{noi_rir_na}.npz'
                npz_n = np.load(noi_rir, allow_pickle=True)
                rir_n = npz_n['rir']

                # noise
                for idx_n in range(len(n_list)):

                    noi, fs2 = librosa.core.load(n_list[idx_n], sr=None)
                    noi_na = os.path.splitext(os.path.basename(
                        n_list[idx_n]))[0]
                    assert fs == fs2

                    rand_start = np.random.randint(
                        0, noi.shape[0] - multi_ch_spc.shape[1] - 8191)
                    multi_ch_noi_tmp = ss.convolve(
                        rir_n, noi[rand_start:rand_start +
                                   multi_ch_spc.shape[1] + 8191, np.newaxis])
                    multi_ch_noi = multi_ch_noi_tmp[8191:-8191, :].transpose()

                    # mix speech and noise with SNR
                    # idx_snr = np.random.randint(0, len(snr_list))

                    for l in range(len(snr_list)):
                        cnt = cnt + 1
                        snr = snr_list[l]

                        noisy, clean, noise = self.snr_mix(
                            multi_ch_spc, multi_ch_noi, snr)

                        audiowrite(
                            save_path +
                            f'/mix/noisy_{cnt:05d}_{noi_na}_{snr}.wav',
                            noisy.transpose(), fs)
                        audiowrite(save_path + f'/s1/clean_{cnt:05d}.wav',
                                   clean.transpose(), fs)
                        audiowrite(save_path + f'/s2/noise_{cnt:05d}.wav',
                                   noise.transpose(), fs)

                        wr.writerow([
                            cnt, spc_na, room[n], spc_rir_na, noi_na,
                            noi_rir_na, snr
                        ])

        f.close()
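
    # `snr_mix` is called above but not shown in this snippet. A minimal
    # sketch, assuming it scales the reverberant noise so that
    # 10*log10(P_speech / P_noise) equals `snr` (not the original code):
    def snr_mix(self, clean, noise, snr):
        ps = np.mean(clean ** 2)  # speech power, arrays are (channels, samples)
        pn = np.mean(noise ** 2)  # noise power
        scale = np.sqrt(ps / (pn * 10.0 ** (snr / 10.0)))
        noise = noise * scale
        return clean + noise, clean, noise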
Example #48
0
def convolution_filter(x, filt, nsides=2):
    """
    Linear filtering via convolution. Centered and backward displaced moving
    weighted average.

    Parameters
    ----------
    x : array_like
        data array, 1d or 2d, if 2d then observations in rows
    filt : array_like
        Linear filter coefficients in reverse time-order. Should have the
        same number of dimensions as x though if 1d and ``x`` is 2d will be
        coerced to 2d.
    nsides : int, optional
        If 2, a centered moving average is computed using the filter
        coefficients. If 1, the filter coefficients are for past values only.
        Both methods use scipy.signal.convolve.

    Returns
    -------
    y : ndarray, 2d
        Filtered array, number of columns determined by x and filt. If a
        pandas object is given, a pandas object is returned. The index of
        the return is the exact same as the time period in ``x``

    Notes
    -----
    If nsides == 1, x is filtered ::

        y[n] = filt[0]*x[n-1] + ... + filt[n_filt-1]*x[n-n_filt]

    where n_filt is len(filt).

    If nsides == 2, x is filtered around lag 0 ::

        y[n] = filt[0]*x[n - n_filt/2] + ... + filt[n_filt / 2] * x[n]
               + ... + filt[n_filt - 1] * x[n + n_filt/2]

    where n_filt is len(filt). If n_filt is even, then more of the filter
    is forward in time than backward.

    If filt is 1d or (nlags,1) one lag polynomial is applied to all
    variables (columns of x). If filt is 2d, (nlags, nvars) each series is
    independently filtered with its own lag polynomial, uses loop over nvar.
    This is different than the usual 2d vs 2d convolution.

    Filtering is done with scipy.signal.convolve, so it will be reasonably
    fast for medium sized data. For large data fft convolution would be
    faster.
    """
    # for nsides == 1, shift the index instead of using 0 for the 0 lag;
    # this allows correct handling of NaNs
    if nsides == 1:
        trim_head = len(filt) - 1
        trim_tail = None
    elif nsides == 2:
        trim_head = int(np.ceil(len(filt) / 2.) - 1) or None
        trim_tail = int(np.ceil(len(filt) / 2.) - len(filt) % 2) or None
    else:  # pragma : no cover
        raise ValueError("nsides must be 1 or 2")

    pw = PandasWrapper(x)
    x = array_like(x, 'x', maxdim=2)
    filt = array_like(filt, 'filt', ndim=x.ndim)

    if filt.ndim == 1 or min(filt.shape) == 1:
        result = signal.convolve(x, filt, mode='valid')
    else:  # filt.ndim == 2
        nlags = filt.shape[0]
        nvar = x.shape[1]
        result = np.zeros((x.shape[0] - nlags + 1, nvar))
        if nsides == 2:
            for i in range(nvar):
                # could also use np.convolve, but easier for switching to fft
                result[:, i] = signal.convolve(x[:, i],
                                               filt[:, i],
                                               mode='valid')
        elif nsides == 1:
            for i in range(nvar):
                result[:, i] = signal.convolve(x[:, i],
                                               np.r_[0, filt[:, i]],
                                               mode='valid')
    result = _pad_nans(result, trim_head, trim_tail)
    return pw.wrap(result)
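
# Usage sketch for convolution_filter above (assuming the statsmodels
# import path; the data values are illustrative):
import numpy as np
from statsmodels.tsa.filters.filtertools import convolution_filter

x = np.arange(10, dtype=float)
# centered 3-term moving average; trimmed edges come back as NaN
print(convolution_filter(x, np.ones(3) / 3, nsides=2))
# backward-looking average of the two previous observations
print(convolution_filter(x, np.ones(2) / 2, nsides=1))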
Example #49
0
                  (4 * K * t)) + np.exp(-((x + x_o + v * t - 2 * n * l)**2) /
                                        (4 * K * t))
    I_x += np.exp(-((x - x_o - v * t - 2 * n * l)**2) /
                  (4 * K * t)) + np.exp(-((x + x_o + v * t + 2 * n * l)**2) /
                                        (4 * K * t))

sinks = np.exp(
    -(Q + d + s) * t
)  #numpy array of size len(t). Calculates the effect of ventilation, deactivation and settling sinks

I = 1 / (
    4 * np.pi * K * t
) * I_y * I_x * sinks  #Impulse function at (x,y) - numpy array of size len(t)

#use Scipy Convolve function for S and I to calculate the C at (x,y)
C = convolve(S, I)[0:len(t)] / (h / 2)
#use Scipy cumtrapz function to calculate the dose (array of len(t)-1) at (x,y)
dose = p * cumtrapz(C, t)
#calculate P (array of len(t)-1)
P = 1 - np.exp(-dose * k)

t = t / 60  #convert time-axis to minutes for plotting of graph

P = P * 100  #convert probability to percent for plotting of graph
#Uses Matplotlib.pyplot to produce a line graph P vs t[1:]
plt.plot(t[1:], P)
#Change axis fontsize to 12
plt.xticks(fontsize=12)
plt.yticks(fontsize=12)
#label axes
plt.xlabel("Time (min)")
Example #50
0
for i in range(box_width, len(d)-1):
    if i % 1000 == 0:
        print(i)
    
    data = np.array(d[(i - box_width+1):(i+1)])
    sigs = np.zeros((data.shape[0], 3))
    sigs[..., 0] = (data[..., 0] + data[..., 1])/2.0
    sigs[..., 1] = (data[..., 2] + data[..., 3])/2.0
    sigs[..., 2] = (data[..., 4] + data[..., 5])/2.0
    
    # fft_len = np.fft.rfft(data[..., 0]).shape[0]
    features = np.array([])
    
    for j in range(3):
        sig = sigs[..., j]
        conv1 = signal.convolve(sig, wavelet1, 'same')
        conv2 = signal.convolve(sig, wavelet2, 'same')
        fourier = np.fft.fft(sig)
        fourier1 = np.fft.fft(conv1) 
        fourier2 = np.fft.fft(conv2)
        features = np.hstack([features, np.abs(fourier), np.abs(fourier1), np.abs(fourier2)])
        # not sure if this is a good idea -->
        # features = np.hstack([features, np.angle(fourier), np.angle(fourier1), np.angle(fourier2)])


    features_arr[i, ...] = features
        

feature_names = []
for i in range(3):
    feature_names.extend(['c' + str(i) + '_abs_A_' + str(x)
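
# wavelet1 / wavelet2 are not defined in this snippet; a plausible
# stand-in (an assumption, not the original) is two Ricker wavelets of
# different widths:
from scipy import signal
wavelet1 = signal.ricker(64, a=4.0)
wavelet2 = signal.ricker(64, a=8.0)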
Example #51
0
 def _test():
     convolve(a, b, 'valid')
Example #52
0
def _fast_kde(x, cumulative=False, bw=4.5, xmin=None, xmax=None):
    """Fast Fourier transform-based Gaussian kernel density estimate (KDE).

    The code was adapted from https://github.com/mfouesneau/faststats

    Parameters
    ----------
    x : Numpy array or list
    cumulative : bool
        If true, estimate the cdf instead of the pdf
    bw : float
        Bandwidth scaling factor for the KDE. Should be larger than 0. The higher this number the
        smoother the KDE will be. Defaults to 4.5 which is essentially the same as the Scott's rule
        of thumb (the default rule used by SciPy).
    xmin : float
        Manually set lower limit.
    xmax : float
        Manually set upper limit.

    Returns
    -------
    density: A gridded 1D KDE of the input points (x)
    xmin: minimum value of x
    xmax: maximum value of x
    """
    x = np.asarray(x, dtype=float)
    x = x[np.isfinite(x)]
    if x.size == 0:
        warnings.warn("kde plot failed, you may want to check your data")
        return np.array([np.nan]), np.nan, np.nan

    len_x = len(x)
    n_points = 200 if (xmin or xmax) is None else 500

    if xmin is None:
        xmin = np.min(x)
    if xmax is None:
        xmax = np.max(x)

    assert np.min(x) >= xmin
    assert np.max(x) <= xmax

    log_len_x = np.log(len_x) * bw

    n_bins = min(int(len_x ** (1 / 3) * log_len_x * 2), n_points)
    if n_bins < 2:
        warnings.warn("kde plot failed, you may want to check your data")
        return np.array([np.nan]), np.nan, np.nan

    _, grid, _ = histogram(x, n_bins, range_hist=(xmin, xmax))

    scotts_factor = len_x ** (-0.2)
    kern_nx = int(scotts_factor * 2 * np.pi * log_len_x)
    kernel = gaussian(kern_nx, scotts_factor * log_len_x)

    npad = min(n_bins, 2 * kern_nx)
    grid = np.concatenate([grid[npad:0:-1], grid, grid[n_bins : n_bins - npad : -1]])
    density = convolve(grid, kernel, mode="same", method="direct")[npad : npad + n_bins]
    norm_factor = (2 * np.pi * log_len_x ** 2 * scotts_factor ** 2) ** 0.5

    density /= norm_factor

    if cumulative:
        density = density.cumsum() / density.sum()

    return density, xmin, xmax
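
# The same idea in a few self-contained lines (a toy fixed bandwidth, not
# the function's adaptive rule): bin the data, then smooth the histogram
# with a normalized Gaussian kernel.
import numpy as np
from scipy.signal import convolve
from scipy.signal.windows import gaussian

x = np.random.randn(1000)
hist, edges = np.histogram(x, bins=100, density=True)
kernel = gaussian(25, std=4.0)
kernel /= kernel.sum()
density = convolve(hist, kernel, mode="same")
centers = (edges[:-1] + edges[1:]) / 2  # grid on which `density` lives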
Example #53
0
 def test_zero_order(self):
     a = 1289
     b = 4567
     c = convolve(a, b)
     assert_array_equal(c, a * b)
Example #54
0
        #del original_data

    additiveout = calibrators[pol].lincal(data[p],
                                          additivein,
                                          nthread=nthread,
                                          verbose=True)
    #######################remove additive###############################
    if removeadditive:
        nadditiveloop = 1
        for i in range(nadditiveloop):
            #subtimer = omni.Timer()
            subsetbl = calibrators[pol].Info.subsetbl
            additivein[:, :, subsetbl] = additivein[:, :, subsetbl] + additiveout
            weight = ss.convolve(np.ones(additivein.shape[0]),
                                 np.ones(removeadditiveperiod * 2 + 1),
                                 mode='same')
            #for f in range(additivein.shape[1]):#doing for loop to save memory usage at the expense of negligible time
            #additivein[:,f] = ss.convolve(additivein[:,f], np.ones(removeadditiveperiod * 2 + 1)[:, None], mode='same')/weight[:, None]
            additivein = (
                (sfil.convolve1d(np.real(additivein),
                                 np.ones(removeadditiveperiod * 2 + 1),
                                 mode='constant') +
                 1j * sfil.convolve1d(np.imag(additivein),
                                      np.ones(removeadditiveperiod * 2 + 1),
                                      mode='constant')) /
                weight[:, None, None]).astype('complex64')
            calibrators[pol].computeUBLFit = False
            additiveout = calibrators[pol].lincal(data[p],
                                                  additivein,
                                                  nthread=nthread,
Example #55
0
 def test_basic(self):
     a = [3, 4, 5, 6, 5, 4]
     b = [1, 2, 3]
     c = convolve(a, b)
     assert_array_equal(c, array([3, 10, 22, 28, 32, 32, 23, 12]))
Example #56
0
# %%
from scipy import signal
import sympy as sp
import numpy as np

# %%
X = [1, -2, 3, 4]
H = [1, 2, 3]

Y = signal.convolve(X, H)
print("conv", Y)
X = [11, 8, 3, 7, 5, 100, 13, 74, 19]
H = [8, 3, 7]
Y = signal.correlate(X, H)
print("cor", Y)
# deconvolve inverts convolve, so divide the convolution (not the
# correlation) by H to recover X
Xx, rem = signal.deconvolve(signal.convolve(X, H), H)
print("deconv", Xx)


#%%
def matest(a):
    if isinstance(a, list):
        return np.array([a])
    else:
        return a


def my_conv(X, H):
    X = matest(X)
    H = matest(H)
Example #57
0
z1, p1, k1 = sig.tf2zpk(Gnum, Gden)
z2, p2, k2 = sig.tf2zpk(Anum, Aden)
broot = np.roots(B)

print("Z1:  ", z1)
print("P1:  ", p1)
print("K1:  ", k1)

print("Z2:  ", z2)
print("P2:  ", p2)
print("K2:  ", k2)

print("Broots:  ", broot)

opennum = sig.convolve(Gnum, Anum)
openden = sig.convolve(Gden, Aden)
tout1, yout1 = sig.step((opennum, openden), T=t)

plt.figure(figsize=(10, 10))

plt.plot(tout1, yout1)
plt.title('Part One: Open System')

closenum = sig.convolve(Anum, Gnum)
#den1= sig.convolve(Gnum, B)
#print(len(den1))
#print(len(Gden))
#den2 = den1 + Gden
#closeden= sig.convolve(Aden, den2)
closeden = sig.convolve(Aden, (sig.convolve(Gnum, B) + Gden))
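
# sig.convolve on polynomial coefficient arrays is polynomial
# multiplication, which is how the open- and closed-loop numerators and
# denominators above are formed. A toy check with made-up first-order
# blocks G(s) = 1/(s + 1) and A(s) = 2/(s + 3):
import scipy.signal as sig
num = sig.convolve([1.0], [2.0])            # [2.]       -> 2
den = sig.convolve([1.0, 1.0], [1.0, 3.0])  # [1. 4. 3.] -> s**2 + 4*s + 3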
Example #58
0
def main(argv=sys.argv): 

    global eplat, eplon, epdepth, orig
    
    
    GFdir = "/home/roberto/data/GFS/"
    beta = 4.e3 #m/s 
    rho = 3.e3 #kg/m^3 
    mu = rho*beta*beta
    mu = 40e9
    
    Lbdm0min = 1e-26*np.array([125.])
    Lbdsmooth = 1e-26*np.array([100.])
    
    #~ Lbdm0min = 1e-26*np.linspace(60.,500,40)
    #~ Lbdsmooth = 1e-26*np.linspace(60.,500,40)#*0.5

    corners = 4.
    fmin = 0.001
    fmax = 0.005
    
    ### Data from Chilean 2010 EQ (Same as W phase inv.) 
    strike = 18.
    dip    = 18.
    rake   = 104. # 109.
    #rake = 45.
    
    rakeA = rake + 45.
    rakeB = rake - 45.
    ####################
    nsx = 21
    nsy = 11
    Min_h = 10.
    flen  = 600. #Fault's length [km] along strike
    fwid  = 300. #Fault's width [km] along dip
    sflen = flen/float(nsx)
    sfwid = fwid/float(nsy)    
    swp = [1, 0, 2]
    nsf = nsx*nsy
    ###################
    
    t_h     = 10.
    MISFIT = np.array([])
    #RUPVEL = np.arange(1.0, 5.0, 0.05)
    RupVel = 2.1 # Best fit
    #RupVel = 2.25 #From Lay et al.
    
    
    #for RupVel in RUPVEL:
    print "****************************"
    print RupVel
    print "****************************"
    NP = [strike, dip, rake]
    NPA = [strike, dip, rakeA]
    NPB = [strike, dip, rakeB]
    
    M  = np.array(NodalPlanetoMT(NP))  
    MA = np.array(NodalPlanetoMT(NPA)) 
    MB = np.array(NodalPlanetoMT(NPB)) 
    
    Mp = np.sum(M**2)/np.sqrt(2)
    
    
    #############
        #Loading req file and EQparameters
    parameters={}    
    with open(argv[1],'r') as file:
        for line in file:
            line = line.split()
            key = line[0]
            val = line[1:]
            parameters[key] = val 
    #~ cmteplat = float(parameters['eplat'][0])
    #~ cmteplon = float(parameters['eplon'][0])
    #~ cmtepdepth=float(parameters['epdepth'][0])    
    orig = UTCDateTime(parameters['origin_time'][0])
    
    ####Hypocentre from
    ### http://earthquake.usgs.gov/earthquakes/eqinthenews/2010/us2010tfan/    
    cmteplat = -35.91#-35.85#-36.03#-35.83
    cmteplon = -72.73#-72.72#-72.83# -72.67
    cmtepdepth= 35.
    eq_hyp = (cmteplat,cmteplon,cmtepdepth)
      ############

    
    
    grid, sblt = fault_grid('CL-2010',cmteplat,cmteplon,cmtepdepth,0, Min_h,\
                             strike, dip, rake, flen, fwid, nsx, nsy,\
                             Verbose=False, ffi_io=True, gmt_io=True)
                             
    print ('CL-2010',cmteplat,cmteplon,cmtepdepth,0, Min_h,\
                             strike, dip, rake, flen, fwid, nsx, nsy,\
                            )
    print grid[0][1]
    #sys.exit()
    #############
    #Loading files and setting dirs:
    
    inputfile =  os.path.abspath(argv[1]) 
    if not os.path.exists(inputfile): print inputfile, "does not exist."; exit() 
    workdir = "/".join(inputfile.split("/")[:-1]) 
    basename = inputfile.split("/")[-1][:-4]
    if workdir[-1] != "/": workdir += "/"
    
    try :
        os.mkdir(workdir+"WPinv")
    except OSError:
        pass#print "Directory WPtraces already exists. Skipping"
    
    trfile = open(workdir+"goodtraces.dat")
    trlist = []
    #Loading Good traces files:
    while 1:
        line = trfile.readline().rstrip('\r\n')
        if not line: break        
        trlist.append(line.split()[0])        
    trfile.close()
    #############
    
    # Reading traces:    
    st = read(workdir+"WPtraces/" + basename + ".decov.trim.mseed")  
    #############################################################################
    ######Determining the sf closest to the hypocentre:    
    min_Dist_hyp_subf = flen *fwid
    for subf in range(nsf):
        sblat   = grid[subf][1]
        sblon   = grid[subf][0]
        sbdepth = grid[subf][2]              
        sf_hyp =  (sblat,sblon, sbdepth)        
        Dist_hyp_subf = hypo2dist(eq_hyp,sf_hyp)
        if Dist_hyp_subf < min_Dist_hyp_subf:
            min_Dist_hyp_subf = Dist_hyp_subf
            min_sb_hyp = sf_hyp
            hyp_subf = subf
    print hyp_subf,  min_sb_hyp,  min_Dist_hyp_subf    
    
    
    ####Determining trimming times:
    
    test_tr = read(GFdir + "H003.5/PP/GF.0001.SY.LHZ.SAC")[0]
    t0 = test_tr.stats.starttime
    TrimmingTimes = {}   # Min. distance from the fault to each station. 
    A =0
    for trid in trlist:     
        tr = st.select(id=trid)[0]
        metafile = workdir + "DATA/" + "META." + tr.id + ".xml"
        META = DU.getMetadataFromXML(metafile)[tr.id]
        stlat = META['latitude']
        stlon = META['longitude'] 
        dist =   locations2degrees(min_sb_hyp[0],min_sb_hyp[1],\
                                   stlat,stlon) 
        parrivaltime = getTravelTimes(dist,min_sb_hyp[2])[0]['time']        
        ta = t0 + parrivaltime
        tb = ta + round(15.*dist) 
        TrimmingTimes[trid] = (ta, tb)
        
    
    ##############################################################################
     
    #####

    DIST = []
    # Ordering the stations in terms of distance
    for trid in trlist: 
        metafile = workdir + "DATA/" + "META." + trid + ".xml"
        META = DU.getMetadataFromXML(metafile)[trid]
        lat = META['latitude']
        lon = META['longitude']
        trdist = locations2degrees(cmteplat,cmteplon,lat,lon) 
        DIST.append(trdist)   

    
    DistIndex = lstargsort(DIST)
    
    if len(argv) == 3:
        trlist = [argv[2]]
        OneStation = True
    else:
        trlist = [trlist[i] for i in DistIndex]
        OneStation = False
        
   ##### 

    client = Client()
    ObservedDisp = np.array([])   
    gridlat = []
    gridlon = []
    griddepth = []
    sbarea = []
    mindist = flen*fwid # min distance hyp-subfault 
    

    ##########Loop for each subfault
    for subf in range(nsf):
        print "**********"
        print subf
        eplat   = grid[subf][1]
        eplon   = grid[subf][0]           
        epdepth = grid[subf][2]
        
        ## Storing the subfault's location centered on the hypocenter 
        gridlat.append(eplat-cmteplat)
        gridlon.append(eplon-cmteplon)
        griddepth.append(epdepth)
        
        
        strike = grid[subf][3] #+ 360.
        dip    = grid[subf][4]
        rake   = grid[subf][5] #     
        NP = [strike, dip, rake]
        
        M = np.array(NodalPlanetoMT(NP))   

        
                #Calculating the time delay:
            
        sf_hyp = (eplat,eplon, epdepth)        
        Dist_ep_subf = hypo2dist(eq_hyp,sf_hyp)
        t_d = round(Dist_ep_subf/RupVel) #-59.
        print eplat,eplon, epdepth
    
        #t_d  = 0.
        
    
        # Determining depth dir:
        depth = []
        depthdir = []
        for file in os.listdir(GFdir):
            if file[-2:] == ".5":
                depthdir.append(file)
                depth.append(float(file[1:-2]))            
        BestDirIndex = np.argsort(abs(epdepth-np.array(depth)))[0]      
        hdir = GFdir + depthdir[BestDirIndex] + "/"   
        # hdir is the absolute path to the closest depth. 
        
        
        
        SYN = np.array([])
        SYNA = np.array([])
        SYNB = np.array([])

        
        #Main loop :
        for trid in trlist:  
                       
            tr = st.select(id=trid)[0]    
            metafile = workdir + "DATA/" + "META." + tr.id + ".xml"
            META = DU.getMetadataFromXML(metafile)[tr.id]
            lat = META['latitude']
            lon = META['longitude']    
            trPPsy,  trRRsy, trRTsy,  trTTsy = \
                                   GFSelectZ(lat,lon,hdir) 
            
            tr.stats.delta = trPPsy.stats.delta
            azi =   -np.pi/180.*gps2DistAzimuth(lat,lon,\
                                               eplat,eplon)[2]
            trROT = MTrotationZ(azi, trPPsy,  trRRsy, trRTsy,  trTTsy)        
                        
            
                    #Triangle 
            dt = trROT[0].stats.delta          
            trianglen = int(2.*t_h/dt - 1.)
            window = triang(trianglen)
            window /= np.sum(window)
            #window = np.array([1.])
            
            FirstValid = int(trianglen/2.) + 1
            dist =   locations2degrees(eplat,eplon,lat,lon) 
            parrivaltime = getTravelTimes(dist,epdepth)[0]['time']
            
            t1 = TrimmingTimes[trid][0] - t_d
            t2 = TrimmingTimes[trid][1] - t_d
            
   
            #~ t1 = trROT[0].stats.starttime + parrivaltime- t_d
            #~ t2 = t1+ round(MinDist[tr.id]*15. )
                           
           
            N = len(trROT[0])
            for trR in trROT:
                trR.data *= 10.**-21 ## To get M in Nm                   
                trR.data -= trR.data[0]
                AUX1 = len(trR)
                trR.data = convolve(trR.data,window,mode='valid') 
                AUX2 = len(trR)
                mean = np.mean(np.hstack((trR.data[0]*np.ones(FirstValid),\
                               trR.data[:int(60./trR.stats.delta)-FirstValid+1])))
                #mean = np.mean(trR.data[:60])
                trR.data -= mean      
                trR.data = bp.bandpassfilter(trR.data, len(trR), trR.stats.delta,
                                             corners, 1, fmin, fmax)
                t_l = dt*0.5*(AUX1 - AUX2)                             
                trR.trim(t1-t_l,t2-t_l, pad=True, fill_value=trR.data[0])  #We lost t_h due to the convolution                  
            
            
         
            #~ for trR in trROT:
                #~ trR.data *= 10.**-23 ## To get M in Nm
                #~ trR.data -= trR.data[0]                
                #~ trR.data = convolve(trR.data,window,mode='same')
                #~ # mean = np.mean(np.hstack((trR.data[0]*np.ones(FirstValid),\
                             #~ # trR.data[:60./trR.stats.delta*1.-FirstValid+1])))
                #~ mean = np.mean(trR.data[:60])               
                #~ trR.data -= mean
                #~ trR.data = bp.bandpassfilter(trR.data,len(trR), trR.stats.delta,\
                                             #~ corners ,1 , fmin, fmax)
                #~ trR.trim(t1,t2,pad=True, fill_value=trR.data[0])  

            nmin = min(len(tr.data),len(trROT[0].data))             
            tr.data = tr.data[:nmin]
            for trR in trROT:
                trR.data = trR.data[:nmin]
              
                
             #############            
            trROT = np.array(trROT)  
            syn  =  np.dot(trROT.T,M) 
            synA =  np.dot(trROT.T,MA)
            synB =  np.dot(trROT.T,MB)
            
            SYN = np.append(SYN,syn)  
            SYNA = np.append(SYNA,synA)
            SYNB = np.append(SYNB,synB)
            
            if subf == 0 : ObservedDisp =  np.append(ObservedDisp,tr.data,0) 
            
  
        sbarea.append(grid[subf][6])
   
        print np.shape(A), np.shape(np.array([SYN]))
        if subf == 0: 
            A  = np.array([SYN])
            AA = np.array([SYNA])
            AB = np.array([SYNB])
        else:
            A = np.append(A,np.array([SYN]),0) 
            AA = np.append(AA,np.array([SYNA]),0)
            AB = np.append(AB,np.array([SYNB]),0)
        
    
    
    #Full matrix with the two rake's component
    AC = np.vstack((AA,AB))

#MISFIT = np.array([])
########## Stabilizing the solution:         

#### Moment minimization:
#~ constraintD  = np.zeros(nsf)
#~ ObservedDispcons = np.append(ObservedDisp,constraintD)
#~ for lbd in Lbd:
    #~ constraintF  = lbd*np.eye(nsf,nsf)         
    #~ Acons = np.append(A,constraintF,1)   
    #~ print np.shape(Acons.T), np.shape(ObservedDispcons)
    #~ R = nnls(Acons.T,ObservedDispcons)
    #~ M = R[0]
    #~ #M = np.zeros(nsf)
    #~ #M[::2] = 1
    #~ fit = np.dot(A.T,M)
    #~ misfit = 100.*np.sum(np.abs(fit-ObservedDisp))\
             #~ /np.sum(np.abs(ObservedDisp))
    
    #~ MISFIT = np.append(MISFIT,misfit)
#~ plt.figure()
#~ plt.plot(Lbd,MISFIT)
#~ ###########################################
#~ ### Smoothing:
#~ constraintF_base = SmoothMatrix(nsx,nsy)
#~ constraintD  = np.zeros(np.shape(constraintF_base)[0])
#~ ObservedDispcons = np.append(ObservedDisp,constraintD)
#~ for lbd in Lbd:
    #~ constraintF  = lbd*constraintF_base   
    #~ Acons = np.append(A,constraintF.T,1)   
    #~ #print np.shape(Acons.T), np.shape(ObservedDispcons)
    #~ R = nnls(Acons.T,ObservedDispcons)
    #~ M = R[0]
    #~ fit = np.dot(A.T,M)
    #~ misfit = 100.*np.sum(np.abs(fit-ObservedDisp))\
             #~ /np.sum(np.abs(ObservedDisp))
    #~ print lbd, misfit
    #~ MISFIT = np.append(MISFIT,misfit)
#~ ###########################################    
###########################################
#~ ##### Moment Minimization (including rake projections):
#~ constraintD  = np.zeros(2*nsf)
#~ ObservedDispcons = np.append(ObservedDisp,constraintD)
#~ for lbd in Lbd:
    #~ constraintF  = lbd*np.eye(2*nsf,2*nsf)         
    #~ ACcons = np.append(AC,constraintF,1)   
    #~ print np.shape(ACcons.T), np.shape(ObservedDispcons)
    #~ R = nnls(ACcons.T,ObservedDispcons)
    #~ M = R[0]
    #~ fit = np.dot(AC.T,M)
    #~ misfit = 100.*np.sum(np.abs(fit-ObservedDisp))\
             #~ /np.sum(np.abs(ObservedDisp))        
    #~ MISFIT = np.append(MISFIT,misfit)  
    #~ M = np.sqrt(M[:nsf]**2+M[nsf:]**2)

##############################################
### Smoothing (including rake projections):
#~ constraintF_base = SmoothMatrix(nsx,nsy)
#~ Nbase = np.shape(constraintF_base)[0]
#~ constraintD  = np.zeros(2*Nbase)
#~ constraintF_base_big = np.zeros((2*Nbase, 2*nsf))
#~ constraintF_base_big[:Nbase,:nsf]= constraintF_base
#~ constraintF_base_big[Nbase:,nsf:]= constraintF_base 
#~ ObservedDispcons = np.append(ObservedDisp,constraintD)
#~ for lbd in Lbd:
    #~ constraintF  = lbd*constraintF_base_big   
    #~ ACcons = np.append(AC,constraintF.T,1)   
    #~ #print np.shape(Acons.T), np.shape(ObservedDispcons)
    #~ R = nnls(ACcons.T,ObservedDispcons)
    #~ M = R[0]
    #~ fit = np.dot(AC.T,M)
    #~ misfit = 100.*np.sum(np.abs(fit-ObservedDisp))\
             #~ /np.sum(np.abs(ObservedDisp))
    #~ print lbd, misfit
    #~ MISFIT = np.append(MISFIT,misfit)
#~ M = np.sqrt(M[:nsf]**2+M[nsf:]**2)    
###########################################    
#~ ##### Moment Minimization and  Smoothing
  #~ #### (including rake projections):
    #~ mom0 = []
    #~ constraintF_base = SmoothMatrix(nsx,nsy)
    #~ Nbase = np.shape(constraintF_base)[0]
    #~ constraintDsmoo = np.zeros(2*Nbase)
    #~ constraintDmin  = np.zeros(2*nsf)
    #~ constraintF_base_big = np.zeros((2*Nbase, 2*nsf))
    #~ constraintF_base_big[:Nbase,:nsf]= constraintF_base
    #~ constraintF_base_big[Nbase:,nsf:]= constraintF_base 
    #~ ObservedDispcons = np.concatenate((ObservedDisp,
                                  #~ constraintDmin,
                             #~ constraintDsmoo  ))    
   
    #~ for lbdm0 in Lbdm0min:
        #~ constraintFmin  = lbdm0*np.eye(2*nsf,2*nsf)
        #~ for lbdsm in Lbdsmooth:              
            #~ constraintFsmoo  = lbdsm*constraintF_base_big 
            #~ ACcons = np.hstack((AC, constraintFmin, constraintFsmoo.T))   
            #~ print lbdm0, lbdsm
            #~ R = nnls(ACcons.T,ObservedDispcons)
            #~ M = R[0]
            #~ fit = np.dot(AC.T,M)
            #~ misfit = 100.*np.sum(np.abs(fit-ObservedDisp))\
                     #~ /np.sum(np.abs(ObservedDisp))        
            #~ MISFIT = np.append(MISFIT,misfit) 
            #~ MA = M[:nsf]
            #~ MB = M[nsf:]
            #~ M = np.sqrt(MA**2+MB**2)
            #~ mom0.append(np.sum(M))
    ##############################################

    # Rotation to the rake's conventional angle:  
    #MB, MA = Rot2D(MB,MA,-rakeB)
    print np.shape(M), np.shape(A.T)
    R = nnls(A.T,ObservedDisp)
    M = R[0]
    
    #~ M = np.zeros(nsf)
    #~ M[::2] = 1 
    fit = np.dot(A.T,M)
    MA = M
    MB = M
    
    np.save("RealSol", M)
      
    nm0 = np.size(Lbdm0min) 
    nsmth = np.size(Lbdsmooth)
    #~ plt.figure()
    #~ plt.pcolor(1./Lbdsmooth, 1./Lbdm0min,MISFIT.reshape(nm0,nsmth))
    #~ plt.xlabel(r'$1/ \lambda_{2}$', fontsize = 24)
    #~ plt.ylabel(r'$1/ \lambda_{1}$',fontsize = 24 )
    #~ plt.ylim((1./Lbdm0min).min(),(1./Lbdm0min).max() )
    #~ plt.ylim((1./Lbdsmooth).min(),(1./Lbdsmooth).max() )
    #~ cbar = plt.colorbar()
    #~ cbar.set_label("Misfit %")
    #~ print np.shape(Lbdm0min), np.shape(mom0)
    
    #~ plt.figure()
    #~ CS = plt.contour(1./Lbdsmooth, 1./Lbdm0min,MISFIT.reshape(nm0,nsmth) )
    #~ plt.xlabel(r'$1/ \lambda_{2}$', fontsize = 24)
    #~ plt.ylabel(r'$1/ \lambda_{1}$',fontsize = 24 )
    #~ plt.clabel(CS, inline=1, fontsize=10)
    #~ plt.title('Misfit')
    
    
    
    #~ plt.figure()
    #~ plt.plot(1./Lbdm0min,MISFIT)
    #~ plt.xlabel(r'$1/ \lambda_{2}$', fontsize = 24)
    #~ plt.ylabel("Misfit %")
    #~ plt.figure()
    #~ plt.plot(Lbdm0min,mom0)
    #~ plt.ylabel(r'$M_0\, [Nm]$', fontsize = 24)
    #~ plt.xlabel(r'$\lambda_{M0}$', fontsize = 24)

   
    misfit = 100.*np.sum(np.abs(fit-ObservedDisp))/np.sum(np.abs(ObservedDisp))
    print "Residual: ", 1000.*R[1]
    print misfit
    
    
    #SLIP = M*Mp/mu/(1.e6*np.array(sbarea))
    
    sbarea = sflen*sfwid
    SLIP = M/(mu*1.e6*sbarea)
    SLIP = SLIP.reshape(nsx,nsy).T[::-1]
    moment = M.reshape(nsx,nsy).T[::-1]
    
    plt.figure(figsize = (13,5))
    plt.plot(fit,'b' ,label="Fit")
    plt.plot(ObservedDisp,'r',label="Observed")
    plt.xlabel("Time [s]")
    plt.ylabel("Displacement [m]")
    plt.legend()
    
    
    np.set_printoptions(linewidth=1000,precision=3)
    print "***********"
    print sbarea
    print SLIP
    print np.mean(SLIP)
    print "Moment:"
    print np.sum(M)
 

    ### SLIPS Distribution (as the synthetics) :
    SLIPS = M.reshape(nsx,nsy).T
    SLIPS /=  mu*1.e6*sbarea
    
    
    #~ ######### Plotting slip distribution:
    #~ #we are going to reflect the y axis later, so:
    hypsbloc = [hyp_subf / nsy , -(hyp_subf % nsy) - 2]

    #Creating the strike and dip axis:
    StrikeAx= np.linspace(0,flen,nsx+1)
    DipAx= np.linspace(0,fwid,nsy+1)
    DepthAx = DipAx*np.sin(np.pi/180.*dip) + Min_h
    print DepthAx
    hlstrike = StrikeAx[hypsbloc[0]] + sflen*0.5
    #we are going to reflect the axis later, so:
    hldip = DipAx[hypsbloc[1]] + sfwid*0.5 
    hldepth = DepthAx[hypsbloc[1]] + sfwid*0.5*np.sin(np.pi/180.*dip)    
    
    StrikeAx = StrikeAx - hlstrike
    DipAx =     DipAx   - hldip
    
    XX, YY = np.meshgrid(StrikeAx, DepthAx)
    XX, ZZ = np.meshgrid(StrikeAx, DipAx )  

   ######Plot: (Old colormap: "gist_rainbow_r")
    plt.figure(figsize = (13,6))
    ax = host_subplot(111)
    im = ax.pcolor(XX, YY, SLIPS, cmap="jet")    
    ax.set_ylabel('Depth [km]')       
    ax.set_ylim(DepthAx[-1],DepthAx[0])  
    
    # Creating a twin plot 
    ax2 = ax.twinx()
    im2 = ax2.pcolor(XX, ZZ, SLIPS[::-1,:], cmap="jet")    
    ax2.set_ylabel('Distance along the dip [km]')
    ax2.set_xlabel('Distance along the strike [km]')    
    ax2.set_ylim(DipAx[0],DipAx[-1])
    ax2.set_xlim(StrikeAx[0],StrikeAx[-1])       
                         
                         
    ax.axis["bottom"].major_ticklabels.set_visible(False) 
    ax2.axis["bottom"].major_ticklabels.set_visible(False)
    ax2.axis["top"].set_visible(True)
    ax2.axis["top"].label.set_visible(True)
    
    
    divider = make_axes_locatable(ax)
    cax = divider.append_axes("bottom", size="5%", pad=0.1)
    cb = plt.colorbar(im, cax=cax, orientation="horizontal")
    cb.set_label("Slip [m]")             
    ax2.plot([0], [0], '*', ms=225./(nsy+4))
    ax2.set_xticks(ax2.get_xticks()[1:-1])

    
    #~ ### Rake plot:
    plt.figure(figsize = (13,6))
    fig = host_subplot(111)
    XXq, ZZq = np.meshgrid(StrikeAx[:-1]+sflen, DipAx[:-1]+sfwid )
    Q = plt.quiver(XXq,ZZq, MB.reshape(nsx,nsy).T[::-1,:]/(mu*1.e6*sbarea), 
                    MA.reshape(nsx,nsy).T[::-1,:]/(mu*1.e6*sbarea),
                    SLIPS[::-1,:],
                units='xy',scale = 0.5  ,  linewidths=(2,), 
                edgecolors=('k'), headaxislength=5  )
    fig.set_ylim([ZZq.min()-80,ZZq.max()+80])
    fig.set_xlim([XXq.min()-20, XXq.max()+20 ])
    fig.set_ylabel('Distance along dip [km]') 
    fig.set_xlabel('Distance along the strike [km]') 
    
    fig2 = fig.twinx()
    fig2.set_xlabel('Distance along the strike [km]') 
    
    fig.axis["bottom"].major_ticklabels.set_visible(False) 
    fig.axis["bottom"].label.set_visible(False)
    fig2.axis["top"].set_visible(True)
    fig2.axis["top"].label.set_visible(True)
    fig2.axis["right"].major_ticklabels.set_visible(False)

    divider = make_axes_locatable(fig)
    cax = divider.append_axes("bottom", size="5%", pad=0.1)
    cb = plt.colorbar(im, cax=cax, orientation="horizontal")
    cb.set_label("Slip [m]") 
    

    
    
    plt.show()

    
        #############
    #~ print np.shape(MISFIT),  np.shape(RUPVEL)
    #~ plt.figure()
    #~ plt.plot(RUPVEL,MISFIT)
    #~ plt.xlabel("Rupture Velocity [km/s]")
    #~ plt.ylabel("Misfit %")
    #~ plt.show()
     

    print np.shape(MB.reshape(nsx,nsy).T)
    print np.shape(ZZ)
Example #59
0
def get_ns_power(df, n=3):
    if 'Watts' not in df.columns:
        print('No "Watts" column in DataFrame')
        return np.zeros(df.index.values.shape)
    return convolve(df['Watts'], boxcar(n), 'same') / n
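
# Usage sketch for get_ns_power above (hypothetical power-meter data),
# assuming the same imports the function itself relies on:
import pandas as pd
from scipy.signal import convolve, boxcar

df = pd.DataFrame({'Watts': [100.0, 110.0, 250.0, 240.0, 230.0, 120.0]})
print(get_ns_power(df, n=3))  # 3-sample boxcar moving average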
Example #60
0
def digit_baseband():

    Ts = 1
    N_sample = 8  # samples per symbol
    dt = Ts / N_sample  # sampling interval
    N = 1000  # number of symbols
    T = 1
    t = np.arange(0, N * N_sample * dt, dt)

    gt1 = np.ones((1, N_sample))  # NRZ (non-return-to-zero) waveform

    gt2 = np.ones((1, N_sample // 2))  # RZ (return-to-zero) waveform

    gt2 = np.hstack((gt2, np.zeros((1, N_sample // 2))))

    mt3 = np.sinc((t - 5) / Ts).reshape(1, -1)  # sin(pi*t/Ts) / (pi*t/Ts)

    gt3 = mt3[0, :10 * N_sample].reshape((1, -1))

    d = (np.sign(np.random.randn(1, N)) + 1) / 2

    data = sigexpand(d, N_sample)  # insert N_sample - 1 zeros between samples

    st1 = convolve(data, gt1)

    st2 = convolve(data, gt2)

    d = 2 * d - 1  # convert to a bipolar sequence

    data = sigexpand(d, N_sample)

    st3 = convolve(data, gt3)

    f1, st1f = T2F(t, st1[0, :len(t)])

    f2, st2f = T2F(t, st2[0, :len(t)])

    f3, st3f = T2F(t, st3[0, :len(t)])

    fig1 = plt.figure(1, figsize=(13, 8), dpi=98)
    fig1.add_subplot(321)
    plt.subplot(3, 2, 1, ylabel='Unipolar NRZ waveform')
    plt.xlim(0, 20)
    plt.ylim(-2, 2)
    plt.grid(linestyle="-.")

    plt.plot(t, st1[0][:len(t)])

    fig1.add_subplot(322, ylabel='Unipolar NRZ power spectral density (dB/Hz)')
    plt.grid(linestyle="-.")
    plt.xlim(-5, 5)
    plt.ylim(-40, 10)
    plt.plot(f1, 10 * np.log10(abs(st1f)**2 / T))

    fig1.add_subplot(323, ylabel='Unipolar RZ waveform')
    plt.grid(linestyle="-.")
    plt.xlim(0, 20)
    plt.ylim(-1.5, 1.5)
    plt.plot(t, st2[0][:len(t)])

    fig1.add_subplot(324, ylabel='Unipolar RZ power spectral density (dB/Hz)')
    plt.grid(linestyle="-.")
    plt.xlim(-5, 5)
    plt.ylim(-40, 10)
    plt.plot(f2, 10 * np.log10(abs(st2f)**2 / T))

    fig1.add_subplot(325, ylabel='Bipolar sinc waveform', xlabel='t/Ts')

    plt.plot(t - 5, st3[0][:len(t)])
    plt.grid(linestyle="-.")
    plt.xlim(0, 20)
    plt.ylim(-2, 2)

    fig1.add_subplot(326, ylabel='sinc waveform power spectral density (dB/Hz)', xlabel='f*Ts')
    plt.grid(linestyle="-.")
    plt.xlim(-5, 5)
    plt.ylim(-40, 10)
    plt.plot(f3, 10 * np.log10(abs(st3f)**2 / T))
    plt.show()
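
# sigexpand and T2F are helpers not shown above; minimal sketches under
# stated assumptions (sigexpand upsamples by zero-insertion; T2F returns a
# centered FFT spectrum over the grid t):
import numpy as np

def sigexpand(d, m):
    # insert m - 1 zeros after every sample of the (1, N) row vector d
    out = np.zeros((1, d.shape[1] * m))
    out[0, ::m] = d[0]
    return out

def T2F(t, st):
    # toy time-to-frequency transform with a centered frequency axis
    dt = t[1] - t[0]
    f = np.fft.fftshift(np.fft.fftfreq(len(st), dt))
    stf = np.fft.fftshift(np.fft.fft(st)) * dt
    return f, stf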