def noiseFilter(data_in):

	N = int(np.ceil((4 / b)))
	if not N % 2: N += 1  # Make sure that N is odd.
	n = np.arange(N)
	 
	# Compute a low-pass filter with cutoff frequency fH.
	hlpf = np.sinc(2 * fH * (n - (N - 1) / 2.))
	hlpf *= np.blackman(N)
	hlpf = hlpf / np.sum(hlpf)
	 
	# Compute a high-pass filter with cutoff frequency fL.
	hhpf = np.sinc(2 * fL * (n - (N - 1) / 2.))
	hhpf *= np.blackman(N)
	hhpf = hhpf / np.sum(hhpf)
	hhpf = -hhpf
	hhpf[(N - 1) // 2] += 1  # integer index (Python 3 floor division)
	 
	# Convolve both filters.
	h = np.convolve(hlpf, hhpf)
	s = np.convolve(data_in, hlpf)

	fig, ax = plt.subplots()
	ax.plot(data_in)
	plt.show()
	fig1, ax1 = plt.subplots()
	ax1.plot(s)
	plt.show()
	return s
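noiseFilter reads b (transition bandwidth), fH and fL (cutoff frequencies, normalized to the sampling rate) as module-level globals that are not shown in this snippet. A minimal usage sketch, with illustrative values assumed for those globals:

# Minimal usage sketch for noiseFilter. b, fH and fL are assumed module-level
# parameters (normalized frequencies); the values below are illustrative only.
import numpy as np
import matplotlib.pyplot as plt

b = 0.08    # assumed transition bandwidth
fH = 0.10   # assumed low-pass cutoff
fL = 0.01   # assumed high-pass cutoff (only used if the band-pass kernel h is applied)

t = np.arange(2000)
noisy = np.sin(2 * np.pi * 0.02 * t) + 0.5 * np.random.randn(t.size)
smoothed = noiseFilter(noisy)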
def calculate_couplings_levine(dt: float, w_jk: Matrix,
                               w_kj: Matrix) -> Matrix:
    """
    Compute the non-adiabatic coupling according to:
    `Evaluation of the Time-Derivative Coupling for Accurate Electronic
    State Transition Probabilities from Numerical Simulations`.
    Garrett A. Meek and Benjamin G. Levine.
    dx.doi.org/10.1021/jz5009449 | J. Phys. Chem. Lett. 2014, 5, 2351−2356
    """
    # Orthonormalize the Overlap matrices
    w_jk = np.linalg.qr(w_jk)[0]
    w_kj = np.linalg.qr(w_kj)[0]

    # Diagonal matrix
    w_jj = np.diag(np.diag(w_jk))
    w_kk = np.diag(np.diag(w_kj))

    # remove the values from the diagonal
    np.fill_diagonal(w_jk, 0)
    np.fill_diagonal(w_kj, 0)

    # Components A + B
    acos_w_jj = np.arccos(w_jj)
    asin_w_jk = np.arcsin(w_jk)

    a = acos_w_jj - asin_w_jk
    b = acos_w_jj + asin_w_jk
    A = - np.sin(np.sinc(a))
    B = np.sin(np.sinc(b))

    # Components C + D
    acos_w_kk = np.arccos(w_kk)
    asin_w_kj = np.arcsin(w_kj)

    c = acos_w_kk - asin_w_kj
    d = acos_w_kk + asin_w_kj
    C = np.sin(np.sinc(c))
    D = np.sin(np.sinc(d))

    # Components E
    w_lj = np.sqrt(1 - (w_jj ** 2) - (w_kj ** 2))
    w_lk = -(w_jk * w_jj + w_kk * w_kj) / w_lj
    asin_w_lj = np.arcsin(w_lj)
    asin_w_lk = np.arcsin(w_lk)
    asin_w_lj2 = asin_w_lj ** 2
    asin_w_lk2 = asin_w_lk ** 2

    t1 = w_lj * w_lk * asin_w_lj
    x1 = np.sqrt((1 - w_lj ** 2) * (1 - w_lk ** 2)) - 1
    t2 = x1 * asin_w_lk
    t = t1 + t2
    E_nonzero = 2 * asin_w_lj * t / (asin_w_lj2 - asin_w_lk2)

    # Check whether w_lj is different of zero
    E1 = np.where(np.abs(w_lj) > 1e-8, E_nonzero, np.zeros(A.shape))

    E = np.where(np.isclose(asin_w_lj2, asin_w_lk2), w_lj ** 2, E1)

    cte = 1 / (2 * dt)
    return cte * (np.arccos(w_jj) * (A + B) + np.arcsin(w_kj) * (C + D) + E)
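A hedged usage sketch for calculate_couplings_levine, assuming Matrix is simply an alias for numpy.ndarray; the overlap matrices below are random near-identity stand-ins meant only to exercise the code, not physically meaningful overlaps:

# Usage sketch with random stand-in overlap matrices (not physically meaningful);
# Matrix is assumed to be an alias for np.ndarray and dt an arbitrary time step.
import numpy as np

rng = np.random.default_rng(0)
dt = 1.0
w_jk = np.eye(4) + 0.05 * rng.standard_normal((4, 4))
w_kj = np.eye(4) + 0.05 * rng.standard_normal((4, 4))
couplings = calculate_couplings_levine(dt, w_jk, w_kj)
print(couplings.shape)  # (4, 4)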
Example #3
File: curve.py Project: ericuni/note
def marker():
	X = np.linspace(-6, 6, 1024)
	Y1 = np.sinc(X)
	Y2 = np.sinc(X) + 1
	plt.plot(X, Y1, marker = 'o', color = '.75')
	plt.plot(X, Y2, marker = 'o', color = 'k', markevery = 32)
	plt.show()
Example #4
def kernels(image, grad, r3, coefs):
    sig1, sig01, sig11, sig21, sig31 = coefs
    k_adj = 0
    N2 = image.n_pe
    N1 = image.N1
    ko = np.zeros((N2,N1,N1), 'F')
    kp = np.zeros((N2,N2,N1), 'F')
    
    Tf = float(image.T_flat); Tr = float(image.T_ramp); T0 = float(image.T0)
    Tl = 2*Tr + Tf
    delT = (Tl-2*T0)/N1
    #delT = image.dwell_time/1e3
    Lx = image.fov_x; Ly = image.fov_y
    
    a0 = grad.gmaG0*(sig01 + r3*sig31)/(2*np.pi)
    a1 = grad.gmaG0*Lx*sig11/(2*np.pi)
    a2 = grad.gmaG0*Ly*sig21/(2*np.pi)


    tn = np.arange(N1)*delT + T0
    n1p = np.arange(-N1/2, N1/2)
    for n2 in range(N2):
        k1t = gtranslate(grad.kx, tn-sig1) * grad.gmaG0/(2*np.pi)
        g1t = gtranslate(grad.gx, tn-sig1)
        ko[n2] = np.sinc(n1p - (Lx*k1t[:,None]-k_adj) + a1*g1t)
        # multiply a0 term to diagonal of n2'th matrix
        ko.flat[n2*(N1*N1):(n2+1)*(N1*N1):(N1+1)] *= np.exp(1j*a0*g1t)
        for n2p in range(N2):
            kp[n2,n2p] = np.sinc(n2p - n2 + a2*g1t)
        
        tn += Tl
        #k_adj = k_adj ^ 1
    return ko, kp
Example #5
def compute_premultiplier(N, kernel, kernsize, scale=512):
    krange = int(N / 2)
    koffset = int((N / 2) * scale)

    x = np.arange(-scale * krange, scale * krange) / float(scale)
    if kernel == 'lanczos':
        a = kernsize / 2
        k = np.sinc(x) * np.sinc(x / a) * (np.abs(x) <= a)
    elif kernel == 'sinc':
        a = kernsize / 2.0
        k = np.sinc(x) * (np.abs(x) <= a)
    elif kernel == 'linear':
        assert kernsize == 2
        k = np.maximum(0.0, 1 - np.abs(x))
    elif kernel == 'quad':
        assert kernsize == 3
        k = (np.abs(x) <= 0.5) * (1 - 2 * x**2) + \
            ((np.abs(x) < 1) * (np.abs(x) > 0.5)) * 2 * (1 - np.abs(x))**2
    else:
        assert False, 'Unknown kernel type'

    sk = np.fft.fftshift(np.fft.ifft(np.fft.ifftshift(k))).real
    if N % 2 == 0:
        premult = 1.0 / (N * sk[(koffset - krange):(koffset + krange)])
    else:
        premult = 1.0 / (N * sk[(koffset - krange - 1):(koffset + krange)])

    return premult
Example #6
def test():
    import numpy as np

    test_fn = "test.h5"
    test_f = h5py.File(test_fn, 'w')
    A = test_f.create_group('group A1')
    B = test_f.create_group('group A2')
    C = test_f.create_group('group A11')
    B['Simple Data'] = range(25)
    xs, ys = np.mgrid[-25:25, -25:25]
    rs = np.sqrt(xs ** 2 + ys ** 2)
    C['Image Data'] = np.sinc(rs)
    C['R Values'] = rs
    A.attrs['words'] = 'bonobo bonanza'
    A.attrs['number'] = 42
    C['Image Data'].attrs['dataset attr'] = 15.5
    C['axis1'] = np.linspace(-10, 10, 50)
    ts, xs, ys = np.mgrid[0:100, -50:50, -50:50]
    rs = np.sqrt(xs ** 2 + ys ** 2)
    C['Movie Data'] = np.sinc(rs - ts) * np.exp(-ts / 100)

    xs = A['xs'] = np.linspace(0, 10, 300)
    A['sin(xs)'] = np.sin(xs)
    #A['sin(xs)'].dims.create_scale(A['xs'], "The X Axis Label")
    #A['sin(xs)'].dims[0].attach_scale(A['xs'])
    main(test_fn)
Example #7
def lanczos(U,n=10,n2=None):
    """Generate Lanczos kernel for a given shift.
    """
    if n2 is None:
        n2 = n
    siz = size(U)
    H = None
    if (siz == 2):
        U_in = copy(U)
        if len(U.shape)==1:
            U_in = zeros((1,2))
            U_in[0,0]=U[0]
            U_in[0,1]=U[1]
        H = zeros((2*n+1,2*n2+1))
        if (U_in[0,0] == 0) and (U_in[0,1] == 0):
            H[n,n2] = 1
        else:
            i=0
            j=0
            for i in range(0,2*n+1):
                for j in range(0,2*n2+1):
                    H[i,j] = sinc(U_in[0,0]-(i-n))*sinc((U_in[0,0]-(i-n))/n)*sinc(U_in[0,1]-(j-n))*sinc((U_in[0,1]-(j-n))/n)

    else :
        H = zeros((2*n+1,))
        for i in range(0,2*n):
            H[i] = sinc(pi*(U-(i-n)))*sinc(pi*(U-(i-n))/n)
    return H
Example #8
File: utils.py Project: demisjohn/EMpy
 def getEPSFourierCoeffs(self, wl, n, anisotropic=True):
     """Return the Fourier coefficients of eps and eps**-1, orders [-n,n]."""
     nood = 2 * n + 1
     hmax = nood - 1
     if not anisotropic:
         # isotropic
         rix1 = self.mat1.n(wl)
         rix2 = self.mat2.n(wl)
         f = self.dc
         h = numpy.arange(-hmax, hmax + 1)
         EPS = (rix1 ** 2 - rix2 ** 2) * f * \
             numpy.sinc(h * f) + rix2 ** 2 * (h == 0)
         EPS1 = (rix1 ** -2 - rix2 ** -2) * f * \
             numpy.sinc(h * f) + rix2 ** -2 * (h == 0)
         return EPS, EPS1
     else:
         # anisotropic
         EPS = numpy.zeros((3, 3, 2 * hmax + 1), dtype=complex)
         EPS1 = numpy.zeros_like(EPS)
         eps1 = numpy.squeeze(
             self.mat1.epsilonTensor(wl)) / EMpy.constants.eps0
         eps2 = numpy.squeeze(
             self.mat2.epsilonTensor(wl)) / EMpy.constants.eps0
         f = self.dc
         h = numpy.arange(-hmax, hmax + 1)
         for ih, hh in enumerate(h):
             EPS[:, :, ih] = (eps1 - eps2) * f * \
                 numpy.sinc(hh * f) + eps2 * (hh == 0)
             EPS1[:, :, ih] = (
                 scipy.linalg.inv(eps1) - scipy.linalg.inv(eps2)
             ) * f * numpy.sinc(hh * f) + scipy.linalg.inv(eps2) * (hh == 0)
         return EPS, EPS1
def _firls_even(numtaps, bands, desired):
    
    # This function implements an algorithm similar to the one of the
    # SciPy `firls` function, but for even-length filters rather than
    # odd-length ones. See paper notes entitled "Least squares FIR
    # filter design for even N" for derivation. The derivation is
    # similar to that of Ivan Selesnick's "Linear-Phase FIR Filter
    # Design By Least Squares" (available online at
    # http://cnx.org/contents/eb1ecb35-03a9-4610-ba87-41cd771c95f2@7),
    # with due alteration of detail for even filter lengths.
    
    bands.shape = (-1, 2)
    desired.shape = (-1, 2)
    weights = np.ones(len(desired))
    M = int(numtaps / 2)
    
    # Compute M x M matrix Q (actually twice Q).
    n = np.arange(numtaps)[:, np.newaxis, np.newaxis]
    q = np.dot(np.diff(np.sinc(bands * n) * bands, axis=2)[:, :, 0], weights)
    Q1 = linalg.toeplitz(q[:M])
    Q2 = linalg.hankel(q[1:M + 1], q[M:])
    Q = Q1 + Q2
    
    # Compute M-vector b.
    k = np.arange(M) + .5
    e = bands[1]
    b = np.diff(e * np.sinc(e * k[:, np.newaxis])).reshape(-1)
    
    # Compute a (actually half a).
    a = np.dot(linalg.pinv(Q), b)
    
    # Compute h.
    h = np.concatenate((np.flipud(a), a))
    
    return h
Example #10
def _aldkrls_ex0():

    iN_training = 100  # Size of the training data
    iN_test = 1000     # Size of the test data
    iSNR = 20          # Noise in the training data [SNR in dB]

    # Generate training data
    vX = np.random.randn(iN_training)
    vY = np.sinc(vX)
    vY = vY + np.sqrt(10**(-iSNR/10)*np.var(vY,ddof=1))*np.random.randn(iN_training)

    # Generate test data
    iStep = (np.max(vX) - np.min(vX))/iN_test
    vX_test = np.arange(np.min(vX),np.max(vX),iStep)
    vY_test = np.sinc(vX_test)

    # -----------------------------------------------------------------
    # Train the algorithm
    dAldKRLS = krls.aldkrls.init('gauss')      # Construct an 'aldKRLS' object
    for i in np.arange(iN_training):           # Training loop (loop over all the training samples)
        dAldKRLS = krls.aldkrls.train(dAldKRLS , vX[i], vY[i])

    # Evaluate the algorithm
    vY_est = krls.aldkrls.evaluate(dAldKRLS, vX_test)

    # -----------------------------------------------------------------
    # Plot
    hFig1 = plt.figure(1)
    hSubPlot1 = hFig1.add_subplot(111)
    hSubPlot1.grid(True)
    hSubPlot1.plot(vX_test, vY_est,'-b',label='Generated by the algorithms')
    hSubPlot1.plot(vX_test, vY_test,'-g',label='Correct')
    vY = vY[np.argsort(vX)]; vX = np.sort(vX); hSubPlot1.plot(vX, vY,'-*r',label='Training')
    hSubPlot1.legend()
    plt.show(block=True)
Example #11
File: misc.py Project: embray/poppy
def sinc2_2d( width=1.0, height=None, wavelength=1e-6, shape=(512,512), pixelscale=0.010, 
        obscuration=0.0, center=None):
    """
    Parameters
    -----------
    width : float
        Width in meters of the aperture
    height : float, optional
        height in meters of the aperture. If not specified, the aperture is assumed 
        to be a square so height=width

    """

    if height is None: height = width
    halfwidth  = float(width)/2
    halfheight = float(height)/2

    if center is None:
        center = (np.asarray(shape)-1.)/2
    y, x = np.indices(shape, float)
    y -= center[0]
    x -= center[1]
    y *= pixelscale
    x *= pixelscale

    k = 2*np.pi / wavelength # wavenumber
    alpha = k * x * halfwidth  * _ARCSECtoRAD
    beta  = k * y * halfheight * _ARCSECtoRAD

    psf = (np.sinc(alpha))**2 * (np.sinc(beta))**2

    return psf
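A minimal usage sketch for sinc2_2d, assuming it lives in a module where _ARCSECtoRAD is the arcsecond-to-radian conversion factor (defined here only so the sketch runs); the aperture numbers are illustrative:

# Usage sketch; _ARCSECtoRAD is the module-level arcsec->radian factor that
# sinc2_2d reads, defined here only to make the sketch self-contained.
import numpy as np

_ARCSECtoRAD = np.pi / (180. * 3600.)
psf = sinc2_2d(width=0.080, wavelength=2e-6, shape=(256, 256), pixelscale=0.010)
print(psf.shape, float(psf.max()))  # peak close to 1 near the array centre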
Example #12
def NoddFcn(F, M, W, L):  # N is odd
    # Variables :
    b0 = 0
    m = n.array(range(int(L + 1)))
    k = m[1:len(m)]
    b = n.zeros(k.shape)

    # Run Loop :
    for s in range(0, len(F), 2):
        m = (M[s + 1] - M[s]) / (F[s + 1] - F[s])
        b1 = M[s] - m * F[s]
        b0 = b0 + (b1 * (F[s + 1] - F[s]) + m / 2 * (
            F[s + 1] * F[s + 1] - F[s] * F[s])) * abs(
            n.square(W[round((s + 1) / 2)]))
        b = b + (m / (4 * pi * pi) * (
            n.cos(2 * pi * k * F[s + 1]) - n.cos(2 * pi * k * F[s])
        ) / (k * k)) * abs(n.square(W[round((s + 1) / 2)]))
        b = b + (F[s + 1] * (m * F[s + 1] + b1) * n.sinc(2 * k * F[s + 1]) - F[
            s] * (m * F[s] + b1) * n.sinc(2 * k * F[s])) * abs(n.square(
                W[round((s + 1) / 2)]))

    b = n.insert(b, 0, b0)
    a = (n.square(W[0])) * 4 * b
    a[0] = a[0] / 2
    aud = n.flipud(a[1:len(a)]) / 2
    a2 = n.insert(aud, len(aud), a[0])
    h = n.concatenate((a2, a[1:] / 2))

    return h
Example #13
def zinc(f, m=64, n=1):
    """Calculate the magnitude response of a cascade of ``n`` ``m``-th order comb filters.

    The magnitude of the filter response is calculated mathematically as:

    .. math::

        \\left|H(f)\\right| = \\left|\\frac{\\mathrm{sinc}(m f)}{\\mathrm{sinc}(f)}\\right|^n

    **Parameters:**

    f : ndarray
        The frequencies at which the magnitude response is evaluated.

    m : int, optional
        The order of the comb filters.

    n : int, optional
        The number of comb filters in the cascade.

    **Returns:**

    HM : ndarray
        The magnitude of the frequency response of the cascade filter.

    """
    return np.fabs(np.sinc(m * f) / np.sinc(f)) ** n
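A short sketch evaluating the comb-cascade magnitude response over normalized frequency (1.0 = sampling rate) and plotting it in dB; the order and cascade depth are illustrative:

# Usage sketch: |H(f)| in dB for a cascade of three 64th-order comb filters.
import numpy as np
import matplotlib.pyplot as plt

f = np.linspace(0, 0.5, 2001)
H = zinc(f, m=64, n=3)
plt.plot(f, 20 * np.log10(np.abs(H) + 1e-12))  # small offset keeps the nulls finite
plt.xlabel('Normalized frequency')
plt.ylabel('|H(f)| [dB]')
plt.show()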
Example #14
def _weight_lanczos_2D(x,y,l,cutoff):
    """ Working on
    """
    #c=cutoff
    r=(x**2+y**2)**0.5
    w=np.sinc(r/cutoff)*np.sinc(r/cutoff/l)
    w[r>3*l]=0
    return w
Example #15
    def __init__(self, power, FWHM_ps, center_wavelength_nm,
                 time_window_ps = 10., frep_MHz = 100., NPTS = 2**10, 
                 GDD = 0, TOD = 0, chirp2 = 0, chirp3 = 0,
                 power_is_avg = False):
        """Generate sinc pulse A(t) = sqrt(peak_power[W]) * sin(t/T0)/(t/T0)
        centered at wavelength center_wavelength_nm (nm).
        The width is given by FWHM_ps, which is the full-width-at-half-maximum 
        in picoseconds. T0 is equal to FWHM/3.7909885.
        time_window_ps sets temporal grid size. Optional GDD and TOD are
        in ps^2 and ps^3."""
        Pulse.__init__(self, frep_MHz = frep_MHz, n = NPTS)
        # make sure we weren't passed mks units        
        assert (center_wavelength_nm > 1.0) 
        assert (time_window_ps > 1.0 )                
        self.set_center_wavelength_nm(center_wavelength_nm)        
        self.set_time_window_ps(time_window_ps)

        T0_ps = FWHM_ps/3.7909885
        ### Generate pulse
        if not power_is_avg:
            # numpy.sinc is sin(pi*x)/(pi*x), so we divide by pi
            self.set_AT( np.sqrt(power) * np.sinc(self.T_ps/(T0_ps*np.pi)) ) 
        else:
            self.set_AT( 1 / np.sinc(np.pi * self.T_ps/(T0_ps*np.pi)) )
            self.set_AT(self.AT * np.sqrt( power / ( frep_MHz*1.0e6 * self.calc_epp()) ))
        
        self.chirp_pulse_W(GDD, TOD)
        self.chirp_pulse_T(chirp2, chirp3, T0_ps)   
Example #16
def plot_pixel_effect():
    fig, axs = plt.subplots(nrows=2, ncols=2, figsize=(8, 8))
    #fig = plt.figure()
    #ax = fig.add_subplot(111)
    out = fft(ifftshift(f_full))
    freqs = fftfreq(len(f_full), d=0.01) # spacing, Ang
    sfreqs = fftshift(freqs)
    taper = gauss_taper(freqs, sigma=0.0496) #Ang, corresponds to 2.89 km/s at 5150A.
    tout = out * taper

    for ax in axs[:, 0]:
        ax.plot(sfreqs, fftshift(tout) / tout[0])
        ax.plot(sfreqs, fftshift(taper))
        ax.plot(sfreqs, 0.0395 * np.sinc(0.0395 * sfreqs))
        ax.plot(sfreqs, 0.0472 * np.sinc(0.0472 * sfreqs))

    for ax in axs[:, 1]:
        ax.plot(sfreqs, 10 * np.log10(np.abs(fftshift(tout) / tout[0])))
        ax.plot(sfreqs, 10 * np.log10(np.abs(fftshift(taper))))
        ax.plot(sfreqs, 10 * np.log10(np.abs(0.0395 * np.sinc(0.0395 * sfreqs))))
        ax.plot(sfreqs, 10 * np.log10(np.abs(0.0472 * np.sinc(0.0472 * sfreqs))))

    axs[0, 0].set_ylabel("Norm amp")
    axs[1, 0].set_ylabel("Norm amp")
    axs[0, 1].set_ylabel("dB")
    axs[1, 1].set_ylabel("dB")
    for ax in axs.flatten():
        ax.set_xlabel(r"cycles/$\lambda$")
    plt.show()
Example #17
def data_generation(S0,example):
    if example==1:
        x = np.arange(-1,1,0.05)
        y = 0.5+0.25*np.sin(3*np.pi*x)
        x = np.atleast_2d(x)
    if example==2:
        x = np.arange(-1,1,0.05)
        y=signal.square(2*np.pi*x,0.5)
        x = np.atleast_2d(x)  
    if example==3:
        axisx=np.arange(-2,2,0.1)
        axisy=np.arange(-2,2,0.1)
        x=np.zeros([S0,1])
        i=0
        while i<len(axisx):
            j=0
            while j<len(axisy):
                x=np.concatenate((x,np.reshape([axisx[i],axisy[j]],(S0,1))),axis=1)
                j+=1
            i+=1
        X,Y=np.meshgrid(axisx,axisy)
        z = np.sinc(X)*np.sinc(Y) 
        y = z.flatten()
        x = np.delete(x,0,1)
    if example==4:
        Q=400
        #x=np.random.normal(0, 0.5, [S0,Q])
        x=np.random.uniform(-1,1,(S0,Q))
        y=np.zeros([Q,])
        i=0
        while i<Q:
            y[i]=np.sin(2*np.pi*x[0,i])*x[1,i]**2*x[2,i]**3*x[3,i]**4*np.exp(-x[0,i]-x[1,i]-x[2,i]-x[3,i])
            i+=1
    return x,y
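A minimal sketch of the 2-D sinc target (example == 3), where S0 = 2 is the input dimension; only the shapes are checked here:

# Usage sketch: build the 2-D sinc dataset z = sinc(X)*sinc(Y) on a 40x40 grid.
x, y = data_generation(2, 3)   # S0 = 2 inputs, example 3
print(x.shape, y.shape)        # (2, 1600) and (1600,)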
Example #18
File: wavepy.py Project: jpbos/WavePy
 def PointSource(self):
     #Schmidt Point Source
     DROI = 4.0 *self.DRx                 #Observation plane region [m]
     D1 = self.wvl * self.PropDist / DROI #Central Lobe width [m]
     R = self.PropDist                         #Radius of curvature at wavefront [m]
     temp = np.exp(-1j*self.k/(2*R) * (self.r1**2)) / (D1**2)
     pt = temp * np.sinc((self.x1/D1)) * np.sinc((self.y1/D1)) * np.exp(-(self.r1/(4.0 * D1))**2)        
     return pt
Example #19
def calc_window(shape):
	"""Compute fourier-space window function. Like the other fourier-based
	functions in this module, equi-spaced pixels are assumed. Since the
	window function is separable, it is returned as an x and y part,
	such that window = wy[:,None]*wx[None,:]."""
	wy = np.sinc(np.fft.fftfreq(shape[-2]))
	wx = np.sinc(np.fft.fftfreq(shape[-1]))
	return wy, wx
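A short usage sketch: deconvolve the separable pixel window from a 2-D map in Fourier space (the random map is a stand-in):

# Usage sketch: divide a map's 2-D FFT by the separable pixel window.
import numpy as np

m = np.random.randn(128, 256)
wy, wx = calc_window(m.shape)
fmap = np.fft.fft2(m) / (wy[:, None] * wx[None, :])
print(fmap.shape)  # (128, 256)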
Example #20
File: 25.marker.py Project: gree2/hobby
def plot1():
    """plot1"""
    X = np.linspace(-6, 6, 1024)
    Y1 = np.sinc(X)
    Y2 = np.sinc(X) + 1

    plt.plot(X, Y1, marker='*', color='b', markevery=32)
    plt.plot(X, Y2, marker='^', color='r', markevery=32)
    plt.show()
Example #21
File: TIDES.py Project: POFK/Tide
 def AutoPowerSpectrum(self,data,window=True):
     x = np.fft.fftfreq(N,1./N)  # x: 0,1,2,...,512,-511,...,-2,-1
     delta_k = np.fft.fftn(data)
     if window==True:
         window_k = np.sinc(1. / N * x[:,None,None]) * np.sinc(1. / N * x[None,:,None]) * np.sinc(1. / N * x[None,None,:])
         Pk = (np.abs(delta_k) / window_k)**2
     else :
         Pk=np.abs(delta_k)**2
     return Pk
Example #22
File: sampling.py Project: jpragash/dtcwt
def _upsample_columns(X, method=None):
    """
    The centre of columns of X, an M-columned matrix, are assumed to have co-ordinates
    { 0, 1, 2, ... , M-1 } which means that the up-sampled matrix's columns should sample
    from { -0.25, 0.25, 0.75, ... , M-1.25 }. We can view that as an interleaved set of two
    *convolutions* of X. The first, A, using a kernel equivalent to sampling the { -0.25, 0.75,
    1.75, 2.75, ... M-1.25 } columns and the second, B, sampling the { 0.25, 1.25, ... , M-0.75 }
    columns.
    """
    if method is None:
        method = 'lanczos'
    
    X = np.atleast_2d(asfarray(X))
    
    out_shape = list(X.shape)
    out_shape[1] *= 2
    output = np.zeros(out_shape, dtype=X.dtype)
    
    # Centres of sampling for A and B convolutions
    M = X.shape[1]
    A_columns = np.linspace(-0.25, M-1.25, M)
    B_columns = A_columns + 0.5
    
    # For A columns sample at x = ceil(x) - 0.25 with ceil(x) = { 0, 1, 2, ..., M-1 }
    # For B columns sample at x = floor(x) + 0.25 with floor(x) = { 0, 1, 2, ..., M-1 }
    int_columns = np.linspace(0, M-1, M)
    
    if method == 'lanczos':
        # Lanczos kernel width
        a = 3.0
        sample_offsets = np.arange(-a, a+1)
       
        # For A: if i = ceil(x) + di, => ceil(x) - i = -0.25 - di
        # For B: if i = floor(x) + di, => floor(x) - i = 0.25 - di
        l_as = np.sinc(-0.25-sample_offsets)*np.sinc((-0.25-sample_offsets)/a)   
        l_bs = np.sinc(0.25-sample_offsets)*np.sinc((0.25-sample_offsets)/a)
    elif method == 'nearest':
        # Nearest neighbour kernel width is 1
        sample_offsets = [0,]
        l_as = l_bs = [1,]
    elif method == 'bilinear':
        # Bilinear kernel width is technically 2 but we need to offset the kernels differently
        # for A and B columns:
        sample_offsets = [-1,0,1]
        l_as = [0.25, 0.75, 0]
        l_bs = [0, 0.75, 0.25]
    else:
        raise ValueError('Unknown interpolation mode: {0}'.format(method))
    
    # Convolve
    for di, l_a, l_b in zip(sample_offsets, l_as, l_bs):
        columns = reflect(int_columns + di, -0.5, M-0.5).astype(int)
        
        output[:,0::2,...] += l_a * X[:,columns,...]
        output[:,1::2,...] += l_b * X[:,columns,...]
    
    return output
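The A and B kernels above are ordinary Lanczos taps evaluated at the ±0.25-pixel offsets. A standalone sketch that reproduces just the weight computation (no dtcwt imports needed) and checks that each set of taps sums to roughly one:

# Standalone sketch of the Lanczos-3 taps used for the A and B column convolutions.
import numpy as np

a = 3.0
sample_offsets = np.arange(-a, a + 1)
l_as = np.sinc(-0.25 - sample_offsets) * np.sinc((-0.25 - sample_offsets) / a)
l_bs = np.sinc(0.25 - sample_offsets) * np.sinc((0.25 - sample_offsets) / a)
print(l_as.sum(), l_bs.sum())  # both close to (but not exactly) 1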
Example #23
def compile_2nd_matrix_double_rotor(matrix, Rx2_eigen, smax1, smax2):
    """Generate the rotated 2nd degree Frame Order matrix for the double rotor model.

    The cone axis is assumed to be parallel to the z-axis in the eigenframe.


    @param matrix:      The Frame Order matrix, 2nd degree to be populated.
    @type matrix:       numpy 9D, rank-2 array
    @param Rx2_eigen:   The Kronecker product of the eigenframe rotation matrix with itself.
    @type Rx2_eigen:    numpy 9D, rank-2 array
    @param smax1:       The maximum torsion angle for the first rotor.
    @type smax1:        float
    @param smax2:       The maximum torsion angle for the second rotor.
    @type smax2:        float
    """

    # Zeros.
    matrix[:] = 0.0

    # Repetitive trig calculations.
    sinc_smax1 = sinc(smax1/pi)
    sinc_2smax1 = sinc(2.0*smax1/pi)
    sinc_2smax1p1 = sinc_2smax1 + 1.0
    sinc_2smax1n1 = sinc_2smax1 - 1.0
    sinc_smax2 = sinc(smax2/pi)
    sinc_2smax2 = sinc(2.0*smax2/pi)
    sinc_2smax2p1 = sinc_2smax2 + 1.0
    sinc_2smax2n1 = sinc_2smax2 - 1.0

    # Diagonal.
    matrix[0, 0] = sinc_2smax1 + 1.0
    matrix[1, 1] = 2.0 * sinc_smax1 * sinc_smax2
    matrix[2, 2] = sinc_smax2 * sinc_2smax1p1
    matrix[3, 3] = matrix[1, 1]
    matrix[4, 4] = sinc_2smax2p1
    matrix[5, 5] = sinc_smax1 * sinc_2smax2p1
    matrix[6, 6] = matrix[2, 2]
    matrix[7, 7] = matrix[5, 5]
    matrix[8, 8] = 0.5 * sinc_2smax1p1 * sinc_2smax2p1

    # Off diagonal set 1.
    matrix[4, 0] = 0.5 * sinc_2smax1n1 * sinc_2smax2n1
    matrix[0, 8] = -sinc_2smax1n1
    matrix[8, 0] = -0.5 * sinc_2smax1n1 * sinc_2smax2p1
    matrix[4, 8] = -0.5 * sinc_2smax1p1 * sinc_2smax2n1
    matrix[8, 4] = -sinc_2smax2n1

    # Off diagonal set 2.
    matrix[2, 6] = matrix[6, 2] = sinc_smax2 * sinc_2smax1n1
    matrix[5, 7] = matrix[7, 5] = sinc_smax1 * sinc_2smax2n1

    # Divide by 2.
    multiply(0.5, matrix, matrix)

    # Rotate and return the frame order matrix.
    return rotate_daeg(matrix, Rx2_eigen)
Example #24
def compile_2nd_matrix_pseudo_ellipse(matrix, Rx2_eigen, theta_x, theta_y, sigma_max):
    """Generate the 2nd degree Frame Order matrix for the pseudo-ellipse.

    @param matrix:      The Frame Order matrix, 2nd degree to be populated.
    @type matrix:       numpy 9D, rank-2 array
    @param Rx2_eigen:   The Kronecker product of the eigenframe rotation matrix with itself.
    @type Rx2_eigen:    numpy 9D, rank-2 array
    @param theta_x:     The cone opening angle along x.
    @type theta_x:      float
    @param theta_y:     The cone opening angle along y.
    @type theta_y:      float
    @param sigma_max:   The maximum torsion angle.
    @type sigma_max:    float
    """

    # The surface area normalisation factor.
    fact = 12.0 * pec(theta_x, theta_y)

    # Invert.
    if fact == 0.0:
        fact = 1e100
    else:
        fact = 1.0 / fact

    # Sigma_max part.
    if sigma_max == 0.0:
        fact2 = 1e100
    else:
        fact2 = fact / (2.0 * sigma_max)

    # Diagonal.
    matrix[0, 0] = fact * (4.0*pi*(sinc(2.0*sigma_max/pi) + 2.0) + quad(part_int_daeg2_pseudo_ellipse_00, -pi, pi, args=(theta_x, theta_y, sigma_max), full_output=1)[0])
    matrix[1, 1] = fact * (4.0*pi*sinc(2.0*sigma_max/pi) + quad(part_int_daeg2_pseudo_ellipse_11, -pi, pi, args=(theta_x, theta_y, sigma_max), full_output=1)[0])
    matrix[2, 2] = fact * 2.0*sinc(sigma_max/pi) * (5.0*pi - quad(part_int_daeg2_pseudo_ellipse_22, -pi, pi, args=(theta_x, theta_y, sigma_max), full_output=1)[0])
    matrix[3, 3] = matrix[1, 1]
    matrix[4, 4] = fact * (4.0*pi*(sinc(2.0*sigma_max/pi) + 2.0) + quad(part_int_daeg2_pseudo_ellipse_44, -pi, pi, args=(theta_x, theta_y, sigma_max), full_output=1)[0])
    matrix[5, 5] = fact * 2.0*sinc(sigma_max/pi) * (5.0*pi - quad(part_int_daeg2_pseudo_ellipse_55, -pi, pi, args=(theta_x, theta_y, sigma_max), full_output=1)[0])
    matrix[6, 6] = matrix[2, 2]
    matrix[7, 7] = matrix[5, 5]
    matrix[8, 8] = 4.0 * fact * (2.0*pi - quad(part_int_daeg2_pseudo_ellipse_88, -pi, pi, args=(theta_x, theta_y, sigma_max), full_output=1)[0])

    # Off diagonal set 1.
    matrix[0, 4] = fact * (4.0*pi*(2.0 - sinc(2.0*sigma_max/pi)) + quad(part_int_daeg2_pseudo_ellipse_04, -pi, pi, args=(theta_x, theta_y, sigma_max), full_output=1)[0])
    matrix[4, 0] = fact * (4.0*pi*(2.0 - sinc(2.0*sigma_max/pi)) + quad(part_int_daeg2_pseudo_ellipse_40, -pi, pi, args=(theta_x, theta_y, sigma_max), full_output=1)[0])
    matrix[0, 8] = 4.0 * fact * (2.0*pi - quad(part_int_daeg2_pseudo_ellipse_08, -pi, pi, args=(theta_x, theta_y, sigma_max), full_output=1)[0])
    matrix[8, 0] = fact * (8.0*pi + quad(part_int_daeg2_pseudo_ellipse_80, -pi, pi, args=(theta_x, theta_y, sigma_max), full_output=1)[0])
    matrix[4, 8] = 4.0 * fact * (2.0*pi - quad(part_int_daeg2_pseudo_ellipse_48, -pi, pi, args=(theta_x, theta_y, sigma_max), full_output=1)[0])
    matrix[8, 4] = fact * (8.0*pi - quad(part_int_daeg2_pseudo_ellipse_84, -pi, pi, args=(theta_x, theta_y, sigma_max), full_output=1)[0])

    # Off diagonal set 2.
    matrix[1, 3] = matrix[3, 1] = fact * (4.0*pi*sinc(2.0*sigma_max/pi) + quad(part_int_daeg2_pseudo_ellipse_13, -pi, pi, args=(theta_x, theta_y, sigma_max), full_output=1)[0])
    matrix[2, 6] = matrix[6, 2] = -fact * 4.0 * sinc(sigma_max/pi) * (2.0*pi + quad(part_int_daeg2_pseudo_ellipse_26, -pi, pi, args=(theta_x, theta_y, sigma_max), full_output=1)[0])
    matrix[5, 7] = matrix[7, 5] = -fact * 4.0 * sinc(sigma_max/pi) * (2.0*pi + quad(part_int_daeg2_pseudo_ellipse_57, -pi, pi, args=(theta_x, theta_y, sigma_max), full_output=1)[0])

    # Rotate and return the frame order matrix.
    return rotate_daeg(matrix, Rx2_eigen)
Example #25
File: source.py Project: uwoseis/zephyr
    def kws(self, offset, aZi, aXi):
        '''
        Finds 2D source terms to approximate a band-limited point source, based on
        Hicks, Graham J. (2002) Arbitrary source and receiver positioning in finite-difference
            schemes using Kaiser windowed sinc functions. Geophysics (67) 1, 156-166.
        KaiserWindowedSinc(ireg, offset) --> 2D ndarray of size (2*ireg+1, 2*ireg+1)
        Input offset is the 2D offsets in fractional gridpoints between the source location and
        the nearest node on the modelling grid.

        Args:
            offset (tuple): Distance of the centre of the source region from the true source
                            point (in 2D coordinates, in units of cells).

        Returns:
            np.ndarray: Interpolated source region of size (2*ireg+1, 2*ireg+1)
        '''

        try:
            b = self.HC_KAISER.get(self.ireg)
        except KeyError:
            print('Kaiser windowed sinc function not implemented for half-width of %d!'%(self.ireg,))
            raise

        freg = 2*self.ireg+1

        xOffset, zOffset = offset

        # Grid from 0 to freg-1
        Zi, Xi = np.mgrid[:freg,:freg]

        Zi, Xi = self.modifyGrid(Zi, Xi, aZi, aXi)

        # Distances from source point
        dZi = (zOffset + self.ireg - Zi)
        dXi = (xOffset + self.ireg - Xi)

        # Taper terms for decay function
        with warnings.catch_warnings():
            warnings.simplefilter('ignore')
            tZi = np.nan_to_num(np.sqrt(1 - (dZi / self.ireg)**2))
            tXi = np.nan_to_num(np.sqrt(1 - (dXi / self.ireg)**2))
            tZi[tZi == np.inf] = 0
            tXi[tXi == np.inf] = 0

        # Actual tapers for Kaiser window
        taperZ = bessi0(b*tZi) / bessi0(b)
        taperX = bessi0(b*tXi) / bessi0(b)

        # Windowed sinc responses in Z and X
        responseZ = np.sinc(dZi) * taperZ
        responseX = np.sinc(dXi) * taperX

        # Combined 2D source response
        result = responseX * responseZ

        return result
Example #26
File: pixelgrid.py Project: rmjarvis/Piff
    def _kernel1d(self, u):
        """ Calculate the 1d interpolation kernel at each value in array u.

        :param u: 1d array of (u_dest-u_src) spanning the footprint of the kernel.

        :returns: interpolation kernel values at these grid points
        """
        # Normalize Lanczos to unit sum over kernel elements
        k = np.sinc(u) * np.sinc(u/self.order)
        return k / np.sum(k,axis=1)[:,np.newaxis]
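A standalone sketch of the same unit-sum Lanczos kernel outside the class, assuming order 3 and a single row of offsets:

# Standalone sketch of the normalized Lanczos kernel (order = 3 assumed).
import numpy as np

order = 3
u = (np.arange(-order, order + 1) - 0.3)[np.newaxis, :]  # one row of u_dest - u_src
k = np.sinc(u) * np.sinc(u / order)
k = k / np.sum(k, axis=1)[:, np.newaxis]
print(k.sum())  # 1.0 by construction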
def transformEq(numPoints, coefs, r):
    r=0.6
    plotBasis = np.linspace(0,1,numPoints)
    func = np.ones(numPoints) * coefs[0] * 0.5
    m = len(coefs) // 2
    for i in range(m):
        n=i+1
        func += np.cos(2*n*np.pi*plotBasis)*coefs[2*i+1] * np.sinc(1.0*n/(m+1))**1 * r**n
        func += np.sin(2*n*np.pi*plotBasis)*coefs[2*i+2] * np.sinc(1.0*n/(m+1))**1 * r**n
        
    return func
Example #28
 def Lanczos(self,dx):
     """
     Lanczos filter weights
     
     !!!Need to check this!!!
     """        
     a = self.p
     
     Gtmp = np.sinc(dx) * np.sinc(dx/a)
     
     return Gtmp / np.sum(Gtmp)
Example #29
File: TIDES.py Project: POFK/Tide
 def CrossPowerSpectrum(self,data1,data2,window=False):
     '''data1:delta,data2:kappa'''
     x = np.fft.fftfreq(N,1./N)  # x: 0,1,2,...,512,-511,...,-2,-1
     delta_k1 = np.fft.fftn(data1)
     window_k = np.sinc(1. / N * x[:,None,None]) * np.sinc(1. / N * x[None,:,None]) * np.sinc(1. / N * x[None,None,:])
     delta_k1=delta_k1/window_k
     delta_k2 = np.fft.fftn(data2)
     if window==True:
         delta_k2=delta_k2/window_k
     Pk=(delta_k1.conjugate()*delta_k2+delta_k2.conjugate()*delta_k1)/2
     return Pk.real
Example #30
def Ormsby(f,t):
    assert len(f) == 4, 'Ormsby wavelet needs 4 frequencies as input'
    f = np.sort(f) # Ormsby wavelet frequencies must be in increasing order
    pif   = np.pi*f
    den1  = pif[3] - pif[2]
    den2  = pif[1] - pif[0]
    term1 = (pif[3]*np.sinc(pif[3]*t))**2 - (pif[2]*np.sinc(pif[2]*t))**2
    term2 = (pif[1]*np.sinc(pif[1]*t))**2 - (pif[0]*np.sinc(pif[0]*t))**2

    wav   = term1/den1 - term2/den2
    return wav
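A minimal usage sketch for Ormsby on a symmetric time axis; the corner frequencies are illustrative, in units consistent with 1/t:

# Usage sketch: Ormsby wavelet on a +/-0.1 s axis with illustrative corner frequencies.
import numpy as np

t = np.arange(-0.1, 0.1, 0.001)      # seconds
w = Ormsby([5., 10., 40., 45.], t)   # f1 < f2 < f3 < f4
print(t.size, w.size)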
Example #31
def func(x, shift, noise):
    y = numpy.sinc(x / 2 + shift) + 0.04 * x
    for i in range(len(y)):
        y[i] += noise * numpy.random.randn(1)
    return y
Example #32
    pulse[0] = bit * 2 - 1  # set the first value to either a 1 or -1
    pulse_train = np.concatenate(
        (pulse_train, pulse))  # add the 8 samples to the signal
fig, ax = plt.subplots(1, figsize=(8, 2))  # 7 is nearly full width
plt.plot(pulse_train, '.-')
plt.grid(True)
fig.savefig('/tmp/time-sync-original-data.svg', bbox_inches='tight')

original_data = pulse_train  # save for plotting later

# Create our raised-cosine filter
num_taps = 101
beta = 0.35
Ts = sps  # Assume sample rate is 1 Hz, so sample period is 1, so *symbol* period is 8
t = np.arange(-51, 52)  # remember it's not inclusive of final number
h = np.sinc(t / Ts) * np.cos(np.pi * beta * t / Ts) / (1 -
                                                       (2 * beta * t / Ts)**2)
# Plot filter taps
# plt.figure(1)
# plt.plot(t, h, '.')
# plt.grid(True)

# Filter our signal, in order to apply the pulse shaping
samples = np.convolve(pulse_train, h)
fig, ax = plt.subplots(1, figsize=(7, 3))  # 7 is nearly full width
symbols_to_plot = 10
plt.plot(samples[0:symbols_to_plot * sps + (num_taps - 1) // 2], '.-')
for i in range(symbols_to_plot):
    plt.plot([i * sps + num_taps // 2 + 1, i * sps + num_taps // 2 + 1],
             [min(samples), max(samples)], 'g')
plt.grid(True)
Example #33
def generate_cf32_pulse(numSamps, width=5, scaleFactor=0.3):
    x = np.linspace(-width, width, numSamps)
    pulse = np.sinc(x).astype(np.complex64)
    return pulse * scaleFactor
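A minimal sketch generating the complex64 test pulse and inspecting it; the sample count is arbitrary:

# Usage sketch: 1024-sample complex64 sinc pulse, scaled by the default factor 0.3.
import numpy as np

pulse = generate_cf32_pulse(1024)
print(pulse.dtype, pulse.size, float(np.abs(pulse).max()))  # peak magnitude ~ 0.3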
Example #34
def kepfilter(infile,
              outfile,
              passband,
              datacol='SAP_FLUX',
              function='boxcar',
              cutoff=1.0,
              plot=False,
              overwrite=False,
              verbose=False,
              logfile='kepfilter.log'):
    """
    kepfilter -- bandpass filtering of Kepler light curve data

    ``kepfilter`` applies a bandpass filter to Kepler light curve data. In the
    low bandpass option, the data is convolved with a function of
    user-specified width. Choices of convolution function are **boxcar**,
    **Gaussian** or **sinc**. In the high bandpass option the convolution minus
    the median of the convolution is subtracted from the original data. The
    filtered data is copied to a new FITS file with the same structure as the
    input file.

    Parameters
    ----------
    infile : str
        The name of a MAST standard format FITS file containing Kepler light
        curve data within the first data extension.
    outfile : str
        The name of the output FITS file. The output file is identical in
        format to the input file. The data to be filtered will be overwritten
        in the output file by its filtered version.
    datacol : str
        The name of the data column in the input FITS file to be filtered, e.g.
        SAP_FLUX, PDCSAP_FLUX, MOM_CENTR1 etc. A full list of
        archived data columns is provided in the Kepler Archive Manual.
    function : string
        The functional form of the bandpass convolution function.
        The options are:

        * boxcar

        * gauss

        * sinc
    cutoff : float
        The frequency of the bandpass cutoff in units of days-1.
    passband : str
        The type of filter to be applied. A low bandpass filter will suppress
        high-frequency signal shorter than the cutoff. A high bandpass filter
        will suppress low-frequency signal longer than the cutoff.
        The options are:

        * low

        * high
    plot : bool
        Plot the original light curve and the result of the filter?
    overwrite : bool
        Overwrite the output file? if overwrite is **False** and an existing
        file has the same name as outfile then the task will stop with an
        error.
    verbose : bool
        Print informative messages and warnings to the shell and logfile?
    logfile : str
        Name of the logfile containing error and warning messages.

    Examples
    --------

    .. code-block :: bash

        $ kepfilter kplr002436324-2009259160929_llc.fits kepfilter.fits --datacol 'SAP_FLUX' --function 'boxcar'
        --plot --verbose --overwrite

    .. image :: ../_static/images/api/kepfilter.png
        :align: center
    """
    ## log the call
    hashline = '--------------------------------------------------------------'
    kepmsg.log(logfile, hashline, verbose)
    call = ('KEPFILTER -- ' + ' infile={}'.format(infile) +
            ' outfile={}'.format(outfile) + ' datacol={}'.format(datacol) +
            ' function={}'.format(function) + ' cutoff={}'.format(cutoff) +
            ' passband={}'.format(passband) + ' plot={}'.format(plot) +
            ' overwrite={}'.format(overwrite) + ' verbose={}'.format(verbose) +
            ' logfile={}'.format(logfile))
    kepmsg.log(logfile, call + '\n', verbose)
    ## start time
    kepmsg.clock('KEPFILTER started at', logfile, verbose)
    ## overwrite output file
    if overwrite:
        kepio.overwrite(outfile, logfile, verbose)
    if kepio.fileexists(outfile):
        errmsg = 'ERROR -- KEPFILTER: {} exists. Use --overwrite'.format(
            outfile)
        kepmsg.err(logfile, errmsg, verbose)

    ## open input file
    instr = pyfits.open(infile, 'readonly')
    tstart, tstop, bjdref, cadence = kepio.timekeys(instr, infile, logfile,
                                                    verbose)
    try:
        work = instr[0].header['FILEVER']
        cadenom = 1.0
    except:
        cadenom = cadence

    ## fudge non-compliant FITS keywords with no values
    instr = kepkey.emptykeys(instr, infile, logfile, verbose)
    ## read table structure
    table = kepio.readfitstab(infile, instr[1], logfile, verbose)
    # read time and flux columns
    barytime = kepio.readtimecol(infile, table, logfile, verbose)
    flux = kepio.readsapcol(infile, table, logfile, verbose)
    # filter input data table
    try:
        nanclean = instr[1].header['NANCLEAN']
    except:
        naxis2 = 0
        for i in range(len(table.field(0))):
            if (np.isfinite(barytime[i]) and np.isfinite(flux[i])
                    and flux[i] != 0.0):
                table[naxis2] = table[i]
                naxis2 += 1
        instr[1].data = table[:naxis2]
        kepkey.new('NANCLEAN', True, 'NaN cadences removed from data',
                   instr[1], outfile, logfile, verbose)

    ## read table columns
    intime = (kepio.readtimecol(infile, instr[1].data, logfile, verbose) +
              bjdref)
    indata = kepio.readfitscol(infile, instr[1].data, datacol, logfile,
                               verbose) / cadenom
    ## define data sampling
    tr = 1.0 / (cadence / 86400)
    timescale = 1.0 / (cutoff / tr)
    ## define convolution function
    if function == 'boxcar':
        filtfunc = np.ones(int(np.ceil(timescale)))
    elif function == 'gauss':
        timescale /= 2
        dx = np.ceil(timescale * 10 + 1)
        filtfunc = kepfunc.gauss([1.0, dx / 2 - 1.0, timescale],
                                 np.linspace(0, dx - 1, dx))
    elif function == 'sinc':
        dx = np.ceil(timescale * 12 + 1)
        fx = (np.linspace(0, dx - 1, dx) - dx / 2 + 0.5) / timescale
        filtfunc = np.sinc(fx)

    filtfunc /= np.sum(filtfunc)
    ## pad time series at both ends with noise model
    ave, sigma = (np.mean(indata[:len(filtfunc)]),
                  np.std(indata[:len(filtfunc)]))
    padded = np.append(
        kepstat.randarray(
            np.ones(len(filtfunc)) * ave,
            np.ones(len(filtfunc)) * sigma), indata)
    ave, sigma = (np.mean(indata[-len(filtfunc):]),
                  np.std(indata[-len(filtfunc):]))
    padded = np.append(
        padded,
        kepstat.randarray(
            np.ones(len(filtfunc)) * ave,
            np.ones(len(filtfunc)) * sigma))
    ## convolve data
    convolved = np.convolve(padded, filtfunc, 'same')
    ## remove padding from the output array
    if function == 'boxcar':
        outdata = convolved[len(filtfunc):-len(filtfunc)]
    else:
        outdata = convolved[len(filtfunc):-len(filtfunc)]
    ## subtract low frequencies
    if passband == 'high':
        outmedian = np.median(outdata)
        outdata = indata - outdata + outmedian
    ## comment keyword in output file
    kepkey.history(call, instr[0], outfile, logfile, verbose)
    ## clean up x-axis unit
    intime0 = float(int(tstart / 100) * 100.0)
    if intime0 < 2.4e6: intime0 += 2.4e6
    ptime = intime - intime0
    xlab = 'BJD $-$ {}'.format(intime0)
    ## clean up y-axis units
    pout = indata * 1.0
    pout2 = outdata * 1.0
    nrm = len(str(int(np.nanmax(pout)))) - 1
    pout = pout / 10**nrm
    pout2 = pout2 / 10**nrm
    ylab = '10$^{}$ {}'.format(nrm, 'e$^-$ s$^{-1}$')
    ## data limits
    xmin = ptime.min()
    xmax = ptime.max()
    ymin = np.nanmin(pout)
    ymax = np.nanmax(pout)
    xr = xmax - xmin
    yr = ymax - ymin
    ptime = np.insert(ptime, [0], [ptime[0]])
    ptime = np.append(ptime, [ptime[-1]])
    pout = np.insert(pout, [0], [0.0])
    pout = np.append(pout, 0.0)
    pout2 = np.insert(pout2, [0], [0.0])
    pout2 = np.append(pout2, 0.0)
    ## plot light curve
    if plot:
        plt.figure()
        plt.clf()

        ## plot filtered data
        ax = plt.axes([0.06, 0.1, 0.93, 0.87])
        plt.gca().xaxis.set_major_formatter(
            plt.ScalarFormatter(useOffset=False))
        plt.gca().yaxis.set_major_formatter(
            plt.ScalarFormatter(useOffset=False))
        plt.plot(ptime, pout, color='#ff9900', linestyle='-', linewidth=1.0)
        plt.fill(ptime, pout, color='#ffff00', linewidth=0.0, alpha=0.2)
        if passband == 'low':
            plt.plot(ptime[1:-1],
                     pout2[1:-1],
                     color='#0000ff',
                     linestyle='-',
                     linewidth=1.0)
        else:
            plt.plot(ptime,
                     pout2,
                     color='#0000ff',
                     linestyle='-',
                     linewidth=1.0)
            plt.fill(ptime, pout2, color='#0000ff', linewidth=0.0, alpha=0.2)
        plt.xlabel(xlab, {'color': 'k'})
        plt.ylabel(ylab, {'color': 'k'})
        plt.xlim(xmin - xr * 0.01, xmax + xr * 0.01)
        if ymin >= 0.0:
            plt.ylim(ymin - yr * 0.01, ymax + yr * 0.01)
        else:
            plt.ylim(1.0e-10, ymax + yr * 0.01)
        plt.grid()
        # render plot
        plt.show()
    ## write output file
    for i in range(len(outdata)):
        instr[1].data.field(datacol)[i] = outdata[i]
    instr.writeto(outfile)
    ## close input file
    instr.close()
    ## end time
    kepmsg.clock('KEPFILTER completed at', logfile, verbose)
Example #35
def firwin(
    numtaps,
    cutoff,
    width=None,
    window="hamming",
    pass_zero=True,
    scale=True,
    nyq=1.0,
    fs=None,
    gpupath=True,
):
    """
    FIR filter design using the window method.

    This function computes the coefficients of a finite impulse response
    filter.  The filter will have linear phase; it will be Type I if
    `numtaps` is odd and Type II if `numtaps` is even.

    Type II filters always have zero response at the Nyquist frequency, so a
    ValueError exception is raised if firwin is called with `numtaps` even and
    having a passband whose right end is at the Nyquist frequency.

    Parameters
    ----------
    numtaps : int
        Length of the filter (number of coefficients, i.e. the filter
        order + 1).  `numtaps` must be odd if a passband includes the
        Nyquist frequency.
    cutoff : float or 1D array_like
        Cutoff frequency of filter (expressed in the same units as `fs`)
        OR an array of cutoff frequencies (that is, band edges). In the
        latter case, the frequencies in `cutoff` should be positive and
        monotonically increasing between 0 and `fs/2`.  The values 0 and
        `fs/2` must not be included in `cutoff`.
    width : float or None, optional
        If `width` is not None, then assume it is the approximate width
        of the transition region (expressed in the same units as `fs`)
        for use in Kaiser FIR filter design.  In this case, the `window`
        argument is ignored.
    window : string or tuple of string and parameter values, optional
        Desired window to use. See `cusignal.get_window` for a list
        of windows and required parameters.
    pass_zero : {True, False, 'bandpass', 'lowpass', 'highpass', 'bandstop'},
        optional
        If True, the gain at the frequency 0 (i.e. the "DC gain") is 1.
        If False, the DC gain is 0. Can also be a string argument for the
        desired filter type (equivalent to ``btype`` in IIR design functions).

        .. versionadded:: 1.3.0
           Support for string arguments.
    scale : bool, optional
        Set to True to scale the coefficients so that the frequency
        response is exactly unity at a certain frequency.
        That frequency is either:

        - 0 (DC) if the first passband starts at 0 (i.e. pass_zero
          is True)
        - `fs/2` (the Nyquist frequency) if the first passband ends at
          `fs/2` (i.e the filter is a single band highpass filter);
          center of first passband otherwise

    nyq : float, optional
        *Deprecated.  Use `fs` instead.*  This is the Nyquist frequency.
        Each frequency in `cutoff` must be between 0 and `nyq`. Default
        is 1.
    fs : float, optional
        The sampling frequency of the signal.  Each frequency in `cutoff`
        must be between 0 and ``fs/2``.  Default is 2.
    gpupath : bool, Optional
        Optional path for filter design. gpupath == False may be desirable if
        filter sizes are small.

    Returns
    -------
    h : (numtaps,) ndarray
        Coefficients of length `numtaps` FIR filter.

    Raises
    ------
    ValueError
        If any value in `cutoff` is less than or equal to 0 or greater
        than or equal to ``fs/2``, if the values in `cutoff` are not strictly
        monotonically increasing, or if `numtaps` is even but a passband
        includes the Nyquist frequency.

    See Also
    --------
    firwin2
    firls
    minimum_phase
    remez

    Examples
    --------
    Low-pass from 0 to f:

    >>> import cusignal
    >>> numtaps = 3
    >>> f = 0.1
    >>> cusignal.firwin(numtaps, f)
    array([ 0.06799017,  0.86401967,  0.06799017])

    Use a specific window function:

    >>> cusignal.firwin(numtaps, f, window='nuttall')
    array([  3.56607041e-04,   9.99286786e-01,   3.56607041e-04])

    High-pass ('stop' from 0 to f):

    >>> cusignal.firwin(numtaps, f, pass_zero=False)
    array([-0.00859313,  0.98281375, -0.00859313])

    Band-pass:

    >>> f1, f2 = 0.1, 0.2
    >>> cusignal.firwin(numtaps, [f1, f2], pass_zero=False)
    array([ 0.06301614,  0.88770441,  0.06301614])

    Band-stop:

    >>> cusignal.firwin(numtaps, [f1, f2])
    array([-0.00801395,  1.0160279 , -0.00801395])

    Multi-band (passbands are [0, f1], [f2, f3] and [f4, 1]):

    >>> f3, f4 = 0.3, 0.4
    >>> cusignal.firwin(numtaps, [f1, f2, f3, f4])
    array([-0.01376344,  1.02752689, -0.01376344])

    Multi-band (passbands are [f1, f2] and [f3,f4]):

    >>> cusignal.firwin(numtaps, [f1, f2, f3, f4], pass_zero=False)
    array([ 0.04890915,  0.91284326,  0.04890915])

    """
    if gpupath:
        pp = cp
    else:
        pp = np

    cutoff = pp.atleast_1d(cutoff) / float(nyq)

    # print("cutoff", cutoff.size)

    # Check for invalid input.
    if cutoff.ndim > 1:
        raise ValueError("The cutoff argument must be at most "
                         "one-dimensional.")
    if cutoff.size == 0:
        raise ValueError("At least one cutoff frequency must be given.")
    if cutoff.min() <= 0 or cutoff.max() >= 1:
        raise ValueError("Invalid cutoff frequency: frequencies must be "
                         "greater than 0 and less than nyq.")
    if pp.any(pp.diff(cutoff) <= 0):
        raise ValueError("Invalid cutoff frequencies: the frequencies "
                         "must be strictly increasing.")

    if width is not None:
        # A width was given.  Find the beta parameter of the Kaiser window
        # and set `window`.  This overrides the value of `window` passed in.
        atten = kaiser_atten(numtaps, float(width) / nyq)
        beta = kaiser_beta(atten)
        window = ("kaiser", beta)

    pass_nyquist = bool(cutoff.size & 1) ^ pass_zero
    if pass_nyquist and numtaps % 2 == 0:
        raise ValueError("A filter with an even number of coefficients must "
                         "have zero response at the Nyquist rate.")

    # Insert 0 and/or 1 at the ends of cutoff so that the length of cutoff
    # is even, and each pair in cutoff corresponds to passband.
    cutoff = pp.hstack(([0.0] * pass_zero, cutoff, [1.0] * pass_nyquist))

    # `bands` is a 2D array; each row gives the left and right edges of
    # a passband.
    bands = cutoff.reshape(-1, 2)

    if gpupath:
        win = get_window(window, numtaps, fftbins=False)
        h, hc = _firwin_kernel(win, numtaps, bands, bands.shape[0], scale)
        if scale:
            s = cp.sum(hc)
            h /= s
    else:
        try:
            win = signal.get_window(window, numtaps, fftbins=False)
        except NameError:
            raise RuntimeError("CPU path requires SciPy Signal's get_windows.")

        # Build up the coefficients.
        alpha = 0.5 * (numtaps - 1)
        m = np.arange(0, numtaps) - alpha
        h = 0
        for left, right in bands:
            h += right * np.sinc(right * m)
            h -= left * np.sinc(left * m)

        h *= win

        # Now handle scaling if desired.
        if scale:
            # Get the first passband.
            left, right = bands[0]
            if left == 0:
                scale_frequency = 0.0
            elif right == 1:
                scale_frequency = 1.0
            else:
                scale_frequency = 0.5 * (left + right)
            c = np.cos(np.pi * m * scale_frequency)
            s = np.sum(h * c)
            h /= s

    return h
Example #36
        y1 = f(x - T * n)
        sumY = sumY + y1
        if ax is not None:
            ax.plot(x, y1, color='C0', linestyle=':')
            ax.fill_between(x, 0, y1, facecolor='#999999', alpha=.5)

        if n != 0:
            y2 = f(x + T * n)
            sumY = sumY + y2
            if ax is not None:
                ax.plot(x, y2, color='C0', linestyle=':')
                ax.fill_between(x, 0, y2, facecolor='#999999', alpha=.5)
    return x, sumY


fsinc = lambda x, rate: np.sinc(x * rate)
f = lambda x: .4 * np.sin(.12 * (x - 3) + 1) + np.sin(.2 * (x - 2)) + np.cos(
    .5 * x) + np.sin(.7 * x + 3) + 1
T = 1
M = 2
# g for downsampling, M is the down rate
g = lambda x, M: np.where(
    x <= -M, 0,
    np.where(x < 0, 1.0 / (M**2) * x + 1.0 / M,
             np.where(x < M, 1.0 / M - 1.0 / (M**2) * x, 0)))
g2 = lambda x: g(x, 2)
g3 = lambda x: g(x, 3)
lowpass = lambda x: np.where(abs(x) > 2, 0, 1)

x = np.arange(-20, 20, T)
y = f(x)
Example #37
def eyediagram():  ## function used to display the eye diagrams of the pulses

	alpha_ojo = 0.9999999999999  ## a value close to 1 gives a cleaner diagram
	coseno_alzado2 = (np.sinc(t1) * np.cos(alpha_ojo*np.pi*t1))/(1-(4*alpha_ojo*alpha_ojo*t1*t1))
	T=5  ### period of the impulse train
	tren2=np.array(range(10000*T), dtype="int")

	## impulse train for odd period values
	contador=floor(T/2)
	for x in range(0,10000*T):

		if (contador==0):
			tren2[x]=randint(0,1)
			contador=floor(T/2)*2
			if (tren2[x]==0):
				tren2[x]=-1

		else:
			tren2[x]=0
			contador=contador-1


	# `sinc` and `t1` are assumed to be module-level: the sinc pulse array (e.g. np.sinc(t1)) and its time axis
	resultado3=np.convolve(sinc,tren2,'same')  ## convolution of the sinc pulse with the impulse train
	resultado4=np.convolve(coseno_alzado2,tren2,'same')  ## convolution of the raised-cosine pulse with the impulse train

	## first, show the eye diagrams produced by the convolution alone

	ojo_sinc=np.reshape(resultado3,(2*T,len(resultado3)//(2*T)), order='F')
	ojo_coseno_alzado=np.reshape(resultado4,(2*T,len(resultado4)//(2*T)), order='F')

	plt.subplot(2,1,1)
	plt.title('Eye diagram, sinc pulse')
	plt.xlabel('Time (s)')
	plt.ylabel('Amplitude')
	plt.grid(True)
	plt.plot(ojo_sinc,'b')

	plt.subplot(2,1,2)
	plt.title('Eye diagram, raised-cosine pulse')
	plt.xlabel('Time (s)')
	plt.ylabel('Amplitude')
	plt.grid(True)
	plt.plot(ojo_coseno_alzado,'b')

	plt.show()

	## now show the eye diagrams with AWGN noise added
	# Number of samples to use
	N = 50000
	# signal-to-noise ratio
	SNR = 20

	# Create some data with noise and a sinusoidal
	# variation.
	y = np.random.normal(0.0, 1.0/SNR, N) + 1.0

	ruido_sinc= y+resultado3
	ruido_rc=y+resultado4

	ojo_ruido_sinc=np.reshape(ruido_sinc,(2*T,len(ruido_sinc)//(2*T)), order='F')
	ojo_ruido_coseno_alzado=np.reshape(ruido_rc,(2*T,len(ruido_rc)//(2*T)), order='F')

	plt.subplot(2,1,1)
	plt.title('Eye diagram, sinc pulse with noise')
	plt.xlabel('Time (s)')
	plt.ylabel('Amplitude')
	plt.grid(True)
	plt.plot(ojo_ruido_sinc,'b')

	plt.subplot(2,1,2)
	plt.title('Eye diagram, raised-cosine pulse with noise')
	plt.xlabel('Time (s)')
	plt.ylabel('Amplitude')
	plt.grid(True)
	plt.plot(ojo_ruido_coseno_alzado,'b')

	plt.show()
Example #38
File: result.py Project: POFK/Tide
Inputfilename='/home/zhm/tides20/0.000halo.bin'
Outputfilename='/home/mtx/data/tide/halo_new/outdata/log_data/'
print Inputfilename
print Outputfilename
########################################load data########################################
halo_x=Tide.LoadData(filename=Inputfilename)
#####################add later of wiener filter and smooth######################
Kf=2*np.pi/(L)
x = np.fft.fftfreq(N,1./N)  # x: 0,1,2,...,512,-511,...,-2,-1
sum=halo_x.sum()
print sum
halo_x=(N**3)/sum*halo_x
halo_k=np.fft.fftn(halo_x)
#********************************************************************************
k=(x[:,None,None]**2.+x[None,:,None]**2.+x[None,None,:]**2.)**(1./2.)
window_k = np.sinc( 1./N* x[:,None,None]) * np.sinc( 1./N * x[None,:,None]) * np.sinc( 1./N * x[None,None,:])

Ph=L**3/N**6*np.abs(halo_k)**2
Tide.SaveDataHdf5(Ph,Outputfilename+'0.000halo00_Pk_halo.hdf5')
Ph=Ph*(np.exp(-0.5*(Kf*Kf)*k*k*sigma**2)/window_k)**2
W=Ph/(Ph+(L**3)/sum)  #wiener filter
#********************************************************************************
bias=np.sqrt(2.32)
del halo_x
halo_k=halo_k*W
########################## smooth and window function###########################
#k=(x[:,None,None]**2.+x[None,:,None]**2.+x[None,None,:]**2.)**(1./2.)
#window_k = np.sinc( 1./N* x[:,None,None]) * np.sinc( 1./N * x[None,:,None]) * np.sinc( 1./N * x[None,None,:])

halo_k=halo_k*(np.exp(-0.5*(Kf*Kf)*k*k*sigma**2))/window_k
deltag=np.fft.ifftn(halo_k).real
Example #39
wi = mu0 * c0 / np.sqrt(er)

# Interaction region dimensions (most simple case)
de = 8 * le
da = 8 * la
Lx = da
Ly = de

# Interaction region dimensions (refined)
#de = 10*le
#da = 10*la
#Lx = da
#Ly = de

# Cuboid phi, 2D
Phi_c = (np.sinc(Lx / 2 / np.pi *
                 (k - k * np.cos(phi_m) + pm * q * np.cos(alpha_m))) *
         np.sinc(Ly / 2 / np.pi *
                 (-k * np.sin(phi_m) + pm * q * np.sin(alpha_m))))

# Parallelogram phi, 2D
Phi_p = (np.sinc(da / 2 / np.pi / np.sin(alpha_m) *
                 (k - k * np.cos(phi_m) + pm * q * np.cos(alpha_m))) *
         np.sinc(de / 2 / np.pi / np.tan(alpha_m) *
                 (k - k *
                  (np.cos(phi_m) + np.sin(phi_m) * np.tan(alpha_m)) + pm * q *
                  (np.cos(alpha_m) + np.sin(alpha_m) * np.tan(alpha_m)))))

# Normal component of Poynting vector, cuboid
# This is the same as the magnitude for a circular boundary
Sn_c = (0.5 / wi * Ei0**2 * er**2 * k**3 * ph**2 * p0**2 / 8 / np.pi / r /
        kbm**2 * Lx**2 * Ly**2 * Phi_c**2)
Example #40
def sinc_cov(x, x_prime, variance=1., w=1.):
    """Sinc covariance function."""
    r = np.linalg.norm(x - x_prime, 2)
    return variance * np.sinc(np.pi * w * r)
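A minimal sketch building a covariance (Gram) matrix from sinc_cov over a 1-D input grid; the grid and hyperparameters are illustrative:

# Usage sketch: Gram matrix of the sinc covariance over 25 scalar inputs.
import numpy as np

xs = np.linspace(-3., 3., 25).reshape(-1, 1)   # one 1-D input per row
K = np.array([[sinc_cov(xi, xj, variance=1., w=0.5) for xj in xs] for xi in xs])
print(K.shape)  # (25, 25)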
Example #41
 def create_convolved_profiles(self, path):  #{{{
     if (os.path.isfile(path + 'convolved_profiles.npz')
             and not self.num.debugging) or (self.convolved_signal_arr
                                             is not None):
         raise RuntimeError('Convolved profiles already exist.')
     if (self.signal_arr is not None and self.theta_out_arr is not None):
         pass
     else:
         if os.path.isfile(path + 'profiles.npz'):
             f = np.load(path + 'profiles.npz')
             self.theta_out_arr = f['theta_out']
             self.signal_arr = f['signal']
         else:
             raise RuntimeError('Profiles have not already been computed.')
     if (self.num.Wiener_filter is not None):
         print 'Using Wiener filter ' + self.num.Wiener_filter
         self.convolve_with_Wiener = True
     else:
         self.convolve_with_Wiener = False
     if (self.num.pixel_radius is not None):
         print 'Convolving with circular pixel.'
         self.convolve_with_circular_pixel = True
     else:
         self.convolve_with_circular_pixel = False
     if (self.num.pixel_sidelength is not None):
         print 'Convolving with quadratic pixel.'
         self.convolve_with_quadratic_pixel = True
     else:
         self.convolve_with_quadratic_pixel = False
     if self.convolve_with_circular_pixel and self.convolve_with_quadratic_pixel:
         raise RuntimeError(
             'You want to convolve with circular AND quadratic pixels.')
     if self.num.gaussian_kernel_FWHM is not None:
         print 'Applying additional effective Gaussian smoothing.'
     if self.convolve_with_quadratic_pixel:
         if os.path.isfile('./constants/quadratic_pixel_W_ell.npz'):
             print 'Reading W_ell for quadratic pixel from file.'
             f = np.load('./constants/quadratic_pixel_W_ell.npz')
             pixel_ell = f['ell']
             pixel_W_ell = f['W_ell']
         else:
             print('Computing W_ell for quadratic pixel.')
             pixel_ell = np.logspace(-2., 3., base=10., num=int(1e5))
             pixel_W_ell = np.empty(len(pixel_ell))
             for ii in range(len(pixel_ell)):
                 B_ell_phi = lambda phi: (np.sinc(0.5 * pixel_ell[ii] * np.cos(phi)) *
                                          np.sinc(0.5 * pixel_ell[ii] * np.sin(phi)))
                 pixel_W_ell[ii], _ = quad(B_ell_phi, 0., np.pi / 4.)
             pixel_W_ell *= 4. / np.pi
             np.savez(path + 'quadratic_pixel_W_ell.npz',
                      ell=pixel_ell,
                      W_ell=pixel_W_ell)
         quadratic_pixel_window_function_interp = interp1d(
             pixel_ell,
             pixel_W_ell,
             kind='quadratic',
             bounds_error=False,
             fill_value=(1., 0.))
         quadratic_pixel_window_function = lambda ell: quadratic_pixel_window_function_interp(
             ell)
     DHTobj = GSL_DHT.DiscreteHankelTransform(self.num.Npoints_theta)
     DHTobj.init(0, 1.)
     self.convolved_signal_arr = np.empty(
         self.signal_arr.shape
     )  # assumes that theta is the last (3rd) direction
     for ii in range(self.signal_arr.shape[0]):  # logM-loop
         start = time()
         for jj in range(self.signal_arr.shape[1]):  # z-loop
             _, reci_signal = DHTobj.apply(self.signal_arr[ii, jj, :])
             Window = np.ones(self.num.Npoints_theta)
             if self.convolve_with_circular_pixel:
                 Window *= profiles.__circular_pixel_window_function(
                     self.num.scaled_reci_theta_grid *
                     self.num.pixel_radius / self.theta_out_arr[ii, jj])
             elif self.convolve_with_quadratic_pixel:
                 Window *= quadratic_pixel_window_function(
                     self.num.scaled_reci_theta_grid * 0.5 *
                     self.num.pixel_sidelength / self.theta_out_arr[ii, jj])
             else:
                 warn(
                     'You called create_convolved_profiles without actually convolving',
                     UserWarning)
             if self.num.gaussian_kernel_FWHM is not None:
                 Window *= profiles._gaussian_pixel_window_function(
                     self.num.scaled_reci_theta_grid *
                     self.num.gaussian_kernel_FWHM /
                     self.theta_out_arr[ii, jj])
             if self.convolve_with_Wiener:
                 Window *= self.num.Wiener_filter(
                     self.num.scaled_reci_theta_grid /
                     self.theta_out_arr[ii, jj])
             reci_signal = reci_signal * Window
             _, self.convolved_signal_arr[ii,
                                          jj, :] = DHTobj.apply(reci_signal)
             self.convolved_signal_arr[ii, jj, :] *= (
                 self.num.scaled_reci_theta_grid[-1]**2.)
             d = np.diff(self.convolved_signal_arr[ii, jj, :])
         end = time()
         if (ii % 4 == 0) and self.num.verbose:
             print(str((end - start) / 60. * (self.signal_arr.shape[0] - ii)) +
                   ' minutes remaining in create_convolved_profiles.')
     np.savez(path + 'convolved_profiles.npz',
              theta_out=self.theta_out_arr,
              convolved_signal=self.convolved_signal_arr)
예제 #42
def generate_mfp2DSym(**argv):

    #load data
    data = load_data('mfp')
    Kacc = data['Kacc']
    mfp_bulk = data['mfp']
    kappa_bulk = Kacc[-1]
    kappa_bulk = np.zeros_like(Kacc)
    kappa_bulk[0] = Kacc[0]
    for n in range(len(Kacc) - 1):
        kappa_bulk[n + 1] = Kacc[n + 1] - Kacc[n]

    #pruning--
    I = np.where(mfp_bulk > 0)
    kappa_bulk = kappa_bulk[I]
    mfp_bulk = mfp_bulk[I]
    kappa = np.eye(3) * np.sum(kappa_bulk)

    #Get options----
    n_phi = int(argv.setdefault('n_phi', 48))
    n_mfp = int(argv.setdefault('n_mfp', 50))
    n_theta = int(argv.setdefault('n_theta', 24))

    nm = n_phi * n_mfp

    #Create sampled MFPs
    n_mfp_bulk = len(mfp_bulk)
    mfp_sampled = np.logspace(min([-2, np.log10(min(mfp_bulk) * 0.99)]),
                              np.log10(max(mfp_bulk) * 1.01),
                              n_mfp)  #min MFP = 1e-2 nm

    #Polar Angle---------
    Dphi = 2 * np.pi / n_phi
    #phi = np.linspace(Dphi/2.0,2.0*np.pi-Dphi/2.0,n_phi,endpoint=True)
    phi = np.linspace(0, 2.0 * np.pi, n_phi, endpoint=False)
    #--------------------

    #Azimuthal Angle------------------------------
    Dtheta = np.pi / n_theta / 2.0
    theta = np.linspace(Dtheta / 2.0, np.pi / 2.0 - Dtheta / 2.0, n_theta)
    dtheta = 2.0 * np.sin(Dtheta / 2.0) * np.sin(theta)
    domega = np.outer(dtheta, Dphi * np.ones(n_phi))

    #Compute directions---
    polar = np.array([np.sin(phi), np.cos(phi), np.zeros(n_phi)]).T
    azimuthal = np.array([np.sin(theta), np.sin(theta), np.zeros(n_theta)]).T
    direction = np.einsum('ij,kj->ikj', azimuthal, polar)

    #Compute average---
    ftheta = (1 - np.cos(2 * theta) * np.sinc(Dtheta / np.pi)) / (
        np.sinc(Dtheta / 2 / np.pi) * (1 - np.cos(2 * theta)))
    fphi = np.sinc(Dphi / 2.0 / np.pi)
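    # fphi appears to be the exact bin-average of cos(phi) over an angular bin of
    # width Dphi: (1/Dphi) * int_{-Dphi/2}^{+Dphi/2} cos(phi) dphi = sin(Dphi/2)/(Dphi/2),
    # and since np.sinc(x) = sin(pi*x)/(pi*x), that average equals np.sinc(Dphi/(2*pi)).
    # ftheta seems to play the analogous role for the polar-angle bins.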
    polar_ave = polar * fphi
    azimuthal_ave = np.array(
        [ftheta * np.sin(theta), ftheta * np.sin(theta),
         np.zeros(n_theta)]).T
    direction_ave = np.einsum('ij,kj->ikj', azimuthal_ave, polar_ave)
    direction_int = np.einsum('ijl,ij->ijl', direction_ave, domega)
    #------------------------------------------------------

    n_mfp_bulk = len(mfp_bulk)
    n_mfp = argv.setdefault('n_mfp', 100)

    mfp = np.logspace(np.log10(max([min(mfp_bulk), 1e-11])),
                      np.log10(max(mfp_bulk) * 1.01), n_mfp)

    n_mfp = len(mfp)

    temp_coeff_p, temp_coeff = np.zeros((2, n_mfp, n_phi))
    kappa_directional_p, kappa_directional = np.zeros((2, n_mfp, n_phi, 3))
    #suppression_p,suppression = np.zeros((2,n_mfp,n_phi,n_mfp_bulk))

    dirr = np.einsum('tpi,tp->tpi', direction_ave[:, :, :2], domega)

    kdp = np.zeros((n_mfp, n_phi, 2))
    kd = np.zeros((n_mfp, n_phi, 2))
    tcp = np.zeros((n_mfp, n_phi))
    tc = np.zeros((n_mfp, n_phi))
    p = np.arange(n_phi)

    g1 = kappa_bulk / mfp_bulk
    g2 = kappa_bulk / mfp_bulk / mfp_bulk
    g3 = ftheta * np.sin(theta)
    n_tot = n_mfp_bulk

    block = n_tot // comm.size
    rr = range(block *
               comm.rank, n_tot) if comm.rank == comm.size - 1 else range(
                   block * comm.rank, block * (comm.rank + 1))

    #change to mfp

    for t in range(n_theta):

        for m in rr:

            (m1, a1, m2, a2) = get_linear_indexes(mfp, mfp_bulk[m] * g3[t])

            tmp = g1[m] * dirr[t]
            kdp[m1] += a1 * tmp
            kdp[m2] += a2 * tmp

            #Temperature
            tmp = g2[m] * domega[t]
            tcp[m1] += a1 * tmp
            tcp[m2] += a2 * tmp

            #Suppression
            #tmp  = dirr[t,:,0]/mfp_bulk[m]
            #sp[m1,:,m] +=  a1 * tmp
            #sp[m2,:,m] +=  a2 * tmp

        comm.Allreduce([kdp, MPI.DOUBLE], [kd, MPI.DOUBLE], op=MPI.SUM)
        comm.Allreduce([tcp, MPI.DOUBLE], [tc, MPI.DOUBLE], op=MPI.SUM)
        #comm.Allreduce([sp,MPI.DOUBLE],[s,MPI.DOUBLE],op=MPI.SUM)

    kd *= 2 * 3 / 4.0 / np.pi
    #s *= 3*2/4.0/np.pi
    tc /= np.sum(tc)

    #Test for bulk---

    if comm.rank == 0:
        kappa_bulk = np.zeros((2, 2))
        for m in range(n_mfp):
            for p in range(n_phi):
                kappa_bulk += mfp[m] * np.outer(kd[m, p], polar_ave[p, :2])

    #replicate bulk values---

    #Wod = np.tile(tc,(nm,1))

    #angle_map = np.arange(n_phi)
    #angle_map = np.repeat(angle_map,n_mfp)

    #repeat angle---
    #kappa_directional[:,:,2] = 0 #Enforce zeroflux on z (for visualization purposed)
    #F = np.einsum('m,pi->mpi',mfp,polar_ave)
    rhs_average = mfp_sampled * mfp_sampled / 2
    a = np.zeros((3, 3))
    for i in range(len(polar_ave)):
        a += np.outer(polar_ave[i],
                      polar_ave[i]) / 2 / np.pi * 2 * np.pi / n_phi
    rhs_average = mfp_sampled * mfp_sampled
    rhs_average *= a[0, 0]

    #Final----
    return {'tc':tc,\
            'sigma':kd,\
            'kappa':kappa,\
            'mfp_average':rhs_average*1e18,\
            'VMFP':polar_ave[:,:2],\
            'mfp_sampled':mfp,\
            'sampling': np.array([n_phi,n_theta,n_mfp]),\
            'model':np.array([5]),\
            'suppression':np.zeros(1),\
            'kappam':kappa_bulk}
예제 #43
import numpy as np
import matplotlib.pyplot as plt

x = np.linspace(0, 7, 100)

for i in range(7):
    yi = np.sinc(x - i)
    plt.plot(x, yi, c='blue')

plt.xlim([0, 7])
plt.ylim([-0.5, 1])
plt.xlabel('Subcarrier')
plt.ylabel('Amplitude')
plt.grid()
plt.show()
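A short, hedged aside on what the plot above illustrates: sampled at the integer subcarrier positions, each sinc equals 1 at its own subcarrier and is (numerically) 0 at every other one, which is the orthogonality property of the subcarriers. This check reuses the numpy import from the snippet above.

k = np.arange(7)
print(np.round(np.sinc(k[:, None] - k[None, :]), 12))  # identity matrix: no inter-carrier interference at the sampling points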
예제 #44
import numpy as np
from numpy import sin, linspace, pi, fft
from scipy import signal
from scipy.fft import fft, ifft  # modern SciPy no longer re-exports fft/ifft/arange at the top level
import matplotlib.pyplot as plt   
from random import randint
from math import floor



### default values
t1 = linspace(-16,16, 101)
sinc = np.sinc(t1)
alpha = 0.22  ## default value
coseno_alzado = (np.sinc(t1) * np.cos(alpha*np.pi*t1))/(1-(4*alpha*alpha*t1*t1))


def tiempo_feecuencia():  ## function that shows the pulses in the time and frequency domains
	

	## pulses in the time domain
	plt.subplot(2,1,1)
	plt.title('Sinc function in the time domain')
	plt.xlabel('Time (s)')
	plt.ylabel('Amplitude')
	plt.grid(True)
	plt.plot(t1,sinc, 'r')

	plt.subplot(2,1,2)
	plt.title('Raised cosine function in the time domain')
	plt.xlabel('Time (s)')
	plt.ylabel('Amplitude')
def f(x):
    return np.sinc(x * 10 - 5).sum(axis=1)[:, None]
예제 #46
파일: data.py 프로젝트: aushani/summer
def generate_data_2(n_samples=15, stddev=0.2):
    x_samples = np.linspace(0, 2 * np.pi, num=n_samples)
    y_samples = np.sinc(x_samples - np.pi) + np.random.normal(scale=stddev,
                                                              size=n_samples)

    return x_samples, y_samples
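A minimal usage sketch for generate_data_2 above; the matplotlib import and the chosen arguments are illustrative.

import numpy as np
import matplotlib.pyplot as plt

x, y = generate_data_2(n_samples=30, stddev=0.1)
plt.scatter(x, y, label='noisy samples')
plt.plot(x, np.sinc(x - np.pi), label='noise-free sinc')
plt.legend()
plt.show()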
예제 #47
def fourier2_(w, tau):
    """tau * sinc(tau*W/(2*pi))"""
    return tau * np.sinc(tau * w / (2 * np.pi))
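A hedged numerical check of the formula documented above, assuming fourier2_ is meant to be the Fourier transform of a unit-height rectangular pulse of width tau centred at t = 0; the integral is 2*sin(w*tau/2)/w = tau*sinc(tau*w/(2*pi)) with numpy's normalized sinc. The midpoint-rule integration and parameter values are illustrative.

import numpy as np

tau = 0.3
w = np.linspace(-200.0, 200.0, 7)
n = 20000
dt = tau / n
t = -tau / 2 + (np.arange(n) + 0.5) * dt              # midpoint samples over the pulse support
numeric = np.array([np.sum(np.exp(-1j * w_i * t)) * dt for w_i in w])
print(np.max(np.abs(numeric - fourier2_(w, tau))))     # should be close to zero (~1e-7 or smaller)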
 def setUp(self):
     self.X = np.random.rand(10, 2)
     self.y = np.sinc(self.X * 10 - 5).sum(axis=1)
     self.model = RandomForest()
     self.model.train(self.X, self.y)
예제 #49
import numpy as np
import matplotlib.pyplot as plt
import lwz
import pdb
from scipy import signal

pi = np.pi
n = 256
x = np.arange(n)
nn = 30
fwhm = 5.
num = np.arange(nn)
ii = np.zeros(1)

db = np.sinc(0.05 * (x - 127))
dm0 = np.zeros(n)
dm0[127] = 1.
dm0[95] = 0.5

dm = signal.fftconvolve(dm0, db, mode='same')

for i in num:
    if i == 0:
        ii = np.array([i]) + 1

        mch = np.where(np.abs(dm) == np.max(np.abs(dm)))
        mch = (mch[0])[0]
        xm = x[mch]
        xmall = xm
        cb0 = lwz.gauss(x, 127.5, fwhm / 2.35)
        bili = np.max(db) / np.max(cb0)
예제 #50
""" ======================  Variable Declaration ========================== """

l = 0  #lower bound on x
u = 10  #upper bound on x
N = 50  #number of samples to generate
gVar = .25  #variance of error distribution
M = 3  #regression model order
""" =======================  Generate Training Data ======================= """
data_uniform = np.array(generateUniformData(N, l, u, gVar)).T

x1 = data_uniform[:, 0]
t1 = data_uniform[:, 1]

x2 = np.arange(l, u, 0.001)  #get equally spaced points in the xrange
t2 = np.sinc(x2)  #compute the true function value
""" ========================  Train the Model ============================= """

w = fitdata(x1, t1, M)
x3 = np.arange(l, u, 0.001)  #get equally spaced points in the xrange
X = np.array([x3**m for m in range(w.size)]).T
t3 = X @ w  #compute the predicted value

plotData(x1, t1, x2, t2, x3, t3,
         ['Training Data', 'True Function', 'Estimated\nPolynomial'])
print(w)
""" ======================== Generate Test Data =========================== """
"""This is where you should generate a validation testing data set.  This 
should be generated with different parameters than the training data!   """
""" ========================  Test the Model ============================== """
""" This is where you should test the validation set with the trained model """
예제 #51
import matplotlib.pyplot as plt
import numpy as np
from moviepy.video.io.bindings import mplfig_to_npimage
import moviepy.editor as mpy
 
# DRAW A FIGURE WITH MATPLOTLIB
 
duration = 2
 
fig_mpl, ax = plt.subplots(1,figsize=(5,3), facecolor='white')
xx = np.linspace(-2,2,200) # the x vector
zz = lambda d: np.sinc(xx**2)+np.sin(xx+d) # the (changing) z vector
ax.set_title("Elevation in y=0")
ax.set_ylim(-1.5,2.5)
line, = ax.plot(xx, zz(0), lw=3)
 
# ANIMATE WITH MOVIEPY (UPDATE THE CURVE FOR EACH t). MAKE A GIF.
 
def make_frame_mpl(t):
    line.set_ydata( zz(2*np.pi*t/duration))  # <= Update the curve
    return mplfig_to_npimage(fig_mpl) # RGB image of the figure
 
animation =mpy.VideoClip(make_frame_mpl, duration=duration)
animation.write_videofile("myHolidays_edited.mp4",fps=25)
# animation.write_gif("sinc_mpl.gif", fps=20)
예제 #52
def spatialStab(fullTime,
                fullSpace,
                spaceStep,
                frequency,
                timeStep,
                plasmaFreq,
                resonantFreq,
                gamma,
                lim_Of_Stability=np.pi / 2):
    fT = fullTime
    fS = fullSpace
    sp = spaceStep
    los = (np.pi) / 2
    frq = frequency
    rad = 2 * np.pi * frq
    twoPi = rad / frq
    c0 = sci.speed_of_light
    tim = timeStep
    print("timesteps: ", tim, rad * tim)
    if rad * tim > np.pi / 2 and frq <= 5e9:
        print("unstable timestep", rad * tim, tim)
        sys.exit()

    sw = np.sinc(np.pi * frq * tim)
    pf = plasmaFreq
    oldPf = pf
    rf = resonantFreq
    es = (pf**2) / (rf**2) - 1
    gam = gamma
    sqN = (rad**2 * sw**2 - es * rf**2 * np.cos(rad * tim) +
           1.0j * gam * rad * sw)
    sqD = (rad**2 * sw**2 - rf**2 * np.cos(rad * tim) + 1.0j * gam * rad * sw)
    arg = (rad / c0) * (sp / 2) * sw * np.sqrt(sqN / sqD)
    ans = (2 / sp) * np.arcsin(arg)
    kNum = abs(ans)
    epsNum = (((pf)**2))
    epsDom = (rf**2 - (rad**2) - 1j * gam * rad)
    epsilon = 1 + (epsNum / epsDom)
    refr = np.sqrt(abs(np.real(epsilon)))
    print(epsilon, "eps stability")
    print("refr", refr)
    matAdjNum = c0 * tim * np.sin((kNum * refr * sp) / 2)
    matAdjDen = refr * sp * np.sin((kNum * c0 * tim) / 2)
    fix = matAdjNum / matAdjDen
    print(fix, "fix")
    pf = np.sqrt(abs(fix)) * pf
    vpNum = rad / kNum
    lamCont = c0 / frq
    lamDisc = (twoPi) / kNum
    diff = abs(lamCont - lamDisc)
    print("Adjusted plasmaF vs original plasmaF", abs((oldPf - pf) / oldPf))
    #print(lamCont, lamDisc, diff, kNum, vpNum, c0, "lamCont, Disc, diff, kNum, diff in Vp, c0")
    if kNum * sp > los:  # HIGH FREQ LIMIT
        print("unstable, wave is not resolved:", kNum * sp, kNum,
              abs(frq - (c0 / (twoPi / kNum))))
        sys.exit()
    print("domain should be: ", (lamDisc * 5) / sp)
    if kNum * sp * fS < (5 * lamDisc):
        print("unstable because domain too small,", (lamDisc * 5) / sp)
        sys.exit()
    return lamCont, lamDisc, diff, pf, fix
예제 #53
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import numpy as np
import scipy.interpolate as si
import matplotlib.pyplot as mp
min_x, max_x = -2.5, 2.5
con_x = np.linspace(min_x, max_x, 1001)
con_y = np.sinc(con_x)
dis_x = np.linspace(min_x, max_x, 11)
dis_y = np.sinc(dis_x)
# linear interpolation
linear = si.interp1d(dis_x, dis_y)
lin_x = np.linspace(min_x, max_x, 51)
lin_y = linear(lin_x)
# cubic spline interpolation
cubic = si.interp1d(dis_x, dis_y, kind='cubic')
cub_x = np.linspace(min_x, max_x, 51)
cub_y = cubic(cub_x)
mp.figure('Interpolation', facecolor='lightgray')
mp.subplot(221)
mp.title('Continuous', fontsize=16)
mp.ylabel('y', fontsize=12)
mp.tick_params(labelsize=10)
mp.grid(linestyle=':')
mp.plot(con_x, con_y, c='hotpink',
        label='Continuous')
mp.legend()
mp.subplot(222)
mp.title('Discrete', fontsize=16)
mp.tick_params(labelsize=10)
mp.grid(linestyle=':')
예제 #54
    def delay(self, duration=1, channel=0, filter_length=2048):
        """
        Add a delay to one channel.

        Arguments:
            duration (int | float | array-like): duration of the delay in seconds (given a float) or samples (given
                an int). Given an array with the same length as the sound, each sample is delayed by the
                corresponding number of seconds. This option is used in `slab.Binaural.itd_ramp`.
            channel (int): The index of the channel to add the delay to
            filter_length (int): Must be an even number. Determines the accuracy of the reconstruction when
                using fractional sample delays. Defaults to 2048, or the sound length for shorter signals.
        Returns:
            (slab.Signal): a copy of the instance with the specified delay.
        """
        new = copy.deepcopy(self)
        if channel >= self.n_channels:
            raise ValueError(
                'Channel must be smaller than number of channels in sound!')
        if filter_length % 2:
            raise ValueError('Filter_length must be even!')
        if self.n_samples < filter_length:  # reduce the filter_length to the sound length of short signals
            filter_length = self.n_samples - 1 if self.n_samples % 2 else self.n_samples  # make even
        center_tap = int(filter_length / 2)
        t = numpy.array(range(filter_length))
        if isinstance(
                duration,
            (int, float, numpy.int64, numpy.float64)):  # just a constant delay
            duration = Signal.in_samples(duration, self.samplerate)
            if duration > self.n_samples:
                raise ValueError(
                    "Duration of the delay cant be greater longer then the sound!"
                )
            x = t - duration + 1
            window = 0.54 - 0.46 * numpy.cos(
                2 * numpy.pi * (x + 0.5) / filter_length)  # Hamming window
            if numpy.abs(duration) < 1e-10:
                tap_weight = numpy.zeros_like(t)
                tap_weight[center_tap] = 1
            else:
                tap_weight = window * numpy.sinc(x - center_tap)
            new.data[:, channel] = numpy.convolve(self.data[:, channel],
                                                  tap_weight,
                                                  mode='same')
        else:  # dynamic delay
            if len(duration) != self.n_samples:
                raise ValueError('Duration shorter or longer than sound!')
            duration *= self.samplerate  # assuming vector in seconds, convert to samples
            padding = numpy.zeros(center_tap)
            # for zero-padded convolution (potential edge artifacts!)
            sig = numpy.concatenate((padding, new.channel(channel), padding),
                                    axis=None)
            for i, current_delay in enumerate(duration):
                x = t - current_delay
                window = 0.54 - 0.46 * numpy.cos(
                    2 * numpy.pi * (x + 0.5) / filter_length)  # Hamming window
                if numpy.abs(current_delay) < 1e-10:
                    tap_weight = numpy.zeros_like(t)
                    tap_weight[center_tap] = 1
                else:
                    tap_weight = window * numpy.sinc(x - center_tap)
                    sig_portion = sig[i:i + filter_length]
                    # sig_portion and tap_weight have the same length, so the valid part of the convolution is just
                    # one sample, which gets written into the sound at the current index
                    new.data[i, channel] = numpy.convolve(sig_portion,
                                                          tap_weight,
                                                          mode='valid')
        return new
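A standalone, hedged sketch of the windowed-sinc fractional-delay idea used in delay() above, written in plain numpy without slab; the helper name, filter length, and test signal are illustrative, and the Hamming-window formula follows the half-sample-shifted variant used in the method above.

import numpy as np

def fractional_delay_taps(delay_samples, filter_length=64):
    """Hamming-windowed sinc taps approximating a delay of filter_length//2 + delay_samples samples."""
    center = filter_length // 2
    n = np.arange(filter_length)
    x = n - delay_samples                                               # shifted index, as in delay()
    window = 0.54 - 0.46 * np.cos(2 * np.pi * (x + 0.5) / filter_length)  # Hamming window riding on the shifted sinc
    return window * np.sinc(x - center)                                 # sinc centred at center + delay_samples

fs = 8000.0
t = np.arange(400) / fs
sig = np.sin(2 * np.pi * 440.0 * t)
taps = fractional_delay_taps(10.25)          # delay by 10.25 samples on top of the fixed center delay
delayed = np.convolve(sig, taps)             # full convolution; total group delay is roughly 32 + 10.25 samples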
예제 #55
        NTaps = int(a)
    elif o in ("-b", "--sub-bands"):
        NSubBands = int(a)
    elif o in ("-d", "--data-type"):
        DataType = a
    elif o in ("-p", "--no-plot"):
        Plot = False
    else:
        PrintUsage(ProgName)
        sys.exit(1)

M = NTaps * NFFT

# the filter-coefficient-generation section -->
X = numpy.array([(float(i) / NFFT) - (float(NTaps) / 2) for i in range(M)])
PFBCoeff = numpy.sinc(X) * numpy.hanning(M)
# <-- the filter-coefficient-generation section

# create conversion map
if ("signedchar" == DataType):
    Map = numpy.zeros(256, numpy.float32)
    for i in range(0, 128):
        Map[i] = float(i) / 128
    for i in range(128, 256):
        Map[i] = -(float(256 - i) / 128)

# 32-bit (float) coefficients
PFBCoeffFloat32 = numpy.zeros(M * NSubBands, numpy.float32)
# 8-bit (signedchar) coefficients
if ("signedchar" == DataType):
    PFBCoeffInt8 = numpy.zeros(M * NSubBands, numpy.int8)
예제 #56
def undulator_field_dfl_SERVAL(dfl, L_w, sig_x=0, sig_y=0, sig_xp=0, sig_yp=0, k_support = 'intensity', s_support='conv_intensities', showfig=False, seed=None):
    filePath = '/home/andrei/Documents/diploma/Diploma/images/'
    _logger.info('Generating undulator field with Serval algorithm')
    w_0 = 2*np.pi * speed_of_light / dfl.xlamds
    
    if showfig:
        plot_dfl_2Dsf(dfl, scale='um', domains='sf', savefig=True, 
                      fig_name = '1-X_noise', filePath=filePath)
        # plot_dfl(dfl, line_off_xy = False, fig_name = '1-X_noise')
    
    dfl.to_domain('sf')
    
    x, y = np.meshgrid(dfl.scale_x(), dfl.scale_y())#, indexing='ij')
    
    mask_xy_ebeam = np.exp(- x**2 / 4 / sig_x**2 - y**2 / 4 / sig_y**2) # 4 because amplitude, not intensity
    # mask_xy_ebeam = np.exp(- x**2 / 2 / sig_x**2 - y**2 / 2 / sig_y**2) # 4 because amplitude, not intensity

    mask_xy_ebeam /= np.sum(mask_xy_ebeam)
    
    # mask_xy_radiation = np.sqrt((1j*(np.pi - 2*special.sici(w_0*(x**2 + y**2)/speed_of_light/L_w)[0]))**2)
    # mask_xy_radiation = 1j*(np.pi - 2*special.sici(w_0*(x**2 + y**2)/speed_of_light/L_w)[0])

    # mask_xy_radiation = (1j*(np.pi - 2*special.sici(w_0*(x**2 + y**2)/speed_of_light/L_w)[0]))**2
    if s_support == 'conv_intensities':
        _logger.info(ind_str + 's_support == "conv_intensities"')
        mask_xy_radiation = 1j*(np.pi - 2*scipy.special.sici(w_0*(x**2 + y**2)/speed_of_light/L_w)[0])
        mask_xy = scipy.signal.fftconvolve(mask_xy_radiation**2, mask_xy_ebeam**2, mode='same')
        mask_xy = np.sqrt(mask_xy)
    elif s_support == 'conv_amplitudes':
        _logger.info(ind_str + 's_support == "conv_amplitudes"')
        mask_xy_radiation = 1j*(np.pi - 2*scipy.special.sici(w_0*(x**2 + y**2)/speed_of_light/L_w)[0])
        mask_xy = scipy.signal.fftconvolve(mask_xy_radiation, mask_xy_ebeam, mode='same')
    else:
        _logger.info(ind_str +'s_support == "beam"')
        mask_xy = mask_xy_ebeam
    
    _logger.info(ind_str +'Multiplying by real space mask')
    dfl.fld *= mask_xy
    # dfl.fld *= np.sqrt(mask_xy)
    _logger.info(2*ind_str +'done')

    if showfig:
        # plot_dfl(dfl, domains='s', line_off_xy = False, fig_name = '2-X_e-beam-size')
        plot_dfl_2Dsf(dfl, scale='um', domains='sf', savefig=True,
                      fig_name = '2-X_e-beam-size', filePath=filePath)
        # plot_dfl(dfl, domains='k', line_off_xy = False, fig_name = '2-X_e-beam-size')
        plot_dfl_2Dsf(dfl, scale='um', domains='kf', savefig=True,
                      fig_name = '2-X_e-beam-divergence', filePath=filePath)
                
    dfl.to_domain('kf')

    k_x, k_y = np.meshgrid(dfl.scale_x(), dfl.scale_y())
    mask_kxky_ebeam = np.exp(-k_y**2 / 4 / sig_yp**2 - k_x**2 / 4 / sig_xp**2 ) # 4 because amplitude, not intensity
    # mask_kxky_ebeam = np.exp(-k_y**2 / 2 / sig_yp**2 - k_x**2 / 2 / sig_xp**2 ) # 2 because intensity
    mask_kxky_ebeam /= np.sum(mask_kxky_ebeam)
    
    # mask_kxky_radiation = np.sqrt((np.sinc(w_0 * L_w * (k_x**2 + k_y**2) / 4 / speed_of_light / np.pi))**2)# Geloni2018 Eq.3, domega/omega = 2dgamma/gamma, divided by pi due to np.sinc definition
    # mask_kxky_radiation = (np.sinc(w_0 * L_w * (k_x**2 + k_y**2) / 4 / speed_of_light / np.pi))# Geloni2018 Eq.3, domega/omega = 2dgamma/gamma, divided by pi due to np.sinc definition
        
    mask_kxky_radiation = np.sinc(w_0 * L_w * (k_x**2 + k_y**2) / 4 / speed_of_light / np.pi)# Geloni2018 Eq.3, domega/omega = 2dgamma/gamma, divided by pi due to np.sinc definition

    if k_support == 'intensity':
        _logger.info(ind_str +'k_support == "intensity"')
        mask_kxky = scipy.signal.fftconvolve(mask_kxky_ebeam**2, mask_kxky_radiation**2, mode='same')
        mask_kxky = np.sqrt(mask_kxky[np.newaxis, :, :])
        mask_kxky /= np.sum(mask_kxky)
    elif k_support == 'amplitude':
        _logger.info(ind_str +'k_support == "amplitude"')
        mask_kxky = scipy.signal.fftconvolve(mask_kxky_ebeam, mask_kxky_radiation, mode='same')
        mask_kxky /= np.sum(mask_kxky)
    else:
        raise ValueError('k_support should be either "intensity" or "amplitude"')
    
    # dfl.fld *= mask_kxky[np.newaxis, :, :]
    _logger.info(ind_str +'Multiplying by inverse space mask')
    dfl.fld *= mask_kxky
    _logger.info(2*ind_str +'done')

    if showfig:
        # plot_dfl(dfl, domains='s', fig_name = '3-X_radaition_size')
        plot_dfl_2Dsf(dfl, scale='um', domains='sf', savefig=True, 
                      fig_name = '3-X_radiation_size', filePath=filePath)
        # plot_dfl(dfl, domains='k', fig_name = '3-X_radiation_divergence')
        plot_dfl_2Dsf(dfl, scale='um', domains='kf', savefig=True,
                      fig_name = '3-X_radiation_divergence', filePath=filePath)
    return dfl 
예제 #57
 def time_fun(t):
     return np.sinc(omega * (t - 50e-12))
예제 #58
def cascade(screenpos,i,nletters):
    v = np.array([0,-1])
    d = lambda t : 1 if t<0 else abs(np.sinc(t)/(1+t**4))
    return lambda t: screenpos+v*400*d(t-0.15*i)
예제 #59
def run(num=1, T=500):
    X = pd.read_csv('X_set%s.csv' % num, header=None).to_numpy()
    Y = pd.read_csv('y_set%s.csv' % num, header=None).to_numpy().flatten()
    Z = pd.read_csv('z_set%s.csv' % num, header=None).to_numpy().flatten()
    N, D = X.shape
    print(X.shape, Y.shape, Z.shape)

    a0 = 1e-16
    b0 = 1e-16
    e0 = 1
    f0 = 1

    # params for q(w) - doesn't matter what we set it to, we'll update this first
    C = np.eye(D)
    mu = np.zeros(D)

    # params for q(lambda)
    e = e0
    f = f0

    # params for q(alpha)
    a = np.ones(D) * a0
    b = np.ones(D) * b0
    a0ones = np.ones(D) * a0

    # objective
    L = np.empty(T)

    for t in range(T):
        # update q(w)
        C = np.linalg.inv(np.diag(1.0 * a / b) + (1.0 * e / f) * X.T.dot(X))
        mu = C.dot((1.0 * e / f) * X.T.dot(Y))

        # update q(alpha)
        a = a0ones + 0.5
        b = b0 + 0.5 * (np.diag(C) + mu * mu)
        # for k in xrange(D):
        #   a[k] = a0 + 0.5
        #   b[k] = b0 + 0.5*(C[k,k] + mu[k]*mu[k])

        # update q(lambda)
        e = e0 + N / 2.0
        sum_for_f = 0
        # for i in xrange(N):
        #   delta = Y[i] - X[i].dot(mu)
        #   sum_for_f += delta*delta + X[i].dot(C).dot(X[i])
        delta = Y - X.dot(mu)
        sum_for_f = delta.dot(delta) + np.trace(X.dot(C).dot(X.T))
        f = f0 + 0.5 * sum_for_f

        # update L
        L[t] = objective(X, Y, C, mu, a, b, e, f, a0, b0, e0, f0)
        if t % 20 == 0:
            print("t:", t)
            if num == 3:
                print("L:", L[t])

    # plot 1/E[alpha]
    plt.plot(b / a)
    plt.show()

    # 1/E[lambda]
    print "1/E[lambda]:", f / e

    # plot L
    plt.plot(L)
    plt.show()

    Yhat = X.dot(mu)
    plt.plot(Z, Yhat)
    plt.scatter(Z, Y)
    plt.plot(Z, 10 * np.sinc(Z))
    plt.show()
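The loop above appears to implement the standard mean-field variational Bayes updates for linear regression with per-weight ARD precisions $\alpha_k \sim \mathrm{Gamma}(a_0, b_0)$ and noise precision $\lambda \sim \mathrm{Gamma}(e_0, f_0)$; in that reading, each iteration computes

\[
\begin{aligned}
C &= \left(\operatorname{diag}(\mathbb{E}[\alpha]) + \mathbb{E}[\lambda]\, X^\top X\right)^{-1}, &
\mu &= \mathbb{E}[\lambda]\, C X^\top y,\\
a_k &= a_0 + \tfrac{1}{2}, &
b_k &= b_0 + \tfrac{1}{2}\left(C_{kk} + \mu_k^2\right),\\
e &= e_0 + \tfrac{N}{2}, &
f &= f_0 + \tfrac{1}{2}\left(\lVert y - X\mu\rVert^2 + \operatorname{tr}(X C X^\top)\right),
\end{aligned}
\]

with $\mathbb{E}[\alpha_k] = a_k/b_k$ and $\mathbb{E}[\lambda] = e/f$, which matches the expressions in the code.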
# This should give a Fourier transform with a frequency spectrum that is
# entirely real, starts at 1 and goes to 0 at 0.5 the sample rate


def est_autocorr(x):
    """ Estimate autocorrelation by inverse transforming the powerspectrum """
    X = np.fft.fft(x)
    S = X * np.conj(X)
    r = np.fft.ifft(S)  # inverse-transform the power spectrum; ifft(X) here would just return x
    return r


N = common.get_env('N', default=512, conv=int)
n = np.arange(N)
t = np.arange(-N / 2 + 0.5, N / 2)
t_eval = np.concatenate((np.arange(N / 2), np.arange(1, N / 2 + 1)[::-1] * -1))
a = common.get_env('a', default=0.5, conv=float)
x = a * np.power(np.sinc(t_eval * a), 2)
X = np.fft.fft(x)
r = est_autocorr(x)
fig, axs = plt.subplots(3, 1)
axs[0].plot(n, x)
axs[1].plot(n, np.real(X))
axs[2].plot(n, np.abs(r))
# Compare the signal with its autocorrelation estimate. Note that sinc**2 is not
# exactly its own autocorrelation: its spectrum is a triangle, while the
# autocorrelation's spectrum is that triangle squared.
print(np.sum(np.abs(x - r)))

plt.show()