def Test(self):
   np.random.seed(1)
   n = shape_[-1]
   batch_shape = shape_[:-2]
   a = np.random.uniform(
       low=-1.0, high=1.0, size=n * n).reshape([n, n]).astype(dtype_)
   a += np.conj(a.T)
   a = np.tile(a, batch_shape + (1, 1))
   # Optimal stepsize for central difference is O(epsilon^{1/3}).
   epsilon = np.finfo(dtype_).eps
   delta = 0.1 * epsilon**(1.0 / 3.0)
   # tolerance obtained by looking at actual differences using
   # np.linalg.norm(theoretical-numerical, np.inf) on -mavx build
   if dtype_ == np.float32:
     tol = 1e-2
   else:
     tol = 1e-7
   with self.test_session():
     tf_a = constant_op.constant(a)
     tf_e, tf_v = linalg_ops.self_adjoint_eig(tf_a)
     for b in tf_e, tf_v:
       x_init = np.random.uniform(
           low=-1.0, high=1.0, size=n * n).reshape([n, n]).astype(dtype_)
       x_init += np.conj(x_init.T)
       x_init = np.tile(x_init, batch_shape + (1, 1))
       theoretical, numerical = gradient_checker.compute_gradient(
           tf_a,
           tf_a.get_shape().as_list(),
           b,
           b.get_shape().as_list(),
           x_init_value=x_init,
           delta=delta)
       self.assertAllClose(theoretical, numerical, atol=tol, rtol=tol)
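
The same central-difference recipe works outside TensorFlow; a minimal numpy-only sketch of the gradient check, using a toy scalar function in place of the eigendecomposition:

import numpy as np

def central_diff_grad(f, x, delta):
    # Numerical gradient of a scalar function f at x via central differences.
    g = np.zeros_like(x)
    for i in range(x.size):
        e = np.zeros_like(x)
        e.flat[i] = delta
        g.flat[i] = (f(x + e) - f(x - e)) / (2.0 * delta)
    return g

x = np.random.uniform(low=-1.0, high=1.0, size=4)
delta = 0.1 * np.finfo(np.float64).eps ** (1.0 / 3.0)  # O(eps^{1/3}) step
f = lambda v: np.sum(v ** 2)
assert np.allclose(central_diff_grad(f, x, delta), 2.0 * x, atol=1e-5)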
Example #2
def equalize_frame(sframe, x_preamble, fft_len, cp_len, cs_len):
    rx_preamble = sframe[cp_len:cp_len + len(x_preamble)]
    agc_factor = calculate_agc_factor(rx_preamble, x_preamble)
    # print('AGC values:', ref_e, rx_e, agc_factor)
    sframe *= agc_factor

    frame_start = cp_len + 2 * fft_len + cs_len + cp_len
    H, e0, e1 = preamble_estimate(rx_preamble, x_preamble, fft_len)
    H_estimate = estimate_frame_channel(H, fft_len, len(sframe))

    H_p = estimate_frame_channel(H, fft_len, fft_len * 9)
    p = sframe[frame_start:frame_start + fft_len * 9]

    P = np.fft.fft(p)
    P *= np.fft.fftshift(np.conj(H_p))
    p = np.fft.ifft(P)
    print('equalize p:', utils.calculate_signal_energy(p))

    F = np.fft.fft(sframe)
    F *= np.fft.fftshift(np.conj(H_estimate))
    sframe = np.fft.ifft(F)

    s = sframe[frame_start:frame_start + fft_len * 9]
    print('equalize s:', utils.calculate_signal_energy(s))
    # plt.plot(s.real)
    # plt.plot(p.real)
    # plt.plot(s.imag)
    # plt.plot(p.imag)
    # plt.plot(np.abs(P))
    # plt.plot(np.abs(F))
    # # plt.plot(np.abs(P - F))
    # plt.show()

    return sframe
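
In isolation, the equalization step above is a conjugate-channel (matched-filter style) correction in the frequency domain; a minimal numpy sketch of just that step, with a toy two-tap channel standing in for the estimated H:

import numpy as np

rng = np.random.default_rng(0)
x = np.exp(0.5j * np.pi * rng.integers(0, 4, 64))   # toy QPSK-like signal
h = np.array([1.0, 0.4j])                           # toy channel taps
H = np.fft.fft(h, x.size)
y = np.fft.ifft(np.fft.fft(x) * H)                  # circular channel

# Matched-filter correction as in equalize_frame (phase corrected, amplitude
# scaled by |H|^2); dividing by |H|^2 instead gives zero-forcing, recovering x.
x_mf = np.fft.ifft(np.fft.fft(y) * np.conj(H))
x_zf = np.fft.ifft(np.fft.fft(y) * np.conj(H) / np.abs(H) ** 2)
assert np.allclose(x_zf, x)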
Example #3
def nufft_T(N, J, K, tol, alpha, beta):
    '''
    Equations (29) and (26) in Fessler's NUFFT paper:
    the pseudo-inverse of T.
    '''
    import scipy.linalg
    L = numpy.size(alpha) - 1
    cssc = numpy.zeros((J, J))
    [j1, j2] = numpy.mgrid[1:J + 1, 1:J + 1]
    for l1 in range(-L, L + 1):
        for l2 in range(-L, L + 1):
            alf1 = alpha[abs(l1)]
            if l1 < 0: alf1 = numpy.conj(alf1)
            alf2 = alpha[abs(l2)]
            if l2 < 0: alf2 = numpy.conj(alf2)
            tmp = j2 - j1 + beta * (l1 - l2)
            tmp = numpy.sinc(1.0 * tmp / (1.0 * K / N))  # the interpolator
            cssc = cssc + alf1 * numpy.conj(alf2) * tmp
            #print([l1, l2, tmp ])
    u_svd, s_svd, v_svd= scipy.linalg.svd(cssc)
    smin=numpy.min(s_svd)
    if smin < tol:
        print('Poor conditioning %g => pinverse' % smin)
    else:
        tol = 0.0
    for jj in range(0, J):
        if s_svd[jj] < tol/10:
            s_svd[jj]=0
        else:
            s_svd[jj]=1/s_svd[jj]      
    s_svd= scipy.linalg.diagsvd(s_svd,len(u_svd),len(v_svd))
    cssc = numpy.dot(  numpy.dot(v_svd.conj().T,s_svd), u_svd.conj().T)
    return cssc 
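
The thresholded SVD inversion above is essentially a truncated pseudo-inverse; a standalone numpy/scipy sketch of the same pattern for a square matrix (names illustrative):

import numpy
import scipy.linalg

def thresholded_pinv(A, tol):
    # Invert singular values, zeroing any below tol/10 (as in nufft_T).
    u, s, vh = scipy.linalg.svd(A)
    s_inv = numpy.array([0.0 if sv < tol / 10.0 else 1.0 / sv for sv in s])
    S = scipy.linalg.diagsvd(s_inv, len(u), len(vh))
    return vh.conj().T @ S @ u.conj().T

A = numpy.diag([2.0, 1e-12])
assert numpy.allclose(thresholded_pinv(A, tol=1e-6), numpy.diag([0.5, 0.0]))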
Example #4
    def minZerrKernSHG_naive(self):
        Et0 = self.Et_cla.get()
        Esig = self.Esig_t_tau_p_cla.get()
        dZ = self.dZ_cla.get()
        N = Esig.shape[0]

        mx = 0.0
        X = np.zeros(5)

        for t in range(N):
            for tau in range(N):
                T = np.abs(Esig[tau, t]) ** 2
                if mx < T:
                    mx = T
                tp = t - (tau - N // 2)
                if tp >= 0 and tp < N:
                    dZdZ = dZ[t] * dZ[tp]
                    dZE = dZ[t] * Et0[tp] + dZ[tp] * Et0[t]
                    DEsig = Et0[t] * Et0[tp] - Esig[tau, t]

                    X[0] += np.abs(dZdZ) ** 2
                    X[1] += 2.0 * np.real(dZE * np.conj(dZdZ))
                    X[2] += 2.0 * np.real(DEsig * np.conj(dZdZ)) + np.abs(dZE) ** 2
                    X[3] += 2.0 * np.real(DEsig * np.conj(dZE))
                    X[4] += np.abs(DEsig) ** 2
        T = N * N * mx
        X[0] = X[0] / T
        X[1] = X[1] / T
        X[2] = X[2] / T
        X[3] = X[3] / T
        X[4] = X[4] / T

        root.debug("".join(("Esig_t_tau_p norm max: ", str(mx))))

        return X
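
The returned X looks like the coefficient vector of a quartic in the step length along dZ (highest power first); assuming that layout, a hedged sketch of the subsequent 1-D minimisation:

import numpy as np

def min_quartic_step(X):
    # Minimise X[0]*x**4 + X[1]*x**3 + X[2]*x**2 + X[3]*x + X[4] over real x
    # by evaluating the real stationary points of the polynomial.
    roots = np.roots(np.polyder(X))
    real_roots = roots[np.abs(roots.imag) < 1e-9].real
    return real_roots[np.argmin(np.polyval(X, real_roots))]

X = np.array([1.0, 0.0, -2.0, 0.0, 0.5])   # x**4 - 2x**2 + 0.5, minima at +/-1
assert abs(abs(min_quartic_step(X)) - 1.0) < 1e-9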
Example #5
def childGeneration(N, particles, tree, i):
    maxInCell = 3
    p, t = noOfParticlesInside(particles, N[i])
    N[i].particleIndex = t
    if p > maxInCell:
        t = len(N)
        tree[i] = [t, t + 1, t + 2, t + 3]
        c = N[i].center
        v = N[i].vertex
        N.append(Node((c - (v - c) / 2), c, t))
        N.append(Node((c + (v - c) / 2), v, t + 1))
        N.append(Node(c + (conj(v - c) / 2), real(v) + 1j * imag(c), t + 2))
        N.append(Node(c - (conj(v - c) / 2), real(c) + 1j * imag(v), t + 3))
        tree.append([])
        tree.append([])
        tree.append([])
        tree.append([])
        for ch in tree[i]:
            childGeneration(N, particles, tree, ch)

        N[i].aj = zeros(noOfTerms) * 1j
        for j in range(1, noOfTerms + 1):
            for k in range(1, j + 1):
                for chi in tree[i]:
                    N[i].aj[j - 1] += (
                        N[chi].aj[k - 1] * combitorial(j - 1, k - 1) * pow((-N[i].center + N[chi].center), j - k)
                    )

    else:
        N[i].aj = zeros(noOfTerms) * 1j
        for j in range(1, noOfTerms + 1):
            for tempIdx in t:
                N[i].aj[j - 1] += particles[tempIdx].strength * pow((particles[tempIdx].xy - N[i].center), (j - 1))
Example #6
def Kmat(alpha,x,pAve):

    K = np.zeros((Nb,Nb),dtype=complex)

    ar = alpha.real 

    for j in range(Nb): 
        K[j,j] = np.abs(alpha)**2 / ar * (2. * j + 1.)/2. +  pAve**2 
    
    for j in range(1,Nb):
        K[j-1,j] = -1j*np.conj(alpha) * pAve * np.sqrt(2. * j / ar)
        K[j,j-1] = np.conj(K[j-1,j])

    if Nb > 2: 
        for j in range(2,Nb):
            K[j-2,j] = - np.sqrt(float((j-1)*j)) * np.conj(alpha)**2 / 2. / ar  
            K[j,j-2] = np.conj(K[j-2,j])
    

    #K[0,0] = np.abs(alpha)**2/alpha.real / 2. + pAve**2
    #K[1,1] = np.abs(alpha)**2/alpha.real * 3.0 / 2. + pAve**2 

    #K[0,1] = -1j*np.conj(alpha) * pAve * np.sqrt(2.*j/alpha.real)
    #K[1,0] = np.conj(K[0,1])
    K = K / (2.*am) 

    return K 
Example #7
def Dmat(alpha,xAve,pAve):

    D = np.zeros((Nb,Nb),dtype=complex)

    V0, V1, ak  = Hessian(xAve) 

    # time derivative 
    dq = pAve / am
    dp = - V1
    da = (-1.0j/am * alpha**2 + 1.j * ak) 
    
    a = alpha.real 

    for k in range(Nb):
        D[k,k] = - 1j*da.imag/2./a * (float(k) + 0.5) - 1j * pAve**2/am    
    
    for k in range(1,Nb):
        D[k-1,k] = np.sqrt(float(k)/2./a) * ( - np.conj(alpha) * dq + 1j * dp) 
        D[k,k-1] = np.sqrt(float(k)/2./a) * ( alpha * dq + 1j * dp )
    
    if Nb > 2: 
        for k in range(2,Nb):
            D[k-2,k] = np.conj(da)/2./a * np.sqrt(float(k*(k-1)))/2.0 
            D[k,k-2] = - da/2./a * np.sqrt(float(k*(k-1)))/2.0
            


    #D[0,0] = - 1j * pAve**2 / am 
    #D[1,1] = - 1j * pAve**2 / am 
    #D[0,1] = - (np.conj(alpha)*pAve/am + 1j * V1) / np.sqrt(2.*alpha.real)
    #D[1,0] = (alpha * pAve / am - 1j * V1) / np.sqrt(2.*alpha.real)

    return D 
Example #8

def levdown(anxt, enxt=None):
    """One step backward Levinson recursion

    :param anxt:
    :param enxt:
    :return: 
        * acur the P'th order prediction polynomial based on the P+1'th order prediction polynomial, anxt.
        * ecur the P'th order prediction error based on the P+1'th order prediction error, enxt.

    ..  * knxt the P+1'th order reflection coefficient.

    """
    #% Some preliminaries first
    #if nargout>=2 & nargin<2
    #    raise ValueError('Insufficient number of input arguments');
    if anxt[0] != 1:
        raise ValueError('First coefficient of the prediction polynomial must be unity.')
    anxt = anxt[1:] #  Drop the leading 1, it is not needed 
                    #  in the step down

    # Extract the k+1'th reflection coefficient
    knxt = anxt[-1]
    if knxt == 1.0:
        raise ValueError('At least one of the reflection coefficients is equal to one.')
    # A Matrix formulation from Stoica is used to avoid looping 
    acur = (anxt[0:-1]-knxt*numpy.conj(anxt[-2::-1]))/(1.-abs(knxt)**2)
    ecur = None
    if enxt is not None:
        ecur = enxt/(1.-numpy.dot(knxt.conj().transpose(),knxt))
    acur = numpy.insert(acur, 0, 1)
    
    return acur, ecur
Example #9
def get_modal_volume(sim, box=None, dft_cell=None, nf=0):

    Exyz=[mp.Ex, mp.Ey, mp.Ez]

    if dft_cell is None:
      (Ex,Ey,Ez) = [sim.get_array(vol=box, component=c, cmplx=True) for c in Exyz]
      (X,Y,Z,W)  = sim.get_array_metadata(vol=box)
      Eps        = sim.get_array(vol=box, component=mp.Dielectric)
    else:
      (Ex,Ey,Ez) = [sim.get_dft_array(dft_cell, c, nf) for c in Exyz]
      (X,Y,Z,W)  = sim.get_dft_array_metadata(dft_cell=dft_cell)
      # slightly annoying: we need an epsilon array with empty dimensions collapsed,
      #  something not currently provided by any C++ function; for now just
      #  create it via brute-force python loop, although this could get slow.
      Eps=np.zeros(0)
      for x in X:
        for y in Y:
          for z in Z:
            Eps=np.append(Eps,sim.get_epsilon_point(mp.Vector3(x,y,z)))
      Eps=np.reshape(Eps,np.shape(W))

    EpsE2 = np.real(Eps*(np.conj(Ex)*Ex + np.conj(Ey)*Ey + np.conj(Ez)*Ez))
    num   = np.sum(W*EpsE2)
    denom = np.max(EpsE2)

    return num/denom if denom!=0.0 else 0.0
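
The triple loop flagged as slow could plausibly be vectorized once the per-point lookup accepts arrays; a minimal sketch of the meshgrid pattern, with a stand-in for sim.get_epsilon_point:

import numpy as np

def eps_at(x, y, z):
    # Stand-in for a per-point permittivity lookup that broadcasts over arrays.
    return 1.0 + x * x + y * y + z * z

X, Y, Z = np.linspace(0, 1, 4), np.linspace(0, 1, 5), np.linspace(0, 1, 6)
gx, gy, gz = np.meshgrid(X, Y, Z, indexing='ij')
Eps = eps_at(gx, gy, gz)   # shape (4, 5, 6); no repeated np.append calls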
Example #10
 def apply_filter(self, xs_ft, ys_ft):
     r"""Apply the filter to the FFT'd values.
     
     Parameters
     ----------
     xs_ft : array_like
         The fourier transform of the x slopes
     ys_ft : array_like
         The fourier transform of the y slopes
         
     Returns
     -------
     est_ft : array_like
         The fourier transform of the phase estimate.
     
     Notes
     -----
     
     This implements the equation
     
     .. math::
         
         \hat{\Phi} = \frac{G_{wx}^{*} X + 
         G_{wy}^{*} Y}{|G_{wx}|^{2} + |G_{wy}|^2}
     
     """
     return ((np.conj(self.gx) * xs_ft + np.conj(self.gy) * ys_ft)
             / self.denominator)
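
A numpy check of the docstring formula: build gx, gy for a periodic grid, synthesize slopes from a known phase, and confirm the estimator returns it (the class attributes gx, gy, and denominator are assumed to hold exactly these arrays):

import numpy as np

n = 32
fx, fy = np.meshgrid(np.fft.fftfreq(n), np.fft.fftfreq(n), indexing='ij')
gx, gy = 2j * np.pi * fx, 2j * np.pi * fy          # spectral derivative filters
denominator = np.abs(gx) ** 2 + np.abs(gy) ** 2
denominator[0, 0] = 1.0                            # avoid 0/0 at the piston mode

phi = np.random.rand(n, n)
phi -= phi.mean()                                  # zero-mean phase screen
Phi = np.fft.fft2(phi)
xs_ft, ys_ft = gx * Phi, gy * Phi                  # transforms of x/y slopes

est_ft = (np.conj(gx) * xs_ft + np.conj(gy) * ys_ft) / denominator
est_ft[0, 0] = 0.0
assert np.allclose(est_ft, Phi, atol=1e-9)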
Example #11
    def mfunc(uv, p, d, f):
        global curtime
        crd,t,(i,j) = p
        bl = a.miriad.ij2bl(i,j)
        if bl in conj_bls: d = n.conj(d)
        if is_run1(t):
            #if i == 2 or j == 2: return p, None, None
            d = n.conj(d)
            i,j = rewire_run1[i], rewire_run1[j]
            uvo['pol'] = a.miriad.str2pol['xx']
        elif is_run2(t):
            #if i == 2 or j == 2: return p, None, None
            d = n.conj(d)
            i,j = rewire_run2[i], rewire_run2[j]
            uvo['pol'] = a.miriad.str2pol['xx']
        else: return p, None, None
        if i > j: i,j,d = j,i,n.conj(d)
        if curtime != t:
            #if is_run1(t): print 'Processing as run 1'
            #elif is_run2(t): print 'Processing as run 2'
            curtime = t
            aa.set_jultime(t)
            uvo['lst'] = aa.sidereal_time()
            uvo['ra'] = aa.sidereal_time()
            uvo['obsra'] = aa.sidereal_time()
        p = crd,t,(i,j)
        return p,d,f
Example #12
def vis_cal(visdata, vismodel, max_iter=2000, threshold=0.0001):

    nchan = visdata.shape[-1]
    nant = visdata.shape[0]
    gains = NP.ones((nant, nchan), dtype=NP.complex64)

    # set up bookkeeping
    ant1 = NP.arange(nant)

    chi_history = NP.zeros((max_iter, nchan))

    A = NP.zeros((nant**2, nant), dtype=NP.complex64)  # matrix for minimization
    ind1 = NP.arange(nant**2)
    ind2 = NP.repeat(NP.arange(nant), nant)
    ind3 = NP.tile(NP.arange(nant), nant)
    for fi in range(nchan):
        # the iteration counter and convergence measure must reset for every
        # channel, and tries must be an integer so it can index chi_history
        tries = 0
        change = 100.0
        tempgains = gains[:, fi].copy()
        tempdata = visdata[:, :, fi].reshape(-1)
        tempmodel = vismodel[:, :, fi].reshape(-1)
        while (tries < max_iter) and (change > threshold):
            chi_history[tries, fi] = NP.sum(tempdata - NP.outer(tempgains, NP.conj(tempgains)) * tempmodel)
            prevgains = tempgains.copy()
            A[ind1, ind2] = tempmodel * NP.conj(prevgains[ind3])
            tempgains = NP.linalg.lstsq(A, tempdata)[0]
            change = NP.median(NP.abs(tempgains - prevgains) / NP.abs(prevgains))
            tries += 1
        if tries == max_iter:
            print('Warning! Vis calibration failed to converge. Continuing')
        gains[:, fi] = tempgains.copy()

    return gains
Example #13
def alignShells(alm, blm, lmax, Lgrid):

    L = lmax+1
    euler = sampling_grid(Lgrid)
    N = np.size(euler, 0)

    Corr = np.zeros(N, dtype=complex)
    for l in range(L):
        for m in range(-l,l+1):
            for n in range(-l,l+1):
                dlmn = wignerD(l,m,n)(euler[:,0],euler[:,1],euler[:,2])

                a = hp2lm(alm, l, m, lmax)
                b = hp2lm(blm,l,n,lmax)
                Corr+= a*np.conj(b)*np.conj(dlmn)

    x = np.where(Corr == np.max(Corr))  # builtin max cannot order complex values
    x = x[0]

    # now we rotate back blm         
    blmr = rotatealm(blm,lmax,euler[x,0][0],euler[x,1][0],euler[x,2][0])

    hpblmr = stand2hp(blmr,lmax)

    return hpblmr
Example #14
def deconvolve(signal, HRFs, SNRs):
	"""Deconvolve signal using Wiener deconvolution."""
	H = fft(HRFs, len(signal), axis=0)
	wiener_filter = np.conj(H) / (H * np.conj(H) + 1 / SNRs**2)
	deconvolved = np.real(ifft(wiener_filter * fft(signal, axis=0), axis=0))

	return deconvolved
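
A self-contained sketch of the same Wiener step for one 1-D response, with an illustrative SNR constant:

import numpy as np
from numpy.fft import fft, ifft

rng = np.random.default_rng(1)
n = 256
hrf = np.exp(-np.arange(n) / 10.0)            # toy impulse response
x = rng.standard_normal(n)                    # underlying signal
y = np.real(ifft(fft(x) * fft(hrf)))          # circularly convolved observation

H = fft(hrf)
snr = 100.0
wiener_filter = np.conj(H) / (H * np.conj(H) + 1.0 / snr ** 2)
x_hat = np.real(ifft(wiener_filter * fft(y))) # close to x for high SNR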
Example #15
def visualize_dft_flux(sim, superpose=True, flux_cells=[],
                       options=None, nf=0):

    if not mp.am_master():
        return
    options=options if options else def_flux_options

    # first pass to get arrays of poynting flux strength for all cells
    if len(flux_cells)==0:
        flux_cells=[cell for cell in sim.dft_objects if is_flux_cell(cell)]
    flux_arrays=[]
    for cell in flux_cells:    # first pass to compute flux data
        (x,y,z,w,c,EH)=unpack_dft_cell(sim,cell,nf=nf)
        flux_arrays.append( 0.25*np.real(w*(np.conj(EH[0])*EH[3] - np.conj(EH[1])*EH[2])) )

    # second pass to plot
    for n, cell in enumerate(flux_cells):    # second pass to plot
        if superpose==False:
            if n==0:
                plt.figure()
                plt.title('Poynting flux')
            plt.subplot(len(flux_cells), 1, n + 1)
            plt.gca().set_title('Flux cell {}'.format(n))
        cn,sz=mp.get_center_and_size(cell.where)
        max_flux=np.amax([np.amax(fa) for fa in flux_arrays])
        plot_data_curves(sim, center=cn, size=sz, data=[flux_arrays[n]],
                         superpose=superpose, options=options,
                         labels=['flux through cell {}'.format(n)],
                         dmin=-max_flux,dmax=max_flux)
Example #16
    def button_press(self, event):
        """ Handle button presses, detect if we are going to move
        any poles/zeros"""
        # find closest pole/zero
        if (event.xdata != None and event.ydata != None):

            new = event.xdata + 1.0j*event.ydata

            tzeros = list(abs(self.zeros-new))
            tpoles = list(abs(self.poles-new))

            if (size(tzeros) > 0):
                minz = min(tzeros)
            else:
                minz = float('inf')
            if (size(tpoles) > 0):
                minp = min(tpoles)
            else:
                minp = float('inf')

            if (minz < 2 or minp < 2):
                if (minz < minp):
                    # Moving zero(s)
                    self.index1 = tzeros.index(minz)
                    self.index2 = list(self.zeros).index(
                        conj(self.zeros[self.index1]))
                    self.move_zero = True
                else:
                    # Moving pole(s)
                    self.index1 = tpoles.index(minp)
                    self.index2 = list(self.poles).index(
                        conj(self.poles[self.index1]))
                    self.move_zero = False
Example #17
    def mouse_move(self, event):
        """ Handle mouse movement, redraw pzmap while drag/dropping """
        if (self.move_zero != None and
            event.xdata != None and 
            event.ydata != None):

            if (self.index1 == self.index2):
                # Real pole/zero
                new = event.xdata
                if (self.move_zero == True):
                    self.zeros[self.index1] = new
                elif (self.move_zero == False):
                    self.poles[self.index1] = new
            else:
                # Complex poles/zeros
                new = event.xdata + 1.0j*event.ydata
                if (self.move_zero == True):
                    self.zeros[self.index1] = new
                    self.zeros[self.index2] = conj(new)
                elif (self.move_zero == False):
                    self.poles[self.index1] = new
                    self.poles[self.index2] = conj(new)
            tfcn = None
            if (self.move_zero == True):
                self.tfi.set_zeros(self.zeros)
                tfcn = self.tfi.get_tf()
            elif (self.move_zero == False):
                self.tfi.set_poles(self.poles)
                tfcn = self.tfi.get_tf()
            if (tfcn != None):
                self.draw_pz(tfcn)
                self.canvas_pzmap.draw()
Example #18
 def energy(self,R1,M0):
     R0=np.tensordot(M0,self.MPO[0],axes=(0,0))
     R0=np.tensordot(R0,np.conj(M0),axes=(2,0))
     R0=np.transpose(R0,[0,2,4,1,3,5])
     energy1=np.squeeze(np.tensordot(R0,R1,axes=([3,4,5],[0,1,2])))
     norm=np.tensordot(M0,np.conj(M0),axes=([0,1,2],[0,1,2]))
     return energy1/norm
Example #19
    def correlation(self,M,operator,site_i,site_j):
        """
        Calculation for the correlation between site_i and site_j <|OiOj|>-<Oi><Oj>
        :param operator: operator

        """
        minsite = min(site_i,site_j)
        maxsite = max(site_i,site_j)
        u = np.array([[1]])
        for i in range(0,minsite):
            M[i] = np.tensordot(u, M[i],axes=(-1,1)).transpose(1,0,2)
            l,u = self.left_cannonical(M[i])
            M[i] = l
        M[minsite] = np.tensordot(u, M[minsite], axes=(-1, 1)).transpose(1, 0, 2)
        MP = np.tensordot(M[minsite],operator,axes=(0,0))
        MPI = np.tensordot(MP, np.conj(M[minsite]),axes=(-1,0))
        MPI = MPI.transpose([0,2,1,3])
        for i in range(minsite+1,maxsite):
            MI = np.tensordot(MPI, M[i],axes=(2,1))
            MPI = np.tensordot(MI, np.conj(M[i]), axes=([3,2],[0,1]))

        MP = np.tensordot(M[maxsite],operator,axes=(0,0))
        MPJ = np.tensordot(MP, np.conj(M[maxsite]),axes=(-1,0))
        MPJ = MPJ.transpose([0,2,1,3])

        product = np.tensordot(MPI, MPJ, axes=([2, 3], [0, 1]))
        correlation = np.trace(product)

        return correlation
Example #20
 def right_sweep(self,R,M):
     """
     :param R: The right operator
     :param M:
     :return:
     """
     H=np.tensordot(self.MPO[0],R[1],axes=(3,1))
     H=np.squeeze(H)
     H=np.transpose(H,[0,2,1,3])
     d0,d1,d2,d3=H.shape
     H=np.reshape(H,[d0*d1,d2*d3])
     w,v=ssl.eigsh(H,which='SA',k=1,maxiter=5000)
     v=np.reshape(v,[self.d,1,d0*d1//self.d])
     l,u=self.left_cannonical(v)
     M[0]=l
     L=[[] for i in range(len(R))]
     L[0]=np.tensordot(l,self.MPO[0],axes=(0,0))
     L[0]=np.tensordot(L[0],np.conj(l),axes=(2,0))
     L[0]=np.transpose(L[0],[0,2,4,1,3,5])
     for i in range(1,len(R)-1):
         H=np.tensordot(self.MPO[i],R[i+1],axes=(3,1))
         H=np.tensordot(L[i-1],H,axes=(4,2))
         H=H.squeeze()
         H=np.transpose(H,[2,0,4,3,1,5])
         d1,d2,d3,d4,d5,d6=H.shape
         H=np.reshape(H,[d1*d2*d3,d4*d5*d6])
         w,v=ssl.eigsh(H,which='SA',k=1,maxiter=5000)
         v = np.reshape(v, [d1, d2, d3])
         l, u = self.left_cannonical(v)
         M[i] = l
         Li = np.tensordot(L[i-1],l,axes=(3,1))
         Li = np.tensordot(Li, self.MPO[i],axes=([5,3],[0,2]))
         L[i] = np.tensordot(Li, np.conj(l),axes=([3,5],[1,0]))
     M[-1]=np.tensordot(u,M[-1],axes=(1,1))
     return L,M
Example #21
 def left_sweep(self,L,M):
     H = np.tensordot(L[-2], self.MPO[-1], axes=(4, 2))
     H = np.squeeze(H)
     H = np.transpose(H, [2,0,3,1])
     d0,d1,d2,d3=H.shape
     H = np.reshape(H, [d0*d1, d2 * d3])
     w, v = ssl.eigsh(H, which='SA', k=1,maxiter=5000)
     v=np.reshape(v,[self.d,d0*d1//self.d,1])
     v,u=self.right_canonical(v)
     M[-1] = v
     R=[[] for i in range(len(L))]
     R[-1]=np.tensordot(v,self.MPO[-1],axes=(0,0))
     R[-1]=np.tensordot(R[-1],np.conj(v),axes=[2,0])
     R[-1]=np.transpose(R[-1],[0,2,4,1,3,5])
     for i in range(1,len(L)-1):
         H = np.tensordot(L[-(i+2)],self.MPO[-(i+1)], axes=(4, 2))
         H = np.tensordot(H,R[-i], axes=(7, 1))
         H = np.squeeze(H)
         H = np.transpose(H, [2,0, 4, 3, 1, 5])
         d0,d1,d2,d3,d4,d5=H.shape
         H = np.reshape(H, [d0*d1*d2, d3*d4*d5])
         w, v = ssl.eigsh(H, which='SA', k=1,maxiter=5000)
         v=np.reshape(v,[d0,d1,d2])
         v,u=self.right_canonical(v)
         M[-(i+1)] = v
         Ri = np.tensordot(np.conj(v),R[-i],axes=(2,2))
         Ri = np.tensordot(self.MPO[-(i+1)],Ri,axes=([1,3],[0,3]))
         R[-(i+1)] = np.tensordot(v, Ri,axes=([0,2],[0,3]))
     M[0]=np.tensordot(M[0],u,axes=(2,0))
     return R,M
Example #22
def autocorr_fft(signal, axis = -1):
    """Return full autocorrelation along specified axis. Use fft
    for computation."""
    if N.ndim(signal) == 0:
        return signal
    elif signal.ndim == 1:
        n       = signal.shape[0]
        nfft    = int(2 ** nextpow2(2 * n - 1))
        lag     = n - 1
        a       = fft(signal, n = nfft, axis = -1)
        au      = ifft(a * N.conj(a), n = nfft, axis = -1)
        return N.require(N.concatenate((au[-lag:], au[:lag+1])), dtype = signal.dtype)
    elif signal.ndim == 2:
        n       = signal.shape[axis]
        lag     = n - 1
        nfft    = int(2 ** nextpow2(2 * n - 1))
        a       = fft(signal, n = nfft, axis = axis)
        au      = ifft(a * N.conj(a), n = nfft, axis = axis)
        if axis == 0:
            return N.require(N.concatenate( (au[-lag:], au[:lag+1]), axis = axis), \
                    dtype = signal.dtype)
        else:
            return N.require(N.concatenate( (au[:, -lag:], au[:, :lag+1]), 
                        axis = axis), dtype = signal.dtype)
    else:
        raise RuntimeError("rank >2 not supported yet")
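
A quick consistency check of the 1-D branch against np.correlate, assuming a nextpow2 like the helper below (the original module presumably provides its own):

import numpy as np

def nextpow2(n):
    # Smallest integer p with 2**p >= n (assumed to match the module's helper).
    return int(np.ceil(np.log2(n)))

x = np.random.rand(16)
n = x.shape[0]
nfft = 2 ** nextpow2(2 * n - 1)
a = np.fft.fft(x, n=nfft)
au = np.fft.ifft(a * np.conj(a))
ac = np.real(np.concatenate((au[-(n - 1):], au[:n])))
assert np.allclose(ac, np.correlate(x, x, mode='full'))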
Example #23
 def bartlet(self, sv): 
   ''' Bartlet's estimator for DOA. Use `argmax`. ''' 
   V = self.signal_vector 
   G = sv.steering_vectors[self.site_id] 
   self.bearing = sv.bearings[self.site_id]
   left_half = np.dot(V, np.conj(np.transpose(G))) 
   return np.real(left_half * np.conj(left_half)) 
Example #24
def test1():
    print("*** MPS tests started ***")
    (N,chi,d) = (7,10,2)
    A = randomMPS(N,chi,d)
    state = getState(A)
    state = state/np.sqrt(np.dot(np.conj(state),state))
    prod = np.dot(np.conj(state),state)
    approxA = getMPS(state,2)
    approxState = getState(approxA)
    approxProd = np.dot(np.conj(approxState),approxState)
    relErr = approxProd/prod - 1
    S = entropy(state)
    print("State total %d elements"%state.size)
    print("MPS total %d elements"%A.size)
    print("(N,chi,d) = (%d,%d,%d)"%(N,chi,d))
    print("Expected:        (%f,%f)"%polar(prod))
    print("SVD:             (%f,%f)"%polar(innerProduct(approxA,approxA)))
    print("Product:         (%f,%f)"%polar(approxProd))
    print("Relative error:  %f"%np.absolute(relErr))
    print("Entropy:         %f"%S)
    print("")
    # state = np.ones(d**N)/np.sqrt(2)**N
    # state = np.zeros(2**10)
    # state[0] = 1/np.sqrt(2)
    # state[-1] = 1/np.sqrt(2)
    state = np.random.rand(d**N)
    state = state/np.linalg.norm(state)
    
    mps = getMPS(state,4)
    print("Expected: (%f,%f)"%polar(np.inner(state,state)))
    print("MPS:      (%f,%f)"%polar(innerProduct(mps,mps)))
    print("*** MPS tests finished ***\n")
Example #25
def par_xstep(i):
    r"""Minimise Augmented Lagrangian with respect to
    :math:`\mathbf{x}_{G_i}`, one of the disjoint problems of optimizing
    :math:`\mathbf{x}`.

    Parameters
    ----------
    i : int
      Index of grouping to update

    """
    global mp_X
    global mp_DX
    YU0f = sl.rfftn(mp_Y0[[i]] - mp_U0[[i]], mp_Nv, mp_axisN)
    YU1f = sl.rfftn(mp_Y1[mp_grp[i]:mp_grp[i+1]] -
                    1/mp_alpha*mp_U1[mp_grp[i]:mp_grp[i+1]], mp_Nv, mp_axisN)
    if mp_Cd == 1:
        b = np.conj(mp_Df[mp_grp[i]:mp_grp[i+1]]) * YU0f + mp_alpha**2*YU1f
        Xf = sl.solvedbi_sm(mp_Df[mp_grp[i]:mp_grp[i+1]], mp_alpha**2, b,
                            mp_cache[i], axis=mp_axisM)
    else:
        b = sl.inner(np.conj(mp_Df[mp_grp[i]:mp_grp[i+1]]), YU0f,
                     axis=mp_C) + mp_alpha**2*YU1f
        Xf = sl.solvemdbi_ism(mp_Df[mp_grp[i]:mp_grp[i+1]], mp_alpha**2, b,
                              mp_axisM, mp_axisC)
    mp_X[mp_grp[i]:mp_grp[i+1]] = sl.irfftn(Xf, mp_Nv,
                                            mp_axisN)
    mp_DX[i] = sl.irfftn(sl.inner(mp_Df[mp_grp[i]:mp_grp[i+1]], Xf,
                                  mp_axisM), mp_Nv, mp_axisN)
Example #26
def calc_stokes(Ex,Ey,s=None):
    
    if len(Ex) != len(Ey):
        raise ValueError('Ex and Ey dimensions do not match')
        
    if s is None:
        s = np.arange(len(Ex))
    
    Ex_ = np.conj(Ex)
    Ey_ = np.conj(Ey)
    
    Jxx = Ex * Ex_
    Jxy = Ex * Ey_
    Jyx = Ey * Ex_
    Jyy = Ey * Ey_
    
    del (Ex_,Ey_)
    
    S = StokesParameters()
    S.sc = s
    S.s0 = real(Jxx + Jyy)
    S.s1 = real(Jxx - Jyy)
    S.s2 = real(Jxy + Jyx)
    S.s3 = real(1j * (Jyx - Jxy))
    
    return S
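
For a purely circular field (Ey = i*Ex) the same Jones products give s1 = s2 = 0 and |s3| = s0, with the sign fixed by the handedness convention; a minimal check computing the components directly rather than through StokesParameters:

import numpy as np

Ex = np.exp(1j * np.linspace(0.0, 2.0 * np.pi, 100))
Ey = 1j * Ex

Jxx, Jyy = Ex * np.conj(Ex), Ey * np.conj(Ey)
Jxy, Jyx = Ex * np.conj(Ey), Ey * np.conj(Ex)

s0 = np.real(Jxx + Jyy)
s3 = np.real(1j * (Jyx - Jxy))
assert np.allclose(np.real(Jxx - Jyy), 0.0)      # s1 vanishes
assert np.allclose(np.real(Jxy + Jyx), 0.0)      # s2 vanishes
assert np.allclose(np.abs(s3), s0)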
Example #27
def test_solve_fredholm_reconstr_ac():
    """
        here we see that the reconstruction quality is independent of the integration weights

        differences occur when checking validity of the interpolated time continuous Fredholm equation
    """
    _WC_ = 2
    def lac(t):
        return np.exp(- np.abs(t) - 1j*_WC_*t)
    t_max = 10
    tol = 2e-10
    for ng in range(11,500,30):
        t, w = sp.method_kle.get_mid_point_weights_times(t_max, ng)
        r = lac(t.reshape(-1,1)-t.reshape(1,-1))
        _eig_val, _eig_vec = sp.method_kle.solve_hom_fredholm(r, w)
        _eig_vec_ast = np.conj(_eig_vec)  # (N_gp, N_ev)
        tmp = _eig_val.reshape(1, -1) * _eig_vec  # (N_gp, N_ev)
        recs_bcf = np.tensordot(tmp, _eig_vec_ast, axes=([1], [1]))
        rd = np.max(np.abs(recs_bcf - r) / np.abs(r))
        assert rd < tol, "rd={} >= {}".format(rd, tol)

        t, w = sp.method_kle.get_simpson_weights_times(t_max, ng)
        r = lac(t.reshape(-1, 1) - t.reshape(1, -1))
        _eig_val, _eig_vec = sp.method_kle.solve_hom_fredholm(r, w)
        _eig_vec_ast = np.conj(_eig_vec)  # (N_gp, N_ev)
        tmp = _eig_val.reshape(1, -1) * _eig_vec  # (N_gp, N_ev)
        recs_bcf = np.tensordot(tmp, _eig_vec_ast, axes=([1], [1]))
        rd = np.max(np.abs(recs_bcf - r) / np.abs(r))
        assert rd < tol, "rd={} >= {}".format(rd, tol)
Example #28
    def test_adjoints(exp, freqs):

        deltas = exp.m0.mesh.deltas

        m1_ = exp.m1.asarray()

        lindata = exp.lin_results["simdata"]
        dhat = exp.dhat

        adjmodel = exp.adj_results["imaging_condition"].asarray()

        temp_data_prod = 0.0
        for nu in freqs:
            temp_data_prod += np.dot(lindata[nu].reshape(dhat[nu].shape), np.conj(dhat[nu]))

        pt1 = temp_data_prod

        pt2 = np.dot(m1_.T, np.conj(adjmodel)).squeeze() * np.prod(deltas)

        print "{0}: ".format(exp.name)
        print "<Fm1, d>             = {0: .4e} ({1:.4e})".format(pt1, np.linalg.norm(pt1))
        print "<m1, F*d>            = {0: .4e} ({1:.4e})".format(pt2, np.linalg.norm(pt2))
        print "<Fm1, d> - <m1, F*d> = {0: .4e} ({1:.4e})".format(pt1 - pt2, np.linalg.norm(pt1 - pt2))

        print "Relative error       = {0: .4e}\n".format(np.linalg.norm(pt1 - pt2) / np.linalg.norm(pt1))
Example #29
def deconstruct_single_qubit_matrix_into_angles(
        mat: np.ndarray) -> Tuple[float, float, float]:
    """Breaks down a 2x2 unitary into more useful ZYZ angle parameters.

    Args:
        mat: The 2x2 unitary matrix to break down.

    Returns:
        A tuple containing the amount to phase around Z, then rotate around Y,
        then phase around Z (all in radians).
    """
    # Anti-cancel left-vs-right phase along top row.
    right_phase = cmath.phase(mat[0, 1] * np.conj(mat[0, 0])) + math.pi
    mat = np.dot(mat, _phase_matrix(-right_phase))

    # Cancel top-vs-bottom phase along left column.
    bottom_phase = cmath.phase(mat[1, 0] * np.conj(mat[0, 0]))
    mat = np.dot(_phase_matrix(-bottom_phase), mat)

    # Lined up for a rotation. Clear the off-diagonal cells with one.
    rotation = math.atan2(abs(mat[1, 0]), abs(mat[0, 0]))
    mat = np.dot(_rotation_matrix(-rotation), mat)

    # Cancel top-left-vs-bottom-right phase.
    diagonal_phase = cmath.phase(mat[1, 1] * np.conj(mat[0, 0]))

    # Note: Ignoring global phase.
    return right_phase + diagonal_phase, rotation * 2, bottom_phase
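
A hedged round-trip check: assuming _phase_matrix(t) is diag(1, e^{it}) and _rotation_matrix(t) is the real 2-D rotation (the usual forms for this decomposition), the returned angles recompose the unitary up to global phase, with the first returned angle applied first:

import cmath, math
from typing import Tuple
import numpy as np

def _phase_matrix(t):
    # Assumed form: phase the |1> basis state by e^{it}.
    return np.diag([1.0, cmath.exp(1j * t)])

def _rotation_matrix(t):
    # Assumed form: real rotation by t.
    c, s = math.cos(t), math.sin(t)
    return np.array([[c, -s], [s, c]])

u = np.linalg.qr(np.random.randn(2, 2) + 1j * np.random.randn(2, 2))[0]
z0, y, z1 = deconstruct_single_qubit_matrix_into_angles(u)
v = _phase_matrix(z1) @ _rotation_matrix(y / 2) @ _phase_matrix(z0)
g = (u @ v.conj().T)[0, 0]           # residual global phase
assert np.allclose(u, g * v, atol=1e-9)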
Example #30
def levup(acur, knxt, ecur=None):
    """LEVUP  One step forward Levinson recursion

    :param acur:
    :param knxt:
    :return: 
        * anxt the P+1'th order prediction polynomial based on the P'th order prediction polynomial, acur, and the
          P+1'th order reflection coefficient, knxt.
        * enxt the P+1'th order prediction error, based on the P'th order prediction error, ecur.


    :References:  P. Stoica R. Moses, Introduction to Spectral Analysis  Prentice Hall, N.J., 1997, Chapter 3.
    """
    if acur[0] != 1:
        raise ValueError("First coefficient of the prediction polynomial must be unity.")
    acur = acur[1:]  #  Drop the leading 1, it is not needed

    # Matrix formulation from Stoica is used to avoid looping
    anxt = numpy.concatenate((acur, [0])) + knxt * numpy.concatenate((numpy.conj(acur[-1::-1]), [1]))

    enxt = None
    if ecur is not None:
        # matlab version enxt = (1-knxt'.*knxt)*ecur
        enxt = (1.0 - numpy.dot(numpy.conj(knxt), knxt)) * ecur

    anxt = numpy.insert(anxt, 0, 1)

    return anxt, enxt
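
A round-trip sanity check pairing levup with the levdown of Example #8: stepping up by a reflection coefficient and back down recovers the original polynomial and error:

import numpy

acur = numpy.array([1.0, -0.9, 0.2])     # an order-2 prediction polynomial
anxt, enxt = levup(acur, knxt=0.5, ecur=1.0)
arec, erec = levdown(anxt, enxt)
assert numpy.allclose(arec, acur)
assert abs(erec - 1.0) < 1e-12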
Example #31
def apply_caltable_uvfits(gaincaltable,
                          datastruct,
                          filename_out,
                          cal_amp=False):
    """apply a calibration table to a uvfits file
       Args:
        caltable (Caltable) : a gaincaltable object
        datastruct (Datastruct) :  input data structure in EHTIM format
        filename_out (str) :  uvfits output file name
        cal_amp (bool): whether to do amplitude calibration
    """

    if datastruct.dtype != "EHTIM":
        raise Exception(
            "datastruct must be in EHTIM format in apply_caltable_uvfits!")

    gains0 = pd.read_csv(gaincaltable)
    polygain = {}
    mjd_start = {}
    polyamp = {}

    # determine which calibration to use when there are multiple options for multiple periods
    mjd_mean = datastruct.data['time'].mean() - MJD_0
    gains = gains0[(gains0.mjd_start <= mjd_mean)
                   & (gains0.mjd_stop >= mjd_mean)].reset_index(
                       drop=True).copy()

    for cou, row in gains.iterrows():
        polygain[row.station] = poly_from_str(str(row.ratio_phas))
        mjd_start[row.station] = row.mjd_start
        if cal_amp:
            polyamp[row.station] = poly_from_str(str(row.ratio_amp))
        else:
            polyamp[row.station] = poly_from_str('1.0')

    #print(gains0)
    #print(polygain)
    # interpolate the calibration  table
    rinterp = {}
    linterp = {}
    skipsites = []

    #-------------------------------------------
    # sort by baseline
    data = datastruct.data
    idx = np.lexsort((data['t2'], data['t1']))
    bllist = []
    for key, group in it.groupby(data[idx], lambda x: set((x['t1'], x['t2']))):
        bllist.append(np.array([obs for obs in group]))
    bllist = np.array(bllist)

    # apply the  calibration

    datatable = []
    coub = 0
    for bl_obs in bllist:
        t1 = bl_obs['t1'][0]
        t2 = bl_obs['t2'][0]
        coub = coub + 1
        print('Calibrating {}-{} baseline, {}/{}'.format(
            t1, t2, coub, len(bllist)))
        time_mjd = bl_obs['time'] - MJD_0  #dates are in mjd in Datastruct

        ###########################################################################################################################
        #OLD VERSION WHERE LCP IS SHIFTED TO RCP
        #        if t1 in skipsites:
        #            rscale1 = lscale1 = np.array(1.)
        #       else:
        #            try:
        #                rscale1 = 1./np.sqrt(polyamp[t1](time_mjd))
        #                lscale1 = np.sqrt(polyamp[t1](time_mjd))*np.exp(1j*polygain[t1](time_mjd - mjd_start[t1])*np.pi/180.)
        #            except KeyError:
        #                rscale1 = lscale1 = np.array(1.)
        #
        #        if t2 in skipsites:
        #            rscale2 = lscale2 = np.array(1.)
        #        else:
        #            try:
        #                rscale2 = 1./np.sqrt(polyamp[t2](time_mjd))
        #                lscale2 = np.sqrt(polyamp[t2](time_mjd))*np.exp(1j*polygain[t2](time_mjd - mjd_start[t2])*np.pi/180.)
        #            except KeyError:
        #                rscale2 = lscale2 = np.array(1.)
        ###########################################################################################################################

        ###########################################################################################################################
        #NEW VERSION WHERE RCP IS SHIFTED TO LCP // MW 2018/NOV/13
        if t1 in skipsites:
            rscale1 = lscale1 = np.array(1.)
        else:
            try:
                rscale1 = 1. / np.sqrt(polyamp[t1](time_mjd)) * np.exp(
                    -1j * polygain[t1](time_mjd - mjd_start[t1]) * np.pi /
                    180.)
                lscale1 = np.sqrt(polyamp[t1](time_mjd))
            except KeyError:
                rscale1 = lscale1 = np.array(1.)

        if t2 in skipsites:
            rscale2 = lscale2 = np.array(1.)
        else:
            try:
                rscale2 = 1. / np.sqrt(polyamp[t2](time_mjd)) * np.exp(
                    -1j * polygain[t2](time_mjd - mjd_start[t2]) * np.pi /
                    180.)
                lscale2 = np.sqrt(polyamp[t2](time_mjd))
            except KeyError:
                rscale2 = lscale2 = np.array(1.)
###########################################################################################################################

        rrscale = rscale1 * rscale2.conj()
        llscale = lscale1 * lscale2.conj()
        rlscale = rscale1 * lscale2.conj()
        lrscale = lscale1 * rscale2.conj()

        bl_obs['rr'] = (bl_obs['rr']) * rrscale
        bl_obs['ll'] = (bl_obs['ll']) * llscale
        bl_obs['rl'] = (bl_obs['rl']) * rlscale
        bl_obs['lr'] = (bl_obs['lr']) * lrscale

        bl_obs['rrweight'] = (bl_obs['rrweight']) / (np.abs(rrscale)**2)
        bl_obs['llweight'] = (bl_obs['llweight']) / (np.abs(llscale)**2)
        bl_obs['rlweight'] = (bl_obs['rlweight']) / (np.abs(rlscale)**2)
        bl_obs['lrweight'] = (bl_obs['lrweight']) / (np.abs(lrscale)**2)

        if len(datatable):
            datatable = np.hstack((datatable, bl_obs))
        else:
            datatable = bl_obs

    # put in uvfits format datastruct
    # telescope arrays
    tarr = datastruct.antenna_info
    tkeys = {tarr[i]['site']: i for i in range(len(tarr))}
    tnames = tarr['site']
    tnums = np.arange(1, len(tarr) + 1)
    xyz = np.array([[tarr[i]['x'], tarr[i]['y'], tarr[i]['z']]
                    for i in np.arange(len(tarr))])

    # uvfits format output data table
    bl_list = []
    for i in range(len(datatable)):
        entry = datatable[i]
        t1num = entry['t1']
        t2num = entry['t2']
        rl = entry['rl']
        lr = entry['lr']
        if tkeys[entry['t2']] < tkeys[
                entry['t1']]:  # reorder telescopes if necessary
            #print entry['t1'], tkeys[entry['t1']], entry['t2'], tkeys[entry['t2']]
            entry['t1'] = t2num
            entry['t2'] = t1num
            entry['u'] = -entry['u']
            entry['v'] = -entry['v']
            entry['rr'] = np.conj(entry['rr'])
            entry['ll'] = np.conj(entry['ll'])
            entry['rl'] = np.conj(lr)
            entry['lr'] = np.conj(rl)
            datatable[i] = entry
        bl_list.append(
            np.array((entry['time'], entry['t1'], entry['t2']), dtype=BLTYPE))
    _, unique_idx_anttime, idx_anttime = np.unique(bl_list,
                                                   return_index=True,
                                                   return_inverse=True)
    _, unique_idx_freq, idx_freq = np.unique(datatable['freq'],
                                             return_index=True,
                                             return_inverse=True)

    # random group params
    u = datatable['u'][unique_idx_anttime]
    v = datatable['v'][unique_idx_anttime]
    t1num = [tkeys[scope] + 1 for scope in datatable['t1'][unique_idx_anttime]]
    t2num = [tkeys[scope] + 1 for scope in datatable['t2'][unique_idx_anttime]]
    bls = 256 * np.array(t1num) + np.array(t2num)
    jds = datatable['time'][unique_idx_anttime]
    tints = datatable['tint'][unique_idx_anttime]

    # data table
    nap = len(unique_idx_anttime)
    nsubchan = 1
    nstokes = 4
    nchan = datastruct.obs_info.nchan

    outdat = np.zeros((nap, 1, 1, nchan, nsubchan, nstokes, 3))
    outdat[:, :, :, :, :, :, 2] = -1.0

    vistypes = ['rr', 'll', 'rl', 'lr']
    for i in range(len(datatable)):
        row_freq_idx = idx_freq[i]
        row_dat_idx = idx_anttime[i]

        for j in range(len(vistypes)):
            outdat[row_dat_idx, 0, 0, row_freq_idx, 0, j,
                   0] = np.real(datatable[i][vistypes[j]])
            outdat[row_dat_idx, 0, 0, row_freq_idx, 0, j,
                   1] = np.imag(datatable[i][vistypes[j]])
            outdat[row_dat_idx, 0, 0, row_freq_idx, 0, j,
                   2] = datatable[i][vistypes[j] + 'weight']

    # package data for saving
    obsinfo_out = datastruct.obs_info
    antennainfo_out = Antenna_info(tnames, tnums, xyz)
    uvfitsdata_out = Uvfits_data(u, v, bls, jds, tints, outdat)
    datastruct_out = Datastruct(obsinfo_out, antennainfo_out, uvfitsdata_out)

    # save final file
    save_uvfits(datastruct_out, filename_out)
    return
Example #32
    def train(self, im, init_rect):
        self.pos = [
            init_rect[1] + init_rect[3] / 2., init_rect[0] + init_rect[2] / 2.
        ]
        self.res.append(init_rect)
        self.target_sz = np.asarray(init_rect[2:])
        self.target_sz = self.target_sz[::-1]
        self.im_sz = im.shape[:2]
        if np.prod(self.target_sz) > self.translation_model_max_area:
            self.currentScaleFactor = np.sqrt(
                np.prod(self.init_target_sz) / self.translation_model_max_area)
        else:
            self.currentScaleFactor = 1.0
        # target size at the initial scale
        self.init_target_sz = self.target_sz / self.currentScaleFactor
        # window size, taking padding into account
        self.patch_size = np.floor(self.init_target_sz *
                                   (1 + self.padding)).astype(int)

        if self.compressed_features == 'gray_hog':
            self.output_sigma = np.sqrt(
                np.prod(
                    np.floor(self.init_target_sz /
                             self.feature_ratio))) * self.output_sigma_factor
        elif self.compressed_features == 'cn':
            self.output_sigma = np.sqrt(np.prod(
                self.init_target_sz)) * self.output_sigma_factor

        self.use_sz = np.floor(self.patch_size / self.feature_ratio)
        # compute Y
        grid_y = np.roll(
            np.arange(np.floor(self.use_sz[0])) - np.floor(self.use_sz[0] / 2),
            int(-np.floor(self.use_sz[0] / 2)))
        grid_x = np.roll(
            np.arange(np.floor(self.use_sz[1])) - np.floor(self.use_sz[1] / 2),
            int(-np.floor(self.use_sz[1] / 2)))
        rs, cs = np.meshgrid(grid_x, grid_y)
        self.y = np.exp(-0.5 / self.output_sigma**2 * (rs**2 + cs**2))
        self.yf = self.fft2(self.y)
        if self.interpolate_response:
            self.interp_sz = np.array(self.y.shape) * self.feature_ratio
            rf = self.resizeDFT_2D(self.yf, self.interp_sz)
            r = np.real(np.fft.ifft2(rf))
            # target location is at the maximum response
            self.v_centre_y, self.h_centre_y = np.unravel_index(
                r.argmax(), r.shape)

        # store pre-computed cosine window
        self.cos_window = np.outer(np.hanning(self.use_sz[0]),
                                   np.hanning(self.use_sz[1]))

        if self.number_of_scales > 0:
            # make sure the scale model is not too large so as to save computation time
            if self.scale_model_factor**2 * np.prod(
                    self.init_target_sz) > self.scale_model_max_area:
                self.scale_model_factor = np.sqrt(
                    float(self.scale_model_max_area) /
                    np.prod(self.init_target_sz))

            # set the scale model size
            self.scale_model_sz = np.floor(self.init_target_sz *
                                           self.scale_model_factor).astype(int)
            # force reasonable scale changes
            self.min_scale_factor = self.scale_step**np.ceil(
                np.log(np.max(5. / self.patch_size)) / np.log(self.scale_step))
            self.max_scale_factor = self.scale_step**np.floor(
                np.log(
                    np.min(
                        np.array([im.shape[0], im.shape[1]]) * 1.0 /
                        self.init_target_sz)) / np.log(self.scale_step))

            if self.s_num_compressed_dim == 'MAX':
                self.s_num_compressed_dim = len(self.scaleSizeFactors)
        ################################################################################################################
        # Compute coefficients for the translation filter
        ################################################################################################################
        # extract the feature map of the local image patch to train the classifier
        self.im_crop = self.get_subwindow(
            im, self.pos, self.patch_size * self.currentScaleFactor)
        # initialise the appearance
        self.h_num_npca, self.h_num_pca = self.get_features(self.im_crop)

        # if dimensionality reduction is used: update the projection matrix
        # refer to the tPAMI paper, eq. (7a)
        self.projection_matrix, self.old_cov_matrix = \
            self.calculate_projection(self.h_num_pca, self.num_compressed_dim, self.interp_factor, old_cov_matrix=[])

        # project the features of the new appearance example using the new projection matrix
        self.h_proj = self.feature_projection(self.h_num_npca, self.h_num_pca,
                                              self.projection_matrix,
                                              self.cos_window)

        if self.kernel == 'linear':
            self.hf_proj = self.fft2(self.h_proj)
            self.hf_num = np.multiply(np.conj(self.yf[:, :, None]),
                                      self.hf_proj)
            self.hf_den = np.sum(
                np.multiply(self.hf_proj, np.conj(self.hf_proj)),
                2) + self.lambda_value

        elif self.kernel == 'gaussian':
            # TODO: gaussian kernel
            self.kf = self.fft2(
                self.dense_gauss_kernel(self.sigma, self.h_proj))
            self.alpha_num = np.multiply(self.yf, self.kf)
            self.alpha_den = np.multiply(self.kf,
                                         (self.kf + self.lambda_value))

        ################################################################################################################
        # Compute coefficients for the scale filter
        ################################################################################################################
        if self.number_of_scales > 0:
            self.s_num = self.get_scale_subwindow(
                im, self.pos, self.init_target_sz,
                self.currentScaleFactor * self.scaleSizeFactors,
                self.scale_model_sz)
            # project the features of the new appearance example using the new projection matrix
            self.projection_matrix_scale = self.calculate_projection(
                self.s_num, self.s_num_compressed_dim)
            # self.s_proj is of dim D * N !
            self.s_proj = self.feature_projection([], self.s_num,
                                                  self.projection_matrix_scale,
                                                  self.scale_wnidow)

            if self.kernel == 'linear':
                self.sf_proj = np.fft.fft(self.s_proj, axis=1)
                self.sf_num = np.multiply(self.ysf, np.conj(self.sf_proj))
                self.sf_den = np.sum(
                    np.multiply(self.sf_proj, np.conj(self.sf_proj)), 0)

            elif self.kernel == 'gaussian':
                # TODO: gaussian kernel
                pass
Example #33
for filename in args:
    uvi = a.miriad.UV(filename)
    a.scripting.uv_selector(uvi, opts.ant, opts.pol)
    for (crd, t, (i, j)), d, f in uvi.all(raw=True):
        if len(times) == 0 or times[-1] != t:
            if len(times) % 8 == 0:
                eor_mdl[t] = n.random.normal(size=chans.size) * n.exp(
                    2j * n.pi * n.random.uniform(size=chans.size))
            else:
                eor_mdl[t] = eor_mdl[times[-1]]
            times.append(t)
        bl = a.miriad.ij2bl(i, j)
        sep = bl2sep[bl]
        if sep < 0:
            #print 'Conj:', a.miriad.bl2ij(bl)
            d, sep = n.conj(d), -sep
        d, f = d.take(chans), f.take(chans)
        w = n.logical_not(f).astype(n.float)
        Trms = d * capo.pspec.jy2T(afreqs)
        if True:  # generate noise
            TSYS = 560e3  # mK
            B = 100e6 / uvi['nchan']
            NDAY = 44
            NBL = 4
            NPOL = 2
            T_INT = 43.  # for just compressed data
            #T_INT = 351. # for fringe-rate filtered data
            Trms_ = n.random.normal(size=Trms.size) * n.exp(
                2j * n.pi * n.random.uniform(size=Trms.size))
            Trms_ *= TSYS / n.sqrt(B * T_INT * NDAY * NBL * NPOL)
            #Trms_ *= n.sqrt(n.sqrt(351./43)) # penalize for oversampling fr-filtered data
Example #34
    return f


# res = minimize(lambda coef:opt_fidelity(coef),method="powell",x0=[0.1968067,-0.07472552,0.06522846])
# # # res = minimize(lambda coef:opt_fidelity(coef,plot=True),method="powell",x0=[-0.1,-0.1,-0.1])
# # # res = minimize(lambda coef:opt_fidelity(coef),method="powell",x0=[0.1,0.1,0.1])
# opt_fidelity(res.x,plot=True)
# opt_fidelity([0.2451307,0,0],plot=True)
# opt_fidelity([-0.23457384,0,0],plot=True)

z = zm_state(3, 1, pxp)
coef = [0.18243653, -0.10390499, 0.0544521]
# coef = [0,0,0]
H = H0.sector.matrix() + coef[0] * V1 + coef[1] * V2 + coef[2] * V3
e, u = np.linalg.eigh(H)
psi_energy = np.conj(u[pxp.keys[z.ref], :])
eigenvalues = np.copy(e)
overlap = np.log10(np.abs(psi_energy)**2)
to_del = []
for n in range(0, np.size(overlap, axis=0)):
    if overlap[n] < -5:
        to_del = np.append(to_del, n)
for n in range(np.size(to_del, axis=0) - 1, -1, -1):
    overlap = np.delete(overlap, to_del[n])
    eigenvalues = np.delete(eigenvalues, to_del[n])
plt.xlabel(r"$E$")
plt.ylabel(r"$\log(\vert \langle Z_3 \vert E \rangle \vert^2)$")
# plt.title(r"$PXP$ Optimized $Z_3$ Pertubations, N="+str(pxp.N))
plt.title(r"$PXP$ Optimized $Z_3$ Pertubations, N=18")
plt.scatter(eigenvalues, overlap)
plt.show()
Example #35
def mps(strf, fstep, tstep, half=False):
    """Calculate the Modulation Power Spectrum of a STRF.

    Parameters
    ----------
    strf : array, shape (nfreqs, nlags)
        The STRF we'll use for MPS calculation.
    fstep : float
        The step size of the frequency axis for the STRF
    tstep : float
        The step size of the time axis for the STRF.
    half : bool
        Return the top half of the MPS (aka, the Positive
        frequency modulations)

    Returns
    -------
    mps_freqs : array
        The values corresponding to spectral modulations, in cycles / octave
        or cycles / Hz depending on the units of fstep
    mps_times : array
        The values corresponding to temporal modulations, in Hz
    amps : array
        The MPS of the input strf

    """
    # Convert to frequency space and take amplitude
    nfreqs, nlags = strf.shape
    fstrf = np.fliplr(strf)
    mps = np.fft.fftshift(np.fft.fft2(fstrf))
    amps = np.real(mps * np.conj(mps))

    # Obtain labels for frequency axis
    mps_freqs = np.zeros([nfreqs])
    fcircle = 1.0 / fstep
    for i in range(nfreqs):
        mps_freqs[i] = (i / float(nfreqs)) * fcircle
        if mps_freqs[i] > fcircle / 2.0:
            mps_freqs[i] -= fcircle

    mps_freqs = np.fft.fftshift(mps_freqs)
    if mps_freqs[0] > 0.0:
        mps_freqs[0] = -mps_freqs[0]

    # Obtain labels for time axis
    fcircle = tstep
    mps_times = np.zeros([nlags])
    for i in range(nlags):
        mps_times[i] = (i / float(nlags)) * fcircle
        if mps_times[i] > fcircle / 2.0:
            mps_times[i] -= fcircle

    mps_times = np.fft.fftshift(mps_times)
    if mps_times[0] > 0.0:
        mps_times[0] = -mps_times[0]

    if half:
        halfi = np.where(mps_freqs == 0.0)[0][0]
        amps = amps[halfi:, :]
        mps_freqs = mps_freqs[halfi:]

    return mps_freqs, mps_times, amps
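
A quick usage sketch on a toy separable STRF (the step sizes are illustrative):

import numpy as np

freqs = np.arange(32)[:, None]
lags = np.arange(20)[None, :]
strf = np.cos(2 * np.pi * freqs / 8.0) * np.exp(-lags / 5.0)   # Gabor-ish ripple

mps_freqs, mps_times, amps = mps(strf, fstep=1.0 / 12.0, tstep=0.001, half=True)
print(amps.shape, mps_freqs.shape, mps_times.shape)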
Example #36
    def surface_S(self, az_deg=0):
        """ This function generates a (short) time series of surface realizations.
    
            :param az_deg: azimuth angle, in degrees
            :param ntimes: number of time samples generated (taken from the instance).
            :param t_step: spacing between time samples, interpretable as the Pulse Repetition Interval (taken from the instance).
    
            :returns: a tuple with the configuration object, the surfaces, the radial velocities for each grid point,
                      and the complex scattering coefficients
        """
        if not self.inc_is_set:
            print("Set the incident angle first")
            return

        cfg = self.cfg
        use_hmtf = self.use_hmtf
        scat_spec_enable = self.scat_spec_enable
        scat_spec_mode = self.scat_spec_mode
        scat_bragg_enable = self.scat_bragg_enable
        scat_bragg_model = self.scat_bragg_model
        scat_bragg_d = self.scat_bragg_d
        scat_bragg_spec = self.scat_bragg_spec
        scat_bragg_spread = self.scat_bragg_spread

        # SAR
        try:
            radcfg = cfg.sar
        except AttributeError:
            radcfg = cfg.radar
        alt = radcfg.alt
        f0 = radcfg.f0
        prf = radcfg.prf

        pol = self.pol
        l0 = const.c / f0
        k0 = 2. * np.pi * self.f0 / const.c

        do_hh = self.do_hh
        do_vv = self.do_vv
        # OCEAN / OTHERS
        ocean_dt = cfg.ocean.dt

        # Get a surface realization calculated
        # self.surface.t = 0

        inc_angle = self.inc_angle
        sr0 = geosar.inc_to_sr(inc_angle, alt)
        gr0 = geosar.inc_to_gr(inc_angle, alt)
        gr = self.surface.x + gr0
        sr, inc, _ = geosar.gr_to_geo(gr, alt)
        sr -= np.min(sr)
        inc = inc.reshape(1, inc.size)
        sr = sr.reshape(1, sr.size)
        gr = gr.reshape(1, gr.size)
        sin_inc = np.sin(inc)
        cos_inc = np.cos(inc)
        az_rad = np.radians(az_deg)
        cos_az = np.cos(az_rad)
        sin_az = np.sin(az_rad)

        t_last_rcs_bragg = -1.
        last_progress = -1
        ntimes = self.ntimes
        NRCS_avg_vv = np.zeros(ntimes, dtype=np.float)
        NRCS_avg_hh = np.zeros(ntimes, dtype=np.float)
        # RCS MODELS
        # Specular
        if scat_spec_enable:
            if scat_spec_mode == 'kodis':
                rcs_spec = rcs.RCSKodis(inc, k0, self.surface.dx,
                                        self.surface.dy)
            elif scat_spec_mode == 'fa' or scat_spec_mode == 'spa':
                spec_ph0 = np.random.uniform(
                    0., 2. * np.pi, size=[self.surface.Ny, self.surface.Nx])
                rcs_spec = rcs.RCSKA(scat_spec_mode, k0, self.surface.x,
                                     self.surface.y, self.surface.dx,
                                     self.surface.dy)
            else:
                raise NotImplementedError(
                    'RCS mode %s for specular scattering not implemented' %
                    scat_spec_mode)

        # Bragg
        if scat_bragg_enable:
            phase_bragg = np.zeros([2, self.surface.Ny, self.surface.Nx])
            bragg_scats = np.zeros([2, self.surface.Ny, self.surface.Nx],
                                   dtype=np.complex)
            tau_c = closure.grid_coherence(cfg.ocean.wind_U, self.surface.dx,
                                           f0)
            rndscat_p = closure.randomscat_ts(
                tau_c, (self.surface.Ny, self.surface.Nx), prf)
            rndscat_m = closure.randomscat_ts(
                tau_c, (self.surface.Ny, self.surface.Nx), prf)
            # NOTE: This ignores slope, may be changed
            k_b = 2. * k0 * sin_inc
            c_b = sin_inc * np.sqrt(const.g / k_b + 0.072e-3 * k_b)

        surface_area = self.surface.dx * self.surface.dy * self.surface.Nx * self.surface.Ny
        if do_hh:
            scene_hh = np.zeros([ntimes, self.surface.Ny, self.surface.Nx],
                                dtype=np.complex)
        if do_vv:
            scene_vv = np.zeros([ntimes, self.surface.Ny, self.surface.Nx],
                                dtype=np.complex)

        for az_step in range(ntimes):

            # AZIMUTH & SURFACE UPDATE
            t_now = az_step * self.t_step

            ## COMPUTE RCS FOR EACH MODEL
            # Note: SAR processing is range independent as slant range is fixed
            # sin_az = az / sr0
            # az_proj_angle = np.arcsin(az / gr0)

            # Note: Projected displacements are added to slant range
            sr_surface = (
                sr - cos_inc * self.dz[az_step] + sin_inc *
                (self.dx[az_step] * cos_az + self.dy[az_step] * sin_az))

            # Specular
            if scat_spec_enable:
                if scat_spec_mode == 'kodis':
                    Esn_sp = np.sqrt(4. * np.pi) * rcs_spec.field(
                        az_rad, sr_surface, self.diffx[az_step],
                        self.diffy[az_step], self.diffxx[az_step],
                        self.diffyy[az_step], self.diffxy[az_step])
                    if do_hh:
                        scene_hh[az_step] += Esn_sp
                    if do_vv:
                        scene_vv[az_step] += Esn_sp
                else:
                    # FIXME
                    if do_hh:
                        pol_tmp = 'hh'
                        Esn_sp = (
                            np.exp(-1j * (2. * k0 * sr_surface)) *
                            (4. * np.pi)**1.5 * rcs_spec.field(
                                1, 1, pol_tmp[0], pol_tmp[1], inc, inc, az_rad,
                                az_rad + np.pi, self.dz[az_step],
                                self.diffx[az_step], self.diffy[az_step],
                                self.diffxx[az_step], self.diffyy[az_step],
                                self.diffxy[az_step]))
                        scene_hh[az_step] += Esn_sp
                    if do_vv:
                        pol_tmp = 'vv'
                        Esn_sp = (
                            np.exp(-1j * (2. * k0 * sr_surface)) *
                            (4. * np.pi)**1.5 * rcs_spec.field(
                                1, 1, pol_tmp[0], pol_tmp[1], inc, inc, az_rad,
                                az_rad + np.pi, self.dz[az_step],
                                self.diffx[az_step], self.diffy[az_step],
                                self.diffxx[az_step], self.diffyy[az_step],
                                self.diffxy[az_step]))
                        scene_vv[az_step] += Esn_sp
                NRCS_avg_hh[az_step] += (np.sum(np.abs(Esn_sp)**2) /
                                         surface_area)
                NRCS_avg_vv[az_step] += NRCS_avg_hh[az_step]

            # Bragg
            if scat_bragg_enable:
                if (t_now - t_last_rcs_bragg) > ocean_dt:

                    if scat_bragg_model == 'romeiser97':
                        if pol == 'DP':
                            RCS_bragg_hh, RCS_bragg_vv = self.rcs_bragg.rcs(
                                az_rad, self.diffx[az_step],
                                self.diffy[az_step])
                        elif pol == 'hh':
                            RCS_bragg_hh = self.rcs_bragg.rcs(
                                az_rad, self.diffx[az_step],
                                self.diffy[az_step])
                        else:
                            RCS_bragg_vv = self.rcs_bragg.rcs(
                                az_rad, self.diffx[az_step],
                                self.diffy[az_step])

                    if use_hmtf:
                        # Fix Bad MTF points
                        (self.h_mtf[az_step])[np.where(
                            self.surface.hMTF < -1)] = -1
                        if do_hh:
                            RCS_bragg_hh[0] *= (1 + self.h_mtf[az_step])
                            RCS_bragg_hh[1] *= (1 + self.h_mtf[az_step])
                        if do_vv:
                            RCS_bragg_vv[0] *= (1 + self.h_mtf[az_step])
                            RCS_bragg_vv[1] *= (1 + self.h_mtf[az_step])

                    t_last_rcs_bragg = t_now

                if do_hh:
                    scat_bragg_hh = np.sqrt(RCS_bragg_hh)
                    NRCS_bragg_hh_instant_avg = np.sum(
                        RCS_bragg_hh) / surface_area
                    NRCS_avg_hh[az_step] += NRCS_bragg_hh_instant_avg
                if do_vv:
                    scat_bragg_vv = np.sqrt(RCS_bragg_vv)
                    NRCS_bragg_vv_instant_avg = np.sum(
                        RCS_bragg_vv) / surface_area
                    NRCS_avg_vv[az_step] += NRCS_bragg_vv_instant_avg

                # Doppler phases (Note: Bragg radial velocity taken constant!)
                surf_phase = -(2 * k0) * sr_surface
                cap_phase = (2 * k0) * self.t_step * c_b * (az_step + 1)
                phase_bragg[0] = surf_phase - cap_phase  # + dop_phase_p
                phase_bragg[1] = surf_phase + cap_phase  # + dop_phase_m
                bragg_scats[0] = rndscat_m.scats(t_now)
                bragg_scats[1] = rndscat_p.scats(t_now)

                if do_hh:
                    scene_hh[az_step] += ne.evaluate(
                        'sum(scat_bragg_hh * exp(1j*phase_bragg) * bragg_scats, axis=0)'
                    )
                if do_vv:
                    scene_vv[az_step] += ne.evaluate(
                        'sum(scat_bragg_vv * exp(1j*phase_bragg) * bragg_scats, axis=0)'
                    )

        v_r = (self.surface.Vx * np.sin(inc) * np.cos(az_rad) +
               self.surface.Vy * np.sin(inc) * np.sin(az_rad) -
               self.surface.Vz * np.cos(inc))

        sigma_v_r = np.std(v_r)

        # Some stats
        def weighted_stats(v, w):
            wm = np.sum(v * w) / np.sum(w)
            wsigma = np.sqrt(np.sum(w * (v - wm)**2) / np.sum(w))
            return wm, wsigma

        if do_hh:
            w = np.abs(scene_hh[0])**2
            # v_r_whh = np.sum(v_r * w) / np.sum(w)
            v_r_whh, sigma_v_r_whh = weighted_stats(v_r, w)
            # FIXME, for now just one lag

            v_ati_hh = -(np.angle(np.mean(scene_hh[1] * np.conj(scene_hh[0])))
                         / self.t_step * const.c / 2 / self.f0 / 2 / np.pi)
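            # ATI: the lag-1 interferometric phase is converted to a radial
            # velocity via v = -angle(<s1 * conj(s0)>) * lambda / (4 * pi * t_step),
            # with lambda = c / f0.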
            surfcoh_ati_hh = (np.mean(scene_hh[1] * np.conj(scene_hh[0])) /
                              np.sqrt(
                                  np.mean(np.abs(scene_hh[1])**2) *
                                  np.mean(np.abs(scene_hh[0])**2)))
        if do_vv:
            w = np.abs(scene_vv[0])**2
            # v_r_wvv = np.sum(v_r * w) / np.sum(w)
            v_r_wvv, sigma_v_r_wvv = weighted_stats(v_r, w)
            v_ati_vv = -(np.angle(np.mean(scene_vv[1] * np.conj(scene_vv[0])))
                         / self.t_step * const.c / 2 / self.f0 / 2 / np.pi)
            surfcoh_ati_vv = (np.mean(scene_vv[1] * np.conj(scene_vv[0])) /
                              np.sqrt(
                                  np.mean(np.abs(scene_vv[1])**2) *
                                  np.mean(np.abs(scene_vv[0])**2)))

        if do_hh and do_vv:
            return {
                'v_r': v_r,
                'scene_hh': scene_hh,
                'scene_vv': scene_vv,
                'NRCS_hh': NRCS_avg_hh,
                'NRCS_vv': NRCS_avg_vv,
                'v_r_whh': v_r_whh,
                'v_r_wvv': v_r_wvv,
                'v_ATI_hh': v_ati_hh,
                'v_ATI_vv': v_ati_vv,
                'sigma_v_r': sigma_v_r,
                'sigma_v_r_whh': sigma_v_r_whh,
                'sigma_v_r_wvv': sigma_v_r_wvv,
                'surfcoh_ati_hh': surfcoh_ati_hh,
                'surfcoh_ati_vv': surfcoh_ati_vv
            }
            # return (cfg, self.surface.Dz, v_r, scene_hh, scene_vv)
        elif do_hh:
            return {
                'v_r': v_r,
                'scene_hh': scene_hh,
                'scene_vv': None,
                'NRCS_hh': NRCS_avg_hh,
                'NRCS_vv': None,
                'v_r_whh': v_r_whh,
                'v_r_wvv': None,
                'v_ATI_hh': v_ati_hh,
                'v_ATI_vv': None,
                'sigma_v_r': sigma_v_r,
                'sigma_v_r_whh': sigma_v_r_whh,
                'surfcoh_ati_hh': surfcoh_ati_hh
            }
            # return (cfg, self.surface.Dz, v_r, scene_hh)
        else:
            return {
                'v_r': v_r,
                'scene_hh': None,
                'scene_vv': scene_vv,
                'NRCS_hh': None,
                'NRCS_vv': NRCS_avg_vv,
                'v_r_whh': None,
                'v_r_wvv': v_r_wvv,
                'v_ATI_hh': None,
                'v_ATI_vv': v_ati_vv,
                'sigma_v_r': sigma_v_r,
                'sigma_v_r_wvv': sigma_v_r_wvv,
                'surfcoh_ati_vv': surfcoh_ati_vv
            }
 def _get_energy(a: np.ndarray):
     # Sum of |a|^2: element-wise product of a with its complex conjugate, summed over all entries.
     return np.sum(a * np.conj(a))
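 # Note: equivalent to np.sum(np.abs(a) ** 2), since a * np.conj(a) == |a|**2
 # element-wise.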
Example #38
# compare convolution theorem result with direct calculation of sums
convolve(f, g)
convolve_sum(f, g)

# compare with numpy.convolve
numpy.convolve(f, g, 'full')
numpy.convolve(f, g, 'same')
numpy.convolve(f, g, 'valid')

# Cross Correlation

# %%
# f and g are not zero padded
f_fft = numpy.fft.fft(f)
g_fft = numpy.fft.fft(g)
cc_fft = numpy.conj(f_fft) * g_fft
cc = numpy.fft.ifft(cc_fft)
cc

# %%
# f and g are zero padded
f_padded = numpy.concatenate((f, numpy.zeros(len(f)-1)))
g_padded = numpy.concatenate((g, numpy.zeros(len(g)-1)))
f_fft = numpy.fft.fft(f_padded)
g_fft = numpy.fft.fft(g_padded)
cc_fft = numpy.conj(f_fft) * g_fft
cc = numpy.fft.ifft(cc_fft)
cc
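
# %%
# Sanity check (a sketch, assuming f and g are the equal-length 1-D arrays
# defined above): after rotating the zero-padded FFT result so negative lags
# come first, it should match numpy.correlate(g, f, 'full'), whose lags run
# from -(len(f) - 1) to len(g) - 1.
numpy.allclose(numpy.roll(cc, len(f) - 1), numpy.correlate(g, f, 'full'))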

# %%
# compare convolution theorem result with direct calculation of sums
Example #39
def _spg_line(f, x, d, gtd, fmax, A, b):
    """Non-monotone linesearch.

    Parameters
    ----------
    f : float
        Residual norm
    x : ndarray
        Input array
    d : ndarray
        Difference between input array and proposed projected array
    gtd : float
        Dot product between gradient and d
    fmax : float
        Maximum residual norm
    A : {sparse matrix, ndarray, LinearOperator}
        Operator
    b : ndarray
        Data

    Returns
    -------
    fnew : float
        Residual norm after linesearch projection
    xnew : ndarray
        Model after linesearch projection
    rnew : ndarray
        Residual after linesearch projection
    niters : int
        Number of iterations
    err : int
        Error flag
    timematprod : float
        Time in secs for matvec computations

    """
    maxiters = 10
    step = 1.
    niters = 0
    gamma = 1e-4
    gtd = -abs(gtd)
    timematprod = 0
    while 1:
        # Evaluate trial point and function value.
        xnew = x + step * d
        start_time_matprod = time.time()
        rnew = b - A.matvec(xnew)
        timematprod += time.time() - start_time_matprod
        fnew = abs(np.conj(rnew).dot(rnew)) / 2.

        # Check exit conditions.
        if fnew < fmax + gamma * step * gtd:  # Sufficient descent condition.
            err = EXIT_CONVERGED_spgline
            break
        elif niters >= maxiters:  # Too many linesearch iterations.
            err = EXIT_ITERATIONS_spgline
            break

        # New line-search iteration.
        niters += 1

        # Safeguarded quadratic interpolation.
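        # The candidate step below is the exact minimizer of the quadratic
        # q(t) with q(0) = f, q'(0) = gtd and q(step) = fnew:
        #     t* = -gtd * step**2 / (2 * (fnew - f - step * gtd))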
        if step <= 0.1:
            step /= 2.
        else:
            tmp = (-gtd * step**2.) / (2 * (fnew - f - step * gtd))
            if tmp < 0.1 or tmp > 0.9 * step or np.isnan(tmp):
                tmp = step / 2.
            step = tmp
    return fnew, xnew, rnew, niters, err, timematprod
Example #40
def power_spec_3D(map1, map2, dx, dy, dz, window_func=None):
    '''
    calculate cross power spectrum of a 3-dim Nx x Ny x Nz map1 and map2. 
    Set map1 = map2 for auto spectrum.
    
    Inputs:
    ======
    map1, map2: input 3D maps
    dx, dy, dz: the grid sizes along the 0th, 1st and 2nd dimensions
    window_func: {None, 'blackman'} window function to apply. Default: None (no window)
    
    Outputs:
    =======
    P3D: 3D power spectrum, 
        dimension: (Nx+1)/2 if Nx odd; Nx/2 + 1 if Nx even (same for Ny, Nz)
    kx_vec, ky_vec, kz_vec: corresponding kx, ky, kz vectors
    '''

    if map1.shape != map2.shape:
        raise ValueError('two input maps do not have the same shape')

    Nx, Ny, Nz = map1.shape

    # Window function
    if window_func == 'blackman':
        W = _blackman3D(Nx, Ny, Nz)
        map1w = map1 * W
        map2w = map2 * W
    elif window_func is None:
        map1w = map1.copy()
        map2w = map2.copy()
    else:
        raise ValueError('window function name must be None or "blackman".')

    kx_vec_all = np.fft.fftfreq(Nx) * 2 * np.pi / dx
    ky_vec_all = np.fft.fftfreq(Ny) * 2 * np.pi / dy
    kz_vec_all = np.fft.fftfreq(Nz) * 2 * np.pi / dz
    ftmap1 = np.fft.fftn(map1w) * dx * dy * dz
    ftmap2 = np.fft.fftn(map2w) * dx * dy * dz

    V = Nx * Ny * Nz * dx * dy * dz
    P3D_all = np.real(ftmap1 * np.conj(ftmap2)) / V

    Nuse = _kvec_Nuse(Nz)
    kz_vec_all = abs(kz_vec_all[:Nuse])
    P3D_all = P3D_all[:, :, :Nuse]
    N3D_all = np.ones_like(P3D_all)

    # extract only the positive kx, ky part
    N_use = _kvec_Nuse(Ny)
    N_dup = _kvec_Ndup(Ny)
    ky_vec = abs(ky_vec_all[:N_use])
    P3D = P3D_all[:, :N_use, :]
    N3D = N3D_all[:, :N_use, :]

    Pdup = P3D_all[:, -N_dup:, :]
    Ndup = N3D_all[:, -N_dup:, :]
    P3D[:, 1:1 + N_dup, :] = (P3D[:, 1:1 + N_dup, :] +
                              np.flip(Pdup, axis=1)) / 2
    N3D[:, 1:1 + N_dup, :] = N3D[:, 1:1 + N_dup, :] + np.flip(Ndup, axis=1)

    N_use = _kvec_Nuse(Nx)
    N_dup = _kvec_Ndup(Nx)
    kx_vec = abs(kx_vec_all[:N_use])
    P3D = P3D[:N_use, :, :]
    N3D = N3D[:N_use, :, :]

    Pdup = P3D[-N_dup:, :, :]
    Ndup = N3D[-N_dup:, :, :]
    P3D[1:1 + N_dup, :, :] = (P3D[1:1 + N_dup, :, :] +
                              np.flip(Pdup, axis=0)) / 2
    N3D[1:1 + N_dup, :, :] = N3D[1:1 + N_dup, :, :] + np.flip(Ndup, axis=0)

    kz_vec = kz_vec_all.copy()
    return P3D, kx_vec, ky_vec, kz_vec, N3D
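
# A minimal smoke test (a sketch; assumes the module-level helpers
# _blackman3D, _kvec_Nuse and _kvec_Ndup used above are importable with it):
rng = np.random.default_rng(0)
m = rng.standard_normal((16, 16, 16))
P3D, kx, ky, kz, N3D = power_spec_3D(m, m, dx=1.0, dy=1.0, dz=1.0)
assert np.all(P3D >= -1e-12)  # an auto-spectrum is real and non-negative up to roundoff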
Example #41
def _spg_line_curvy(x, g, fmax, A, b, project, weights, tau):
    """Projected backtracking linesearch.

    On entry, g is the (possibly scaled) steepest descent direction.

    Parameters
    ----------
    x : ndarray
        Input array
    g : ndarray
        Input gradient
    fmax : float
        Maximum residual norm
    A : {sparse matrix, ndarray, LinearOperator}
        Operator
    b : ndarray
        Data
    project : func, optional
        Projection function
    weights : {float, ndarray}, optional
        Weights ``W`` in ``||Wx||_1``
    tau : float, optional
        Projection radius

    Returns
    -------
    fnew : float
        Residual norm after linesearch projection
    xnew : ndarray
        Model after linesearch projection
    rnew : ndarray
        Residual after linesearch projection
    niters : int
        Number of iterations
    step : int
        Final step
    err : int
        Error flag
    timeproject : float
        Time in secs for projection
    timematprod : float
        Time in secs for matvec computations

    """
    gamma = 1e-4
    maxiters = 10
    step = 1.
    snorm = 0.
    scale = 1.
    nsafe = 0
    niters = 0
    n = x.size
    timeproject = 0
    timematprod = 0
    while 1:
        # Evaluate trial point and function value.
        start_time_project = time.time()
        xnew = project(x - step * scale * g, weights, tau)
        timeproject += time.time() - start_time_project
        start_time_matprod = time.time()
        rnew = b - A.matvec(xnew)
        timematprod += time.time() - start_time_matprod
        fnew = np.abs(np.conj(rnew).dot(rnew)) / 2.
        s = xnew - x
        gts = scale * np.real(np.dot(np.conj(g), s))

        if gts >= 0:
            err = EXIT_NODESCENT_spgline
            break

        if fnew < fmax + gamma * step * gts:
            err = EXIT_CONVERGED_spgline
            break
        elif niters >= maxiters:
            err = EXIT_ITERATIONS_spgline
            break

        # New linesearch iteration.
        niters += 1
        step /= 2.

        # Safeguard: If stepMax is huge, then even damped search
        # directions can give exactly the same point after projection. If
        # we observe this in adjacent iterations, we drastically damp the
        # next search direction.
        snormold = snorm
        snorm = np.linalg.norm(s) / np.sqrt(n)
        if abs(snorm - snormold) <= 1e-6 * snorm:
            gnorm = np.linalg.norm(g) / np.sqrt(n)
            scale = snorm / gnorm / (2.**nsafe)
            nsafe += 1.
    return fnew, xnew, rnew, niters, step, err, timeproject, timematprod
Example #42
def spgl1(A,
          b,
          tau=0,
          sigma=0,
          x0=None,
          fid=None,
          verbosity=0,
          iter_lim=None,
          n_prev_vals=3,
          bp_tol=1e-6,
          ls_tol=1e-6,
          opt_tol=1e-4,
          dec_tol=1e-4,
          step_min=1e-16,
          step_max=1e5,
          active_set_niters=np.inf,
          subspace_min=False,
          iscomplex=False,
          max_matvec=np.inf,
          weights=None,
          project=_norm_l1_project,
          primal_norm=_norm_l1_primal,
          dual_norm=_norm_l1_dual):
    r"""SPGL1 solver.

    Solve basis pursuit (BP), basis pursuit denoise (BPDN), or LASSO problems
    [1]_ [2]_ depending on the choice of ``tau`` and ``sigma``::

        (BP)     minimize  ||x||_1  subj. to  Ax = b

        (BPDN)   minimize  ||x||_1  subj. to  ||Ax-b||_2 <= sigma

        (LASSO)  minimize  ||Ax-b||_2  subj. to  ||x||_1 <= tau

    The matrix ``A`` may be square or rectangular (over-determined or
    under-determined), and may have any rank.

    Parameters
    ----------
    A : {sparse matrix, ndarray, LinearOperator}
        Representation of an m-by-n matrix.  It is required that
        the linear operator can produce ``Ax`` and ``A^T x``.
    b : array_like, shape (m,)
        Right-hand side vector ``b``.
    tau : float, optional
        LASSO threshold. If different from ``0``, spgl1 solves a LASSO problem
    sigma : float, optional
        BPDN threshold. If different from ``0``, spgl1 solves a BPDN problem
    x0 : array_like, shape (n,), optional
        Initial guess of x, if None zeros are used.
    fid : file, optional
        File ID to direct log output, if None print on screen.
    verbosity : int, optional
        0=quiet, 1=some output, 2=more output.
    iter_lim : int, optional
        Max. number of iterations (default is ``10*m``).
    n_prev_vals : int, optional
         Line-search history length.
    bp_tol : float, optional
        Tolerance for identifying a basis pursuit solution.
    ls_tol : float, optional
         Tolerance for least-squares solution. Iterations are stopped when the
         ratio between the dual norm of the gradient and the L2 norm of the
         residual becomes smaller or equal to ``ls_tol``.
    opt_tol : float, optional
        Optimality tolerance. More specifically, when using basis pursuit
        denoise, the optimality condition is met when the absolute difference
        between the L2 norm of the residual and ``sigma`` is smaller than
        ``opt_tol``.
    dec_tol : float, optional
        Required relative change in primal objective for Newton.
        Larger ``dec_tol`` means more frequent Newton updates.
    step_min : float, optional
        Minimum spectral step.
    step_max : float, optional
        Maximum spectral step.
    active_set_niters : float, optional
        Maximum number of iterations where no change in support is tolerated.
        Exit with EXIT_ACTIVE_SET if no change is observed for
        ``active_set_niters`` iterations
    subspace_min : bool, optional
        Subspace minimization (``True``) or not (``False``)
    iscomplex : bool, optional
        Problem with complex variables (``True``) or not (``False``)
    max_matvec : int, optional
        Maximum matrix-vector multiplies allowed
    weights : {float, ndarray}, optional
        Weights ``W`` in ``||Wx||_1``
    project : func, optional
        Projection function
    primal_norm : func, optional
        Primal norm evaluation function
    dual_norm : func, optional
        Dual norm evaluation function

    Returns
    -------
    x : array_like, shape (n,)
        Inverted model
    r : array_like, shape (m,)
        Final residual
    g : array_like, shape (n,)
        Final gradient
    info : dict
        Dictionary with the following information:

        ``tau``, final value of tau (see sigma above)

        ``rnorm``, two-norm of the optimal residual

        ``rgap``, relative duality gap (an optimality measure)

        ``gnorm``, Lagrange multiplier of (LASSO)

        ``stat``,
           ``1``: found a BPDN solution,
           ``2``: found a BP solution; exit based on small gradient,
           ``3``: found a BP solution; exit based on small residual,
           ``4``: found a LASSO solution,
           ``5``: error: too many iterations,
           ``6``: error: linesearch failed,
           ``7``: error: found suboptimal BP solution,
           ``8``: error: too many matrix-vector products

        ``niters``, number of iterations

        ``nProdA``, number of multiplications with A

        ``nProdAt``, number of multiplications with A'

        ``n_newton``, number of Newton steps

        ``time_project``, projection time (seconds)

        ``time_matprod``, matrix-vector multiplications time (seconds)

        ``time_total``, total solution time (seconds)

        ``niters_lsqr``, number of lsqr iterations (if ``subspace_min=True``)

        ``xnorm1``, L1-norm model solution history through iterations

        ``rnorm2``, L2-norm residual history through iterations

        ``lambdaa``, Lagrange multiplier history through iterations

    References
    ----------
    .. [1] E. van den Berg and M. P. Friedlander, "Probing the Pareto frontier
             for basis pursuit solutions", SIAM J. on Scientific Computing,
             31(2):890-912. (2008).
    .. [2] E. van den Berg and M. P. Friedlander, "Sparse optimization with
             least-squares constraints", Tech. Rep. TR-2010-02, Dept of
             Computer Science, Univ of British Columbia (2010).

    """
    start_time = time.time()

    A = aslinearoperator(A)
    m, n = A.shape

    if tau == 0:
        single_tau = False
    else:
        single_tau = True

    if iter_lim is None:
        iter_lim = 10 * m

    max_line_errors = 10  # Maximum number of line-search failures.
    piv_tol = 1e-12  # Threshold for significant Newton step.
    max_matvec = max(3, max_matvec)  # Max number of allowed matvec/rmatvec.

    # Initialize local variables.
    niters = 0  # Total SPGL1 iterations.
    niters_lsqr = 0  # Total LSQR iterations.
    nprodA = 0  # Number of matvec operations
    nprodAt = 0  # Number of rmatvec operations
    last_fv = np.full(10, -np.inf)  # Last m function values.
    nline_tot = 0  # Total number of linesearch steps.
    print_tau = False
    n_newton = 0  # Number of Newton iterations
    bnorm = np.linalg.norm(b)
    stat = False
    time_project = 0  # Time spent in projections
    time_matprod = 0  # Time spent in matvec computations
    nnz_niters = 0  # No. of iterations with fixed pattern.
    nnz_idx = None  # Active-set indicator.
    subspace = False  # Flag if did subspace min in current itn.
    stepg = 1  # Step length for projected gradient.
    test_updatetau = False  # Previous step did not update tau

    # Determine initial x and see if problem is complex
    realx = np.lib.isreal(A).all() and np.lib.isreal(b).all()
    if x0 is None:
        x = np.zeros(n, dtype=b.dtype)
    else:
        x = np.asarray(x0)

    # Override realx when iscomplex flag is set
    if iscomplex:
        realx = False

    # Check if all weights (if any) are strictly positive. In previous
    # versions we also checked if the number of weights was equal to
    # n. In the case of multiple measurement vectors, this no longer
    # needs to apply, so the check was removed.
    if weights is not None:
        if not np.isfinite(weights).all():
            raise ValueError('Entries in weights must be finite')
        if np.any(weights <= 0):
            raise ValueError('Entries in weights must be strictly positive')
    else:
        weights = 1

    # Quick exit if sigma >= ||b||.  Set tau = 0 to short-circuit the loop.
    if bnorm <= sigma:
        logger.warning('W: sigma >= ||b||.  Exact solution is x = 0.')
        tau = 0
        single_tau = True

    # Do not do subspace minimization if x is complex.
    if not realx and subspace_min:
        logger.warning(
            'W: Subspace minimization disabled when variables are complex.')
        subspace_min = False

    # Pre-allocate iteration info vectors
    xnorm1 = np.zeros(min(iter_lim + 1, _allocSize))
    rnorm2 = np.zeros(min(iter_lim + 1, _allocSize))
    lambdaa = np.zeros(min(iter_lim + 1, _allocSize))

    # Log header.
    if verbosity >= 1:
        _printf(fid, '')
        _printf(fid, '=' * 80 + '')
        _printf(fid, 'SPGL1')
        _printf(fid, '=' * 80 + '')
        _printf(fid, '%-22s: %8i %4s' % ('No. rows', m, ''))
        _printf(fid, '%-22s: %8i\n' % ('No. columns', n))
        _printf(fid, '%-22s: %8.2e %4s' % ('Initial tau', tau, ''))
        _printf(fid, '%-22s: %8.2e\n' % ('Two-norm of b', bnorm))
        _printf(fid, '%-22s: %8.2e %4s' % ('Optimality tol', opt_tol, ''))
        if single_tau:
            _printf(fid, '%-22s: %8.2e\n' % ('Target one-norm of x', tau))
        else:
            _printf(fid, '%-22s: %8.2e\n' % ('Target objective', sigma))
        _printf(fid, '%-22s: %8.2e %4s' % ('Basis pursuit tol', bp_tol, ''))
        _printf(fid, '%-22s: %8i\n' % ('Maximum iterations', iter_lim))
        if verbosity >= 2:
            if single_tau:
                logb = '%5i  %13.7e  %13.7e  %9.2e  %6.1f  %6i  %6i %6s'
                logh = '%5s  %13s  %13s  %9s  %6s  %6s  %6s\n'
                _printf(
                    fid, logh % ('iterr', 'Objective', 'Relative Gap', 'gnorm',
                                 'stepg', 'nnz_x', 'nnz_g'))
            else:
                logb = '%5i  %13.7e  %13.7e  %9.2e  %9.3e  %6.1f  %6i  %6i %6s'
                logh = '%5s  %13s  %13s  %9s  %9s  %6s  %6s  %6s  %6s\n'
                _printf(
                    fid,
                    logh % ('iterr', 'Objective', 'Relative Gap', 'Rel Error',
                            'gnorm', 'stepg', 'nnz_x', 'nnz_g', 'tau'))

    # Project the starting point and evaluate function and gradient.
    start_time_project = time.time()
    x = project(x, weights, tau)
    time_project += time.time() - start_time_project
    start_time_matvec = time.time()
    r = b - A.matvec(x)  # r = b - Ax
    g = -A.rmatvec(r)  # g = -A'r
    time_matprod += time.time() - start_time_matvec
    f = np.linalg.norm(r)**2 / 2.
    nprodA += 1
    nprodAt += 1

    # Required for nonmonotone strategy.
    last_fv[0] = f
    fbest = f
    xbest = x.copy()
    fold = f

    # Compute projected gradient direction and initial step length.
    start_time_project = time.time()
    dx = project(x - g, weights, tau) - x
    time_project += time.time() - start_time_project
    dxnorm = np.linalg.norm(dx, np.inf)
    if dxnorm < (1. / step_max):
        gstep = step_max
    else:
        gstep = min(step_max, max(step_min, 1. / dxnorm))

    # Main iteration loop.
    while 1:
        # Test exit conditions.

        # Compute quantities needed for log and exit conditions.
        gnorm = dual_norm(-g, weights)
        rnorm = np.linalg.norm(r)
        gap = np.dot(np.conj(r), r - b) + tau * gnorm
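        # gap above is the duality gap of the LASSO subproblem,
        # <r, r - b> + tau * ||A^H r||_d (van den Berg & Friedlander, 2008),
        # where ||.||_d is the dual norm.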
        rgap = abs(gap) / max(1., f)
        aerror1 = rnorm - sigma
        aerror2 = f - sigma**2. / 2.
        rerror1 = abs(aerror1) / max(1., rnorm)
        rerror2 = abs(aerror2) / max(1., f)

        # Count number of consecutive iterations with identical support.
        nnz_old = nnz_idx
        nnz_x, nnz_g, nnz_idx, nnz_diff = _active_vars(x, g, nnz_idx, opt_tol,
                                                       weights, dual_norm)
        if nnz_diff:
            nnz_niters = 0
        else:
            nnz_niters += 1
            if nnz_niters + 1 >= active_set_niters:
                stat = EXIT_ACTIVE_SET

        # Single tau: check if we're optimal.
        # The 2nd condition is there to guard against large tau.
        if single_tau:
            if rgap <= opt_tol or rnorm < opt_tol * bnorm:
                stat = EXIT_OPTIMAL
        else:  # Multiple tau: Check if found root and/or if tau needs updating.
            # Test if a least-squares solution has been found
            if gnorm <= ls_tol * rnorm:
                stat = EXIT_LEAST_SQUARES
            if rgap <= max(opt_tol, rerror2) or rerror1 <= opt_tol:
                # The problem is nearly optimal for the current tau.
                # Check optimality of the current root.
                if rnorm <= sigma:
                    stat = EXIT_SUBOPTIMAL_BP  # Found suboptimal BP sol.
                if rerror1 <= opt_tol:
                    stat = EXIT_ROOT_FOUND  # Found approx root.
                if rnorm <= bp_tol * bnorm:
                    stat = EXIT_BPSOL_FOUND  # Residual minimized -> BP sol.
            fchange = np.abs(f - fold)
            test_relchange1 = fchange <= dec_tol * f
            test_relcchange2 = fchange <= 1e-1 * f * (np.abs(rnorm - sigma))
            test_updatetau = ((test_relchange1 and rnorm > 2 * sigma) or \
                              (test_relcchange2 and rnorm <= 2 * sigma)) and \
                             not stat and not test_updatetau

            if test_updatetau:
                # Update tau.
                tau_old = tau
                tau = max(0, tau + (rnorm * aerror1) / gnorm)
                n_newton += 1
                print_tau = np.abs(tau_old -
                                   tau) >= 1e-6 * tau  # For log only.
                if tau < tau_old:
                    # The one-norm ball has decreased. Need to make sure that
                    # the next iterate is feasible, which we do by projecting it.
                    start_time_project = time.time()
                    x = project(x, weights, tau)
                    time_project += time.time() - start_time_project

                    # Update the residual, gradient, and function value
                    start_time_matvec = time.time()
                    r = b - A.matvec(x)
                    g = -A.rmatvec(r)
                    time_matprod += time.time() - start_time_matvec

                    f = np.linalg.norm(r)**2 / 2.
                    nprodA += 1
                    nprodAt += 1

                    # Reset the function value history.
                    last_fv = np.full(10, -np.inf)
                    last_fv[1] = f

        # Too many iterations and not converged.
        if not stat and niters >= iter_lim:
            stat = EXIT_ITERATIONS

        # Print log, update history and act on exit conditions.
        if verbosity >= 2 and \
                (((niters < 10) or (iter_lim - niters < 10) or (niters % 10 == 0))
                 or single_tau or print_tau or stat):
            tauflag = '              '
            subflag = ''
            if print_tau:
                tauflag = ' %13.7e' % tau
            if subspace:
                subflag = ' S %2i' % niters_lsqr
            if single_tau:
                _printf(
                    fid, logb % (niters, rnorm, rgap, gnorm, np.log10(stepg),
                                 nnz_x, nnz_g, subflag))
                if subspace:
                    _printf(fid, '  %s' % subflag)
            else:
                _printf(
                    fid,
                    logb % (niters, rnorm, rgap, rerror1, gnorm,
                            np.log10(stepg), nnz_x, nnz_g, tauflag + subflag))
        print_tau = False
        subspace = False

        # Update history info
        if niters > 0 and niters % _allocSize == 0:  # enlarge allocation
            allocincrement = min(_allocSize, iter_lim - xnorm1.shape[0])
            xnorm1 = np.hstack((xnorm1, np.zeros(allocincrement)))
            rnorm2 = np.hstack((rnorm2, np.zeros(allocincrement)))
            lambdaa = np.hstack((lambdaa, np.zeros(allocincrement)))

        xnorm1[niters] = primal_norm(x, weights)
        rnorm2[niters] = rnorm
        lambdaa[niters] = gnorm

        if stat:
            break

        # Iterations begin here.
        niters += 1
        xold = x.copy()
        fold = f.copy()
        gold = g.copy()
        rold = r.copy()

        while 1:
            # Projected gradient step and linesearch.
            f, x, r, niter_line, stepg, lnerr, \
            time_project_curvy, time_matprod_curvy = \
               _spg_line_curvy(x, gstep*g, max(last_fv), A, b,
                               project, weights, tau)
            time_project += time_project_curvy
            time_matprod += time_matprod_curvy
            nprodA += niter_line + 1
            nline_tot += niter_line
            if nprodA + nprodAt > max_matvec:
                stat = EXIT_MATVEC_LIMIT
                break

            if lnerr:
                # Projected backtrack failed.
                # Retry with feasible dirn linesearch.
                x = xold.copy()
                f = fold
                start_time_project = time.time()
                dx = project(x - gstep * g, weights, tau) - x
                time_project += time.time() - start_time_project
                gtd = np.dot(np.conj(g), dx)
                f, x, r, niter_line, lnerr, time_matprod_line = \
                    _spg_line(f, x, dx, gtd, max(last_fv), A, b)
                time_matprod += time_matprod_line
                nprodA += niter_line + 1
                nline_tot += niter_line
                if nprodA + nprodAt > max_matvec:
                    stat = EXIT_MATVEC_LIMIT
                    break

                if lnerr:
                    # Failed again.
                    # Revert to previous iterates and damp max BB step.
                    x = xold
                    f = fold
                    if max_line_errors <= 0:
                        stat = EXIT_LINE_ERROR
                    else:
                        step_max = step_max / 10.
                        logger.warning(
                            'Linesearch failed with error %s. '
                            'Damping max BB scaling to %s', lnerr, step_max)
                        max_line_errors -= 1

            # Subspace minimization (only if active-set change is small).
            if subspace_min:
                start_time_matvec = time.time()
                g = -A.rmatvec(r)
                time_matprod += time.time() - start_time_matvec
                nprodAt += 1
                nnz_x, nnz_g, nnz_idx, nnz_diff = \
                    _active_vars(x, g, nnz_old, opt_tol, weights, dual_norm)
                if not nnz_diff:
                    if nnz_x == nnz_g:
                        iter_lim_lsqr = 20
                    else:
                        iter_lim_lsqr = 5
                    nnz_idx = np.abs(x) >= opt_tol

                    ebar = np.sign(x[nnz_idx])
                    nebar = np.size(ebar)
                    Sprod = _LSQRprod(A, nnz_idx, ebar, n)

                    dxbar, istop, niter_lsqr = \
                       lsqr(Sprod, r, 1e-5, 1e-1, 1e-1, 1e12,
                            iter_lim=iter_lim_lsqr, show=0)[0:3]
                    nprodA += niter_lsqr
                    nprodAt += niter_lsqr + 1
                    niters_lsqr += niter_lsqr

                    # LSQR iterations successful. Take the subspace step.
                    if istop != 4:
                        # Push dx back into full space: dx = Z dx.
                        dx = np.zeros(n, dtype=x.dtype)
                        dx[nnz_idx] = \
                            dxbar - (1/nebar)*np.dot(np.dot(np.conj(ebar.T),
                                                            dxbar), ebar)

                        # Find largest step to a change in sign.
                        block1 = nnz_idx & (x < 0) & (dx > +piv_tol)
                        block2 = nnz_idx & (x > 0) & (dx < -piv_tol)
                        alpha1 = np.inf
                        alpha2 = np.inf
                        if np.any(block1):
                            alpha1 = min(-x[block1] / dx[block1])
                        if np.any(block2):
                            alpha2 = min(-x[block2] / dx[block2])
                        alpha = min([1, alpha1, alpha2])
                        if alpha < 0:
                            raise ValueError('Alpha smaller than zero')
                        if np.dot(np.conj(ebar.T), dx[nnz_idx]) > opt_tol:
                            raise ValueError('Subspace update signed sum '
                                             'bigger than tolerance')
                        # Update variables.
                        x = x + alpha * dx
                        start_time_matvec = time.time()
                        r = b - A.matvec(x)
                        time_matprod += time.time() - start_time_matvec
                        f = abs(np.dot(np.conj(r), r)) / 2.
                        subspace = True
                        nprodA += 1

                if primal_norm(x, weights) > tau + opt_tol:
                    raise ValueError('Primal norm out of bound')

            # Update gradient and compute new Barzilai-Borwein scaling.
            if not lnerr:
                start_time_matvec = time.time()
                g = -A.rmatvec(r)
                time_matprod += time.time() - start_time_matvec
                nprodAt += 1
                s = x - xold
                y = g - gold
                sts = np.dot(np.conj(s), s)
                sty = np.dot(np.conj(s), y)
                if sty <= 0:
                    gstep = step_max
                else:
                    gstep = min(step_max, max(step_min, sts / sty))
            else:
                gstep = min(step_max, gstep)
            break  # Leave while loop. This is done to allow stopping the
            # computations at any time within the loop if max_matvec is
            # reached. If this is not the case, the loop is stopped here.

        if stat == EXIT_MATVEC_LIMIT:
            niters -= 1
            x = xold.copy()
            f = fold
            g = gold.copy()
            r = rold.copy()
            break

        #  Update function history.
        if single_tau or f > sigma**2 / 2.:  # Don't update if superoptimal.
            last_fv[np.mod(niters, n_prev_vals)] = f.copy()
            if fbest > f:
                fbest = f.copy()
                xbest = x.copy()

    # Restore best solution (only if solving single problem).
    if single_tau and f > fbest:
        rnorm = np.sqrt(2. * fbest)
        print('Restoring best iterate to objective ' + str(rnorm))
        x = xbest.copy()
        start_time_matvec = time.time()
        r = b - A.matvec(x)
        g = -A.rmatvec(r)
        time_matprod += time.time() - start_time_matvec
        gnorm = dual_norm(g, weights)
        rnorm = np.linalg.norm(r)
        nprodA += 1
        nprodAt += 1

    # Final cleanup before exit.
    info = {}
    info['tau'] = tau
    info['rnorm'] = rnorm
    info['rgap'] = rgap
    info['gnorm'] = gnorm
    info['stat'] = stat
    info['niters'] = niters
    info['nprodA'] = nprodA
    info['nprodAt'] = nprodAt
    info['n_newton'] = n_newton
    info['time_project'] = time_project
    info['time_matprod'] = time_matprod
    info['niters_lsqr'] = niters_lsqr
    info['time_total'] = time.time() - start_time
    info['xnorm1'] = xnorm1[0:niters]
    info['rnorm2'] = rnorm2[0:niters]
    info['lambdaa'] = lambdaa[0:niters]

    # Print final output.
    if verbosity >= 1:
        _printf(fid, '')
        if stat == EXIT_OPTIMAL:
            _printf(fid, 'EXIT -- Optimal solution found')
        elif stat == EXIT_ITERATIONS:
            _printf(fid, 'ERROR EXIT -- Too many iterations')
        elif stat == EXIT_ROOT_FOUND:
            _printf(fid, 'EXIT -- Found a root')
        elif stat == EXIT_BPSOL_FOUND:
            _printf(fid, 'EXIT -- Found a BP solution')
        elif stat == EXIT_LEAST_SQUARES:
            _printf(fid, 'EXIT -- Found a least-squares solution')
        elif stat == EXIT_LINE_ERROR:
            _printf(fid, 'ERROR EXIT -- Linesearch error (%d)' % lnerr)
        elif stat == EXIT_SUBOPTIMAL_BP:
            _printf(fid, 'EXIT -- Found a suboptimal BP solution')
        elif stat == EXIT_MATVEC_LIMIT:
            _printf(fid, 'EXIT -- Maximum matrix-vector operations reached')
        elif stat == EXIT_ACTIVE_SET:
            _printf(fid, 'EXIT -- Found a possible active set')
        else:
            _printf(fid, 'SPGL1 ERROR: Unknown termination condition')
        _printf(fid, '')

        _printf(
            fid, '%-20s:  %6i %6s %-20s:  %6.1f' %
            ('Products with A', nprodA, '', 'Total time   (secs)',
             info['time_total']))
        _printf(
            fid, '%-20s:  %6i %6s %-20s:  %6.1f' %
            ('Products with A^H', nprodAt, '', 'Project time (secs)',
             info['time_project']))
        _printf(
            fid, '%-20s:  %6i %6s %-20s:  %6.1f' %
            ('Newton iterations', n_newton, '', 'Mat-vec time (secs)',
             info['time_matprod']))
        _printf(
            fid, '%-20s:  %6i %6s %-20s:  %6i' %
            ('Line search its', nline_tot, '', 'Subspace iterations',
             niters_lsqr))

    return x, r, g, info
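
# A minimal usage sketch (hypothetical sizes; assumes this module's helper
# constants and functions, e.g. the default L1 projection, are all defined):
rng = np.random.default_rng(0)
A_demo = rng.standard_normal((50, 128))
x_true = np.zeros(128)
x_true[[3, 40, 77]] = [1.0, -2.0, 0.5]
b_demo = A_demo.dot(x_true)
# Basis pursuit denoise with a small noise level sigma:
x_est, r_est, g_est, info = spgl1(A_demo, b_demo, sigma=1e-4, verbosity=0)
print(info['stat'], np.linalg.norm(x_est - x_true))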
Example #43
 def is_conj_pair(x, y):
     return abs(np.conj(x) - y) < 1e-6
Example #44
 def _rmatvec(self, x):
     y = self.A.rmatvec(x)
     z = y[self.nnz_idd] - \
         (1. / self.nbar) * np.dot(np.dot(np.conj(self.ebar),
                                          y[self.nnz_idd]), self.ebar)
     return z
Example #45
    def spatialcorr(self,
                    outputfile,
                    l=6,
                    ppp=[1, 1],
                    rdelta=0.01,
                    results_path='../../analysis/order2d/'):
        """ Calculate spatial correlation of bond orientational order
            
            l is the order ranging from 4 to 8 normally in 2D
            ppp is periodic boundary conditions. 1 for yes and 0 for no
            rdelta is the bin size in g(r)
        """
        if not os.path.exists(results_path):
            os.makedirs(results_path)

        ParticlePhi = self.lthorder(l, ppp)
        MAXBIN = int(self.Boxlength.min() / 2.0 / rdelta)
        grresults = np.zeros((MAXBIN, 3))
        names = 'r   g(r)   gl(r)   gl/g(r)l=' + str(l)

        for n in range(self.SnapshotNumber):
            for i in range(self.ParticleNumber - 1):
                RIJ = self.Positions[n, i + 1:] - self.Positions[n, i]
                periodic = np.where(
                    np.abs(RIJ / self.Boxlength) > 0.5, np.sign(RIJ),
                    0).astype(int)
                RIJ -= self.Boxlength * periodic * ppp  # remove PBC
                distance = np.sqrt(np.square(RIJ).sum(axis=1))
                Countvalue, BinEdge = np.histogram(distance,
                                                   bins=MAXBIN,
                                                   range=(0, MAXBIN * rdelta))
                grresults[:, 0] += Countvalue
                PHIIJ = np.real(ParticlePhi[n, i + 1:] *
                                np.conj(ParticlePhi[n, i]))
                Countvalue, BinEdge = np.histogram(distance,
                                                   bins=MAXBIN,
                                                   range=(0, MAXBIN * rdelta),
                                                   weights=PHIIJ)
                grresults[:, 1] += Countvalue

        binleft = BinEdge[:-1]
        binright = BinEdge[1:]
        Nideal = np.pi * (binright**2 - binleft**2) * self.rhototal
        grresults[:, 0] = (grresults[:, 0] * 2 / self.ParticleNumber /
                           self.SnapshotNumber / Nideal)
        grresults[:, 1] = (grresults[:, 1] * 2 / self.ParticleNumber /
                           self.SnapshotNumber / Nideal)
        grresults[:, 2] = np.where(grresults[:, 0] != 0,
                                   grresults[:, 1] / grresults[:, 0], np.nan)

        binright = binright - 0.5 * rdelta
        results = np.column_stack((binright, grresults))
        np.savetxt(results_path + outputfile,
                   results,
                   fmt='%.6f',
                   header=names,
                   comments='')
        print('-----------Get gl(r) results Done-----------')
        return results
Example #46
    def get_slowest_pole(self, h_step, dt, NP, known_poles, stride):
        #print('\nget_slowest called with', NP, known_poles, stride)

        def d(n, stride):
            return np.array([h_step[n+stride*i] for i in range(NP+1)])

        def get_data_for_stride(stride):
            num_samples = len(h_step) - NP*stride
            samples = [d(n, stride) for n in range(num_samples)]

            # make samples rows in a (tall) matrix
            sample_matrix = np.stack(samples, 0)
            return sample_matrix

        data = get_data_for_stride(stride)

        # We split the collected data into the initial points, and the final point
        A = data[:,:-1]
        B = data[:,-1:]

        # Consider 5 poles, 2 already known
        # We can do some linear algebra to find column vector X such that
        # x0*a[n] + x1*a[n+1] + x2*a[n+2] + x3*a[n+3] + x4*a[n+4] + a[n+5] = 0
        # a[n](x0 + x1*r + x2*r^2 + x3*r^3 + x4*r^4 + r^5) = 0
        # r^5 + x4*r^4 + x3*r^3 + x2*r^2 + x1*r + x0 = 0
        # and then solve this polynomial to find the roots

        # BUT
        # With 2 known poles, we know this should factor out to
        # (r-p1)(r-p2)(r^3 + y0*r^2 + y1*r + y2) = 0
        # So we really want to use our linear algebra to find the Y vector instead
        # First we need a matrix Z, which depends on the known ps, s.t. X = ZY, (5x1) = (5x3)(3x1)
        # Then our linear algebra that was AX+B=0 becomes AZY+B=0, which is easily solvable for Y

        # Step 1: find Z
        # As a reminder, X = ZY where X and Y are defined as:
        # r^5 + x4*r^4 + x3*r^3 + x2*r^2 + x1*r + x0 = 0
        # (r-p1)(r-p2)(r^3 + y2*r^2 + y1*r + y0) = 0
        # Define C, which is coefficients of poly from known roots
        # r^2 + (-p1-p2)*r + p1*p2 -> c0=p1*p2, c1=(-p1-p2)
        # We see that each term in X[i] is a product of Y[j] and C[k] terms s.t. i=j+k
        # SO we can directly write Z[i,j] = C[i-j], or 0 if that's outside the C bounds

        # BE CAREFUL about the leading 1s in these polynomials.
        # In our example, the full Z would be 6x4, including leading 1s. It's okay to drop the
        # bottom row because it only contributes to the leading 1 of x, which we want to drop.
        # But we can't drop the right column, which corresponds to the leading 1 of Y, because
        # it contributes to other rows in X.
        # Z: 5x3 version of Z, with bottom row and right column dropped
        # Z~: 5x4 version of Z, with only bottom row dropped
        # Y~: 4x1 version of Y, with a constant one in the fourth spot
        # A Z~ Y~ == -B
        # We can't use least-squares to find Y right now because of that required constant 1
        # E: 4x3 almost-identity
        # F: 4x1 column, [0,0,0,1]
        # A Z~ (E Y + F) == -B
        # A Z~ E Y  +  A Z~ F == -B
        # A Z Y == -B - A Z~_last_column
        # So we need to do a modified regression: we can drop that extra column on Z~, but we have
        # to first use it to modify the B vector
        # Similarly, X = Z~ Y~ becomes X = Z Y + Z~_last_column

        known_rs = np.exp(np.array(known_poles)*stride)
        if np.isinf(known_rs).any():
            # probably got a bad pole in known_poles, should just give up
            return []

        poly = np.polynomial.polynomial
        C = poly.polyfromroots(known_rs)

        Z_tilde = np.zeros((NP, NP-len(known_rs)+1), dtype=C.dtype)
        for i in range(Z_tilde.shape[0]):
            for j in range(Z_tilde.shape[1]):
                k = i-j
                if k >= 0 and k < len(C):
                    Z_tilde[i,j] = C[k]
        Z = Z_tilde[:,:-1]
        Z_column = Z_tilde[:,-1:]

        Y = np.linalg.pinv(A@Z) @ (-B - (A@Z_column))
        X = Z@Y + Z_column

        # x0 * d[n] + x1 * d[n+1] + d[n+2] = 0
        # a[n](x0 + x1*r + r^2) = 0

        poly = np.concatenate([[1], X[::-1,0]])

        #print('poly', poly)
        roots = np.roots(poly)
        #print('roots', roots)

        # errors often cause small roots to go negative when they shouldn't.
        # This messes with the log, so we explicitly call those nan.
        def mylog(x):
            if (np.real(x) == 0):
                return float('nan')
            if not abs(np.imag(x)/np.real(x)) > 1e-6 and np.real(x) <= 0:
                return float('nan')
            else:
                return np.log(x)
        ps_with_stride = np.vectorize(mylog)(roots)
        ps = ps_with_stride / (stride * dt)

        # remove known poles
        key=lambda x: float('inf') if np.isnan(x) else abs(x)
        new_ps = sorted(ps, key=key)
        #print('Before processing, found ps', new_ps)
        for known_p in known_poles:
            for i in range(len(new_ps)):
                # TODO is this epsilon reasonable when ps are likely ~1e10?
                if new_ps[i] is not None and abs(new_ps[i] - known_p) < 1e-6:
                    new_ps.pop(i)
                    break
            else:
                if np.isnan(new_ps).any():
                    # the nans are probably causing the error
                    return []
                #print(known_poles)
                #print(new_ps)
                assert False, f'Known pole {known_p} not found!'

        # finally, return 0 (if nan), 1, or 2 (if complex conjugate) slowest new poles
        #print('After processing, new ps', new_ps)
        assert len(new_ps) > 0, 'Found no new poles ... check NP and len(known_poles)'
        if abs(np.imag(new_ps[0])) > 1e-6:
            # complex conjugate pair
            #print(ps, new_ps)
            assert len(new_ps) >= 2, 'Only found one of complex conjugate pair?'
            if abs(np.conj(new_ps[0]) - new_ps[1]) > 1e-6 and np.isnan(new_ps).any():
                return []
            assert abs(np.conj(new_ps[0]) - new_ps[1]) < 1e-6, 'Issue with conjugate pair, check sorting?'
            return new_ps[:2]
        elif not np.isnan(new_ps[0]):
            return new_ps[:1]
        else:
            # empty list
            return new_ps[:0]
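
# Standalone check of the Z-matrix construction described above (a sketch):
# with known roots 2 and 3 (C = polyfromroots = [6, -5, 1]) and remaining
# factor r^2 + r + 4 (Y~ = [4, 1, 1], leading 1 kept), X = Z~ @ Y~ must equal
# the non-leading coefficients of the full monic degree-4 polynomial.
import numpy as np
poly = np.polynomial.polynomial
C = poly.polyfromroots([2.0, 3.0])
Y_tilde = np.array([4.0, 1.0, 1.0])
Z_tilde = np.zeros((4, 3))
for i in range(4):
    for j in range(3):
        if 0 <= i - j < len(C):
            Z_tilde[i, j] = C[i - j]
full = poly.polymul(C, Y_tilde)  # coefficients, lowest order first
assert np.allclose(Z_tilde @ Y_tilde, full[:-1])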
Example #47
     p.semilogy(S)
     p.subplot(236)
     capo.arp.waterfall(V, drng=3)
     p.show()
     p.subplot(311)
     capo.arp.waterfall(x[m], mode='real', mx=5, drng=10)
     p.colorbar(shrink=.5)
     p.subplot(312)
     capo.arp.waterfall(_Cx, mode='real')
     p.colorbar(shrink=.5)
     p.subplot(313)
     capo.arp.waterfall(_Ix, mode='real')
     p.colorbar(shrink=.5)
     p.show()
 if False:  #use ffts to do q estimation fast
     qI += n.conj(_Iz[k1][bl1]) * _Iz[k2][bl2]
     qC += n.conj(_Cz[k1][bl1]) * _Cz[k2][bl2]
 else:  #brute force with Q to ensure normalization
     #_qI = n.array([_Iz[k1][bl1].conj() * n.dot(Q[i], _Iz[k2][bl2]) for i in xrange(nchan)])
     #_qC = n.array([_Cz[k1][bl1].conj() * n.dot(Q[i], _Cz[k2][bl2]) for i in xrange(nchan)])
     if not Q_Iz[k2].has_key(bl2):
         Q_Iz[k2][bl2] = [
             n.dot(Q[i], _Iz[k2][bl2])
             for i in xrange(nchan)
         ]
     if not Q_Cz[k2].has_key(bl2):
         Q_Cz[k2][bl2] = [
             n.dot(Q[i], _Cz[k2][bl2])
             for i in xrange(nchan)
         ]
     _qI = n.array([
Example #48
def omlsa_streamer(frame,
                   fs,
                   frame_length,
                   frame_move,
                   plot=None,
                   postprocess=None,
                   high_cut=6000):
    global loop_i, frame_buffer, frame_out, frame_in, frame_result, y_out_time, l_mod_lswitch, lambda_d, eta_2term, S, St, lambda_dav, Smin, Smin_sw, Smint_sw, Smint, zi, G, conv_Y
    start = time.time()
    input = frame
    input = input.reshape(frame_move, )

    # #################### Core Algorithm ####################
    # '''OMLSA LOOP'''
    # '''For all time frames'''
    if loop_i < 1:
        loop_i = loop_i + 1
        frame_buffer = np.concatenate((frame_buffer, input))
        return frame

    else:
        if loop_i == 1:
            frame_buffer = frame_buffer[0:128]
        else:
            frame_buffer = frame_buffer[-128:]
        # print(frame_buffer)
        frame_buffer = np.concatenate((frame_buffer, input))
        frame_in = frame_buffer
        frame_out = np.concatenate(
            (frame_out[frame_move:], np.zeros((frame_move, ))))
        Y = np.fft.fft(frame_in * win)
        Ya2 = np.power(abs(Y[0:N_eff]), 2)
        Sf = np.convolve(win_freq.flatten(), Ya2.flatten())
        '''frequency smoothing '''

        Sf = Sf[f_win_length:N_eff + f_win_length]
        '''initialization'''
        if (loop_i == 1):
            lambda_dav = lambda_d = Ya2
            gamma = 1
            GH1 = 1
            eta_2term = np.power(GH1, 2) * gamma
            S = Smin = St = Smint = Smin_sw = Smint_sw = Sf

        # if (loop_i < 30) or (loop_i % 2 == 1):
        if True:
            '''instant SNR'''
            gamma = np.divide(Ya2, np.maximum(lambda_d, 1e-10))
            ''' update smoothed SNR, eq.18, where eta_2term = GH1 .^ 2 .* gamma'''
            eta = alpha_eta * eta_2term + (1 - alpha_eta) * np.maximum(
                (gamma - 1), 0)

            eta = np.maximum(eta, eta_min)
            v = np.divide(gamma * eta, (1 + eta))

            GH1 = np.divide(eta, (1 + eta)) * np.exp(0.5 * expint(v))

            S = alpha_s * S + (1 - alpha_s) * Sf

            if (loop_i < 30):
                Smin = S
                Smin_sw = S

            else:
                Smin = np.minimum(Smin, S)
                Smin_sw = np.minimum(Smin_sw, S)

            gamma_min = np.divide((Ya2 / Bmin), Smin)
            zeta = np.divide(S / Bmin, Smin)

            I_f = np.zeros((N_eff, ))
            I_f[gamma_min < gamma0] = 1
            I_f[zeta < zeta0] = 1

            conv_I = np.convolve(win_freq, I_f)
            '''smooth'''
            conv_I = conv_I[f_win_length:N_eff + f_win_length]

            Sft = St

            conv_Y = np.convolve(win_freq.flatten(), (I_f * Ya2).flatten())
            '''eq. 26'''
            conv_Y = conv_Y[f_win_length:N_eff + f_win_length]

            Sft = St
            Sft = np.divide(conv_Y, conv_I)
            Sft[(conv_I) == 0] = St[(conv_I) == 0]

            St = alpha_s * St + (1 - alpha_s) * Sft
            '''updated smoothed spec eq. 27'''

            if (loop_i < 30):
                Smint = St
                Smint_sw = St
            else:
                Smint = np.minimum(Smint, St)
                Smint_sw = np.minimum(Smint_sw, St)

            gamma_mint = np.divide(Ya2 / Bmin, Smint)
            zetat = np.divide(S / Bmin, Smint)
            '''eq. 29 speech absence probability'''
            '''eq. 29 init p(speech active | gamma)'''

            temp = [0] * N_eff

            # find prior probability of speech presence
            qhat = (gamma1 - gamma_mint) / (gamma1 - 1)
            qhat[gamma_mint < 1] = 1
            qhat[gamma_mint < gamma1] = 1
            qhat[zetat < zeta0] = 1
            qhat[gamma_mint >= gamma1] = 0
            qhat[zetat >= zeta0] = 0

            phat = np.divide(1, (1 + np.divide(qhat, (1 - qhat)) *
                                 (1 + eta) * np.exp(-v)))
            phat[gamma_mint >= gamma1] = 1
            phat[zetat >= zeta0] = 1

            alpha_dt = alpha_d + (1 - alpha_d) * phat
            lambda_dav = alpha_dt * lambda_dav + (1 - alpha_dt) * Ya2
            lambda_d = lambda_dav * beta

            if l_mod_lswitch == 2 * Vwin:
                '''reinitialize every Vwin frames'''
                l_mod_lswitch = 0
                try:
                    SW = np.concatenate((SW[1:Nwin], Smin_sw))
                    Smin = np.amin(SW)
                    Smin_sw = S
                    SWt = np.concatenate((SWt[1:Nwin], Smint_sw))
                    Smint = np.amin(SWt)
                    Smint_sw = St

                # initialize
                except NameError:  # first pass: SW and SWt not yet allocated
                    SW = np.tile(S, (Nwin))
                    SWt = np.tile(St, (Nwin))

            l_mod_lswitch = l_mod_lswitch + 1

            gamma = np.divide(Ya2, np.maximum(lambda_d, 1e-10))
            '''update instant SNR'''

            eta = alpha_eta * eta_2term + (1 - alpha_eta) * np.maximum(
                gamma - 1, 0)

            eta[eta < eta_min] = eta_min

            v = np.divide(gamma * eta, (1 + eta))

            GH1 = np.divide(eta, (1 + eta)) * np.exp(0.5 * expint(v))

            G = np.power(GH1, phat) * np.power(GH0, (1 - phat))

            eta_2term = np.power(GH1, 2) * gamma
        '''eq. 18'''

        X = np.concatenate((np.zeros(
            (3, )), (G[3:N_eff - 1]) * (Y[3:N_eff - 1]), [0]))

        X_2 = X[1:N_eff - 1]
        X_2 = X_2[::-1]
        X_other_half = np.conj(X_2)
        X = np.concatenate((X, X_other_half))
        '''extend with the conjugate-symmetric half of the spectrum'''

        temp = np.real(np.fft.ifft(X))

        frame_result = win * temp * Cwin * Cwin

        frame_out = frame_out + frame_result
        output, zi = bandpass(frame_out[0:frame_move], postprocess, high_cut,
                              fs, zi)  # bandpass the signal
        # print(len(frame_out))
        loop_i = loop_i + 1

    # print(time.time()-start)
    return (output)
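
# Self-contained illustration of the conjugate-symmetric extension used above:
# forcing X[N - k] = conj(X[k]) (with real DC and Nyquist bins) makes the
# inverse FFT real up to roundoff. 129 bins here stand in for N_eff = N/2 + 1.
rng = np.random.default_rng(0)
half = rng.standard_normal(129) + 1j * rng.standard_normal(129)
half[0] = half[0].real      # DC bin must be real
half[-1] = half[-1].real    # Nyquist bin must be real
X_full = np.concatenate((half, np.conj(half[-2:0:-1])))
assert np.max(np.abs(np.imag(np.fft.ifft(X_full)))) < 1e-12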
Example #49
add_c = np.ones_like(c)
add = c + add_c
mul = c * add_c
matrix_mul = c.dot(add_c.T)
add_c *= 3
sum = f.sum(axis=0)  # along axis 0
max = f.max(axis=1)
min = f.min(axis=1)
# print(sum,max,min)
# universal functions (ufuncs)
num = np.linspace(0, np.pi, 6)
sin = np.sin(num)
exp = np.exp(num)
log = np.log2(np.array([2, 2**2, 2**3]))
abs = np.absolute(np.negative(np.arange(3)))
H = np.conj(complex.T)
# print(H)
# indexing, slicing and iterating
onedim = np.array([0, 1, 2, 3, 4, 5])
twodim = np.arange(16).reshape(4, 4)
threedim = np.arange(27).reshape(3, 3, 3)
one_a = onedim[2]
one_list = onedim[2:5]
one_list2 = onedim[::2]
# each axis of a multidimensional array can have its own index; the indices are given as a comma-separated tuple:
two_a = twodim[2, 3]
two_list = twodim[3, ::2]  # take row 3, selecting every other element
three_a = threedim[2, 1, 0]
three_list = threedim[1, :, 1]
# print(np.floor(3.6))
# changing array shape; copies and views
Example #50
def normalise_columns(A):

    G = np.diag(np.diag(np.conj(A).T @ A))
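    # The diagonal of A^H A holds the squared column norms, so A @ inv(sqrt(G))
    # divides each column of A by its Euclidean norm.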
    return A @ np.linalg.inv(np.sqrt(G))
Example #51
#
# $$x[n] = \frac{1}{N} \sum_{k=0}^{N-1} y[k]\exp \left ( i 2 \pi k n /N \right )$$

# %% [markdown]
# What about the imaginary part? All imaginary coefficients are zero (neglecting roundoff errors).

# %%
imag_coeffs = np.imag(thefft)
fig, theAx = plt.subplots(1, 1, figsize=(8, 6))
theAx.plot(imag_coeffs)
out = theAx.set_title('imag fft of onehz')

# %%
# now evaluate the power spectrum using Stull's 8.6.1a on p. 312

Power = np.real(thefft * np.conj(thefft))
totsize = len(thefft)
halfpoint = int(np.floor(totsize / 2.))
firsthalf = Power[0:halfpoint]


fig, ax = plt.subplots(1, 1)
freq = np.arange(0, 5., 0.05)
ax.plot(freq[0:halfpoint], firsthalf)
ax.set_title('power spectrum')
out = ax.set_xlabel('frequency (Hz)')
len(freq)

# %% [markdown]
# Check Stull 8.6.1b (or Numerical Recipes 12.0.13), which says that the summed power spectrum equals the variance of the series
#
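# A hedged sketch of that check, assuming the underlying series is stored in
# a variable `onehz` (as the plot title above suggests): by Parseval's
# theorem the variance equals the sum of the squared spectrum over the
# non-DC bins, divided by N**2.

# %%
N = len(thefft)
variance = np.var(onehz)              # variance of the time series
parseval = np.sum(Power[1:]) / N**2   # skip the k=0 (mean) bin
print(variance, parseval)             # should agree to roundoff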
Example #52
    def calc_ft(self):
        # finite-temperature emission: the conjugate of the parent pair's value
        return np.conj(super(BraKetPairEmiFiniteT, self).calc_ft())
Example #53
    def minimize(self, sess, epoch, feed_dict):
        self._epoch = epoch

        if epoch == 0:
            print('Reset var_old')
            self._var_old_np = []
            for param_idx in range(len(self._params.get(as_dict=False))):
                if self._params.get_prox()[param_idx] is not None:
                    if self._params.get_tau()[param_idx] is not None:
                        sess.run(self._params.get_prox()[param_idx],
                                 feed_dict={self._params.get_tau()[param_idx]: 1./self._L_np[param_idx][0]})
                    else:
                        sess.run(self._params.get_prox()[param_idx])
                var_old_np = self._params.get(as_dict=False)[param_idx].eval(session=sess)
                if self._params.get_np_prox()[param_idx] is not None:
                    var_old_np = self._params.get_np_prox()[param_idx](var_old_np, 1. / self._L_np[param_idx][0])
                self._var_old_np.append(var_old_np)

        if self._momentum is None:
            beta = (np.mod(epoch, self._reset_memory)) / (np.mod(epoch, self._reset_memory) + 3.0)
            print("beta:", beta)
        else:
            beta = self._momentum

        run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
        run_metadata = tf.RunMetadata()  # tracing objects (unused in this variant)
        for s in range(self._num_stages):
            for param_idx in range(len(self._params.get(as_dict=False))):
                # overrelaxation and store old theta value
                theta_np = self._params.get(as_dict=False)[param_idx].eval(session=sess)
                theta_tilde_np = theta_np.copy()
                theta_tilde_np[s] = theta_np[s] + beta * (theta_np[s] - self._var_old_np[param_idx][s])
                self._var_old_np[param_idx][s] = theta_np[s].copy()

                # update param and compute energy, gradient
                sess.run(self._update_var[param_idx], feed_dict={self._var_old[param_idx]: theta_tilde_np})
                energy_old, gradient_val = sess.run([self._energy, self._gradients[param_idx]], feed_dict=feed_dict)
                # clear the gradient for all non-active stages
                clear = np.zeros_like(gradient_val)
                clear[s, ...] = 1
                gradient_val *= clear

                # backtracking
                for bt_iter in range(1, self._max_bt_iter + 1):
                    # do the gradient step with the current step size
                    theta_np = theta_tilde_np - gradient_val / self._L_np[param_idx][s]
                    # do a np prox map
                    if self._params.get_np_prox()[param_idx] is not None:
                        theta_np = self._params.get_np_prox()[param_idx](theta_np, 1. / self._L_np[param_idx][s])

                    # update the tf variables
                    sess.run(self._update_var[param_idx], feed_dict={self._var_old[param_idx]: theta_np})
                    # do a tf prox map
                    if self._params.get_prox()[param_idx] is not None:
                        if self._params.get_tau()[param_idx] is not None:
                            sess.run(self._params.get_prox()[param_idx],
                                     feed_dict={self._params.get_tau()[param_idx]: 1./self._L_np[param_idx][s]})
                        else:
                            sess.run(self._params.get_prox()[param_idx])

                    Q_lhs = sess.run(self._energy, feed_dict=feed_dict)

                    theta_new_np = self._params.get(as_dict=False)[param_idx].eval(session=sess)

                    Q_rhs = energy_old + np.real(np.sum((theta_new_np - theta_tilde_np) * np.conj(gradient_val))) +\
                                      self._L_np[param_idx][s] / 2.0 * np.real(np.sum((theta_new_np - theta_tilde_np) *
                                                                                      np.conj(theta_new_np - theta_tilde_np)))

                    delta = 1 + np.sign(Q_rhs) * 1e-3  # loosen the test by 0.1% to absorb roundoff
                    if Q_lhs <= Q_rhs * delta:
                        self._L_np[param_idx][s] *= 0.75
                        if self._L_np[param_idx][s] <= 1e-3:
                            self._L_np[param_idx][s] = 1e-3
                        break
                    else:
                        self._L_np[param_idx][s] *= 2.0
                        if self._L_np[param_idx][s] > 1e12:
                            self._L_np[param_idx][s] = 1e12
                            break

                print('*** stage=%d, L=%f, param=%s, Qlhs=%f, Qrhs=%f ||g||=%e' % (s,
                self._L_np[param_idx][s], self._params.get(as_dict=False)[param_idx].name, Q_lhs, Q_rhs,
                                                np.sqrt(np.real(np.sum(gradient_val*np.conj(gradient_val))))))
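
# The backtracking loop above is the standard descent-lemma test: a step is
# accepted once the true energy falls below the quadratic model around
# theta_tilde, with L shrunk by 0.75 on success and doubled on failure.
# A minimal framework-free sketch of that rule (plain numpy; f, grad and the
# starting point are hypothetical):
import numpy as np

def backtracking_step(f, grad, theta, L, L_min=1e-3, L_max=1e12, max_iter=20):
    f_old, g = f(theta), grad(theta)
    for _ in range(max_iter):
        theta_new = theta - g / L
        d = theta_new - theta
        # quadratic upper model Q(theta_new; theta) for step size 1/L
        Q_rhs = f_old + np.real(np.sum(d * np.conj(g))) \
            + L / 2.0 * np.real(np.sum(d * np.conj(d)))
        if f(theta_new) <= Q_rhs:
            return theta_new, max(L * 0.75, L_min)  # accept, relax L
        L = min(L * 2.0, L_max)                     # reject, be more cautious
    return theta_new, L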
Example #54
    def rhochange(self):
        """Updated cached c array when rho changes."""

        if self.opt['HighMemSolve']:
            self.c = sl.solvedbi_sm_c(self.gDf, np.conj(self.gDf), self.rho,
                                      self.cri.axisM)
Example #55
    def baseline_converter(self,
                           position_table,
                           frequency_channels=None,
                           verbose=True):
        if verbose:
            print("")
            print("Converting xyz to uvw-coordinates")
        if frequency_channels is None:
            self.reference_frequency = 150e6
        elif type(frequency_channels) == numpy.ndarray:
            assert min(
                frequency_channels
            ) > 1e6, "Frequency range is below 1 MHz, probably wrong units"
            self.reference_frequency = frequency_channels[0]
        elif numpy.isscalar(frequency_channels):
            assert frequency_channels > 1e6, "Frequency range is below 1 MHz, probably wrong units"
            self.reference_frequency = frequency_channels
        else:
            raise ValueError(
                f"frequency_channels should be 'numpy.ndarray' or scalar, "
                f"not {type(frequency_channels)}")

        # calculate the wavelength at the reference frequency
        reference_wavelength = c / self.reference_frequency
        # count the number of antennas
        number_of_antenna = position_table.number_antennas()
        # Calculate the number of possible baselines
        self.number_of_baselines = int(0.5 * number_of_antenna *
                                       (number_of_antenna - 1.))

        # Create arrays for the baselines
        # baselines x Antenna1, Antenna2, u, v, w, gain product, phase sum x channels
        antenna_1 = numpy.zeros(self.number_of_baselines)
        antenna_2 = antenna_1.copy()

        u_coordinates = antenna_1.copy()
        v_coordinates = antenna_1.copy()
        w_coordinates = antenna_1.copy()
        baseline_gains = numpy.zeros((self.number_of_baselines, 1),
                                     dtype=complex)

        if verbose:
            print("")
            print("Number of antenna =", number_of_antenna)
            print("Total number of baselines =", self.number_of_baselines)

        # running index into the baseline table
        k = 0

        for i in range(number_of_antenna):
            for j in range(i + 1, number_of_antenna):
                # save the antenna numbers in the uv table
                antenna_1[k] = position_table.antenna_ids[i]
                antenna_2[k] = position_table.antenna_ids[j]

                # rescale and write uvw to multifrequency baseline table
                u_coordinates[k] = (
                    position_table.x_coordinates[i] -
                    position_table.x_coordinates[j]) / reference_wavelength
                v_coordinates[k] = (
                    position_table.y_coordinates[i] -
                    position_table.y_coordinates[j]) / reference_wavelength
                w_coordinates[k] = (
                    position_table.z_coordinates[i] -
                    position_table.z_coordinates[j]) / reference_wavelength
                if position_table.antenna_gains is None:
                    baseline_gains[k] = 1 + 0j
                else:
                    baseline_gains[k] = position_table.antenna_gains[
                        i] * numpy.conj(position_table.antenna_gains[j])

                k += 1

        self.antenna_id1 = antenna_1
        self.antenna_id2 = antenna_2

        self.u_coordinates = u_coordinates
        self.v_coordinates = v_coordinates
        self.w_coordinates = w_coordinates

        self.baseline_gains = baseline_gains
        return
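
# The double loop above is O(n^2) in Python; the same uvw table can be built
# in one shot with numpy.triu_indices. A hedged sketch, reusing the names
# from the method above (position_table must expose antenna_ids and
# x/y/z_coordinates as numpy arrays):
import numpy

i, j = numpy.triu_indices(number_of_antenna, k=1)   # all pairs with i < j
antenna_1 = position_table.antenna_ids[i]
antenna_2 = position_table.antenna_ids[j]
u_coordinates = (position_table.x_coordinates[i] -
                 position_table.x_coordinates[j]) / reference_wavelength
# v_coordinates and w_coordinates follow the same pattern with y and z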
Example #56
    def minimize(self, sess, epoch, feed_dict):
        self._epoch = epoch

        if epoch == 0:
            print('Reset var_old')
            self._var_old_np = []
            for param_idx in range(len(self._params.get(as_dict=False))):
                if self._params.get_prox()[param_idx] is not None:
                    if self._params.get_tau()[param_idx] is not None:
                        sess.run(self._params.get_prox()[param_idx],
                                 feed_dict={self._params.get_tau()[param_idx]: 1./self._L_np[param_idx]})
                    else:
                        sess.run(self._params.get_prox()[param_idx])
                var_old_np = self._params.get(as_dict=False)[param_idx].eval(session=sess)
                if self._params.get_np_prox()[param_idx] is not None:
                    var_old_np = self._params.get_np_prox()[param_idx](var_old_np, 1./self._L_np[param_idx])
                self._var_old_np.append(var_old_np)

        if self._momentum is None:
            beta = (np.mod(epoch, self._reset_memory)) / (np.mod(epoch, self._reset_memory) + 3.0)
            print("beta:", beta)
        else:
            beta = self._momentum

        run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
        run_metadata = tf.RunMetadata()
        for param_idx in range(len(self._params.get(as_dict=False))):
            # overrelaxation and store old theta value
            theta_np = self._params.get(as_dict=False)[param_idx].eval(session=sess)
            theta_tilde_np = theta_np + beta * (theta_np - self._var_old_np[param_idx])
            self._var_old_np[param_idx] = theta_np.copy()

            # update param and compute energy, gradient
            sess.run(self._update_var[param_idx], feed_dict={self._var_old[param_idx]: theta_tilde_np})
            if epoch == 0:
                energy_old, gradient_val = sess.run([self._energy, self._gradients[param_idx]],
                                                    feed_dict=feed_dict, options=run_options, run_metadata=run_metadata)
                # Create the Timeline object, and write it to a json
                tl = timeline.Timeline(run_metadata.step_stats)
                ctf = tl.generate_chrome_trace_format()
                with open('timeline_%s_%d.json' % (self._params.get(as_dict=False)[param_idx].name, epoch), 'w') as f:
                    f.write(ctf)
            else:
                energy_old, gradient_val = sess.run([self._energy, self._gradients[param_idx]], feed_dict=feed_dict)

            # backtracking
            for bt_iter in range(1, self._max_bt_iter + 1):
                # do the gradient step with the current step size
                theta_np = theta_tilde_np - gradient_val/self._L_np[param_idx]
                # do a np prox map
                if self._params.get_np_prox()[param_idx] is not None:
                    theta_np = self._params.get_np_prox()[param_idx](theta_np, 1./self._L_np[param_idx])

                # update the tf variables
                sess.run(self._update_var[param_idx], feed_dict={self._var_old[param_idx]: theta_np})
                # do a tf prox map
                if self._params.get_prox()[param_idx] is not None:
                    if self._params.get_tau()[param_idx] is not None:
                        sess.run(self._params.get_prox()[param_idx],
                                 feed_dict={self._params.get_tau()[param_idx]: 1./self._L_np[param_idx]})
                    else:
                        sess.run(self._params.get_prox()[param_idx])

                Q_lhs = sess.run(self._energy, feed_dict=feed_dict)

                theta_new_np = self._params.get(as_dict=False)[param_idx].eval(session=sess)

                Q_rhs = energy_old + np.real(np.sum((theta_new_np - theta_tilde_np) * np.conj(gradient_val))) +\
                                  self._L_np[param_idx] / 2.0 * np.linalg.norm(theta_new_np - theta_tilde_np) ** 2

                delta = 1 + np.sign(Q_rhs) * 1e-3  # loosen the test by 0.1% to absorb roundoff
                if Q_lhs <= Q_rhs * delta:
                    self._L_np[param_idx] *= 0.75
                    if self._L_np[param_idx] <= 1e-3:
                        self._L_np[param_idx] = 1e-3
                    break
                else:
                    self._L_np[param_idx] *= 2.0
                    if self._L_np[param_idx] > 1e12:
                        self._L_np[param_idx] = 1e12
                        break

            print('*** L=%f, param=%s, Qlhs=%f, Qrhs=%f ||g||=%e' % (
            self._L_np[param_idx], self._params.get(as_dict=False)[param_idx].name, Q_lhs, Q_rhs, np.linalg.norm(gradient_val)))
Example #57
def QuarticSolverVec(a,b,c,d,e):
    """
     function [x1, x2, x3, x4]=QuarticSolverVec(a,b,c,d,e)
     v.0.2 - Python Port
     - Added a condition in size sorting to avoid floating point errors.
     - Removed early loop abortion when stuck in a loop (inefficient)
     - Improved numerical stability of the analytical solution
     - Added code for the case of S==0
     ============================================
     v.0.1 - Nearly identical to QuarticSolver v.0.4, the first successful vectorized implementation
             Changed the logic of ChosenSet to accommodate simultaneous convergence of sets 1 & 2
           - Note the periodicity in nearly-convergent solutions can be other
             than four (related to the text on step 4 after table 3). Examples:
             period of 5: [a,b,c,d,e]=[0.111964240308252 -0.88497524334712 -0.197876116344933 -1.07336408259262 -0.373248675102065];
             period of 6: [a,b,c,d,e]=[-1.380904438798326 0.904866918945240 -0.280749330818231 0.990034312758900 1.413106456228119];
             period of 22: [a,b,c,d,e]=[0.903755513939902 0.490545114637739 -1.389679906455410 -0.875910689438623 -0.290630547104907];
             Therefore the condition was changed from epsilon1(iiter)==0 to epsilon1(iiter)<8*eps (and similarly for epsilon2)
           - The special-case criterion of the analytical formula was changed to
             ind=abs(4*Delta0**3./Delta1**2)<2*eps;  (instead of exact zero)
           - vectorized
     ============================================
     - Solves for the x1-x4 roots of the quartic equation y(x)=ax^4+bx^3+cx^2+dx+e.
       Multiple equations can be solved simultaneously by entering same-sized column vectors for all inputs.
     - Note the code immediately translates the input parameters ["a","b","c","d","e"] to the reference paper's parameters [1,a,b,c,d] for consistency,
       and the code probably performs best when "a"=1.
    
    Parameters
    ----------
    a,b,c,d,e : ``1-D arrays``
        Quartic polynomial coefficients
    
    Returns
    ------
    - x1-x4 : ``2-D array``
        Concatenated array of the polynomial roots. The function always returns four (possibly complex) values. Multiple roots, if they exist, are given multiple times. An error results in four NaN values.
        No convergence may result in four inf values (still?)
    
    Reference: 
    Peter Strobach (2010), Journal of Computational and Applied Mathematics 234
        http://www.sciencedirect.com/science/article/pii/S0377042710002128
    """
#    MaxIter=16;
    MaxIter=50;
    eps = np.finfo(float).eps
    #INPUT CONTROL
    #Note: not all input control is implemented.
    # all-column vectors only
#    if size(a,1)~=size(b,1) or size(a,1)~=size(c,1) or size(a,1)~=size(d,1) or size(a,1)~=size(e,1) or ...
#       size(a,2)~=1 or size(b,2)~=1 or size(c,2)~=1 or size(d,2)~=1 or size(e,2)~=1:
#        fprintf('ERROR: illegal input parameter sizes.\n');
#        x1=inf; x2=inf; x3=inf; x4=inf;    
#        return
    
    # translate input variables to the paper's
    if np.any(a==0):
       print('ERROR: a==0. Not a quartic equation.\n')
       x1=np.nan; x2=np.nan; x3=np.nan; x4=np.nan;
       return x1,x2,x3,x4
    else:
        input_a=a;
        input_b=b;
        input_c=c;
        input_d=d;
        input_e=e;
        a=input_b/input_a;
        b=input_c/input_a;
        c=input_d/input_a;
        d=input_e/input_a;
    
    # PRE-ALLOCATE MEMORY
    # ChosenSet is used to track which input set already has a solution (=non-zero value)
    ChosenSet=np.zeros_like(a);
    x1 = np.empty_like(a,complex)
    x1[:] = np.nan
    x2 = x1.copy(); x3 = x1.copy(); x4 = x1.copy()
    x11 = x1.copy(); x12 = x1.copy(); x21 = x1.copy(); x22 = x1.copy()
    alpha01 = x1.copy(); alpha02 = x1.copy(); beta01 = x1.copy(); beta02 = x1.copy()
    gamma01 = x1.copy(); gamma02 = x1.copy(); delta01 = x1.copy(); delta02 = x1.copy()
    e11 = x1.copy(); e12 = x1.copy(); e13 = x1.copy(); e14 = x1.copy()
    e21 = x1.copy(); e22 = x1.copy(); e23 = x1.copy(); e24 = x1.copy()
    alpha1 = x1.copy(); alpha2 = x1.copy(); beta1 = x1.copy(); beta2 = x1.copy()
    gamma1 = x1.copy(); gamma2 = x1.copy(); delta1 = x1.copy(); delta2 = x1.copy()
    alpha = x1.copy(); beta = x1.copy(); gamma = x1.copy(); delta = x1.copy()
    # check multiple roots -cases 2 & 3. indexed by ChosenSet=-2
    test_alpha=0.5*a;
    test_beta=0.5*(b-test_alpha**2);
    test_epsilon=np.stack((c-2*test_alpha*test_beta, d-test_beta**2)).T;
    ind=np.all(test_epsilon==0,1);
    if np.any(ind):
        x1[ind], x2[ind]=SolveQuadratic(np.ones_like(test_alpha[ind]),test_alpha[ind],test_beta[ind]);
        x3[ind]=x1[ind]; x4[ind]=x2[ind];
        ChosenSet[ind]=-2;
    
    # check multiple roots -case 4. indexed by ChosenSet=-4
    i=ChosenSet==0;
    x11[i], x12[i]=SolveQuadratic(np.ones(np.sum(i)),a[i]/2,b[i]/6);
    x21[i]=-a[i]-3*x11[i];    
    test_epsilon[i,:2]=np.stack((c[i]+x11[i]**2*(x11[i]+3*x21[i]), d[i]-x11[i]**3*x21[i])).T;
    ind[i]=np.all(test_epsilon[i]==0,1);
    if np.any(ind[i]):
        x1[ind[i]]=x11[ind[i]]; x2[ind[i]]=x11[ind[i]]; x3[ind[i]]=x11[ind[i]]; x4[ind[i]]=x12[ind[i]];
        ChosenSet[ind[i]]=-4;
    x22[i]=-a[i]-3*x12[i];
    test_epsilon[i,:2]=np.stack((c[i]+x12[i]**2*(x12[i]+3*x22[i]), d[i]-x12[i]**3*x22[i])).T;
    ind[i]=np.all(test_epsilon[i]==0,1);
    if np.any(ind[i]):
        x1[ind[i]]=x21[ind[i]]; x2[ind[i]]=x21[ind[i]]; x3[ind[i]]=x21[ind[i]]; x4[ind[i]]=x22[ind[i]];
        ChosenSet[ind[i]]=-4;
    # General solution
    # initialize
    epsilon1=np.empty((np.size(a),MaxIter))
    epsilon1[:]=np.inf
    epsilon2=epsilon1.copy();
    
    i=ChosenSet==0;
    fi=np.nonzero(i)[0];
    x=np.empty((fi.size,4),complex)
    ii = np.arange(fi.size)
    #Calculate analytical root values
    x[:,0], x[:,1], x[:,2], x[:,3]=AnalyticalSolution(np.ones(np.sum(i)),a[i],b[i],c[i],d[i],eps);
    #Sort the roots in order of their size
    ind=np.argsort(abs(x))[:,::-1]; #'descend'
    x1[i]=x.flatten()[4*ii+ind[:,0]];
    x2[i]=x.flatten()[4*ii+ind[:,1]];
    x3[i]=x.flatten()[4*ii+ind[:,2]];
    x4[i]=x.flatten()[4*ii+ind[:,3]];
    #Avoiding floating point errors.
    #The value chosen is somewhat arbitrary. See Appendix C for details.
    ind = abs(x1)-abs(x4)<8*10**-12;
    x2[ind] = np.conj(x1[ind])
    x3[ind] = -x1[ind]
    x4[ind] = -x2[ind]
    #Initializing parameter values
    alpha01[i]=-np.real(x1[i]+x2[i]);
    beta01[i]=np.real(x1[i]*x2[i]);
    alpha02[i]=-np.real(x2[i]+x3[i]);
    beta02[i]=np.real(x2[i]*x3[i]);
    gamma01[i], delta01[i]=FastGammaDelta(alpha01[i],beta01[i],a[i],b[i],c[i],d[i]);
    gamma02[i], delta02[i]=FastGammaDelta(alpha02[i],beta02[i],a[i],b[i],c[i],d[i]);
    
    alpha1[i]=alpha01[i]; beta1[i]=beta01[i]; gamma1[i]=gamma01[i]; delta1[i]=delta01[i];
    alpha2[i]=alpha02[i]; beta2[i]=beta02[i]; gamma2[i]=gamma02[i]; delta2[i]=delta02[i];
    
    #Backward Optimizer Outer Loop
    e11[i]=a[i]-alpha1[i]-gamma1[i];
    e12[i]=b[i]-beta1[i]-alpha1[i]*gamma1[i]-delta1[i];
    e13[i]=c[i]-beta1[i]*gamma1[i]-alpha1[i]*delta1[i];
    e14[i]=d[i]-beta1[i]*delta1[i];
    
    e21[i]=a[i]-alpha2[i]-gamma2[i];
    e22[i]=b[i]-beta2[i]-alpha2[i]*gamma2[i]-delta2[i];
    e23[i]=c[i]-beta2[i]*gamma2[i]-alpha2[i]*delta2[i];
    e24[i]=d[i]-beta2[i]*delta2[i];
    iiter=0;
    while iiter<MaxIter and np.any(ChosenSet[i]==0):
        i=np.nonzero(ChosenSet==0)[0];
        
        alpha1[i], beta1[i], gamma1[i], delta1[i], e11[i], e12[i], e13[i], e14[i], epsilon1[i,iiter]=BackwardOptimizer_InnerLoop(a[i],b[i],c[i],d[i],alpha1[i],beta1[i],gamma1[i],delta1[i],e11[i],e12[i],e13[i],e14[i]);
        alpha2[i], beta2[i], gamma2[i], delta2[i], e21[i], e22[i], e23[i], e24[i], epsilon2[i,iiter]=BackwardOptimizer_InnerLoop(a[i],b[i],c[i],d[i],alpha2[i],beta2[i],gamma2[i],delta2[i],e21[i],e22[i],e23[i],e24[i]);
    
        j = np.ones_like(a[i])
        j[(epsilon2[i,iiter]<epsilon1[i,iiter]).flatten()] = 2
        BestEps = np.nanmin(np.stack([epsilon1[i,iiter].flatten(), epsilon2[i,iiter].flatten()]),0);
        ind=BestEps<8*eps;
        ChosenSet[i[ind]]=j[ind];
        ind=np.logical_not(ind);
#        if iiter>0 and np.any(ind):
#            ii=i[ind];
#            LimitCycleReached = np.empty((ii.size,2),bool)
#            LimitCycleReached[:,0] = np.any(epsilon1[ii,:iiter]==epsilon1[ii,iiter],0)
#            LimitCycleReached[:,1] = np.any(epsilon2[ii,:iiter]==epsilon2[ii,iiter],0)
##            LimitCycleReached=[any(bsxfun(@eq,epsilon1(i(ind),max(1,iiter-4):max(1,iiter-1)),epsilon1(i(ind),iiter)),2) any(bsxfun(@eq,epsilon2(i(ind),max(1,iiter-4):max(1,iiter-1)),epsilon2(i(ind),iiter)),2)];
#            ChosenSet[ii[np.logical_and(LimitCycleReached[:,0] , np.logical_not(LimitCycleReached[:,1]))]]=1;
#            ChosenSet[ii[np.logical_and(LimitCycleReached[:,1] , np.logical_not(LimitCycleReached[:,0]))]]=2;
##            ChosenSet(ii(~LimitCycleReached(:,1) & LimitCycleReached(:,2)))=2;
##            ind=find(ind);
#            cond = np.logical_and(LimitCycleReached[:,1],LimitCycleReached[:,0])
#            ChosenSet[ii[cond]]=j[ind][cond]
##            ChosenSet(ii(LimitCycleReached(:,1) & LimitCycleReached(:,2)))=j(ind(LimitCycleReached(:,1) & LimitCycleReached(:,2)));
        iiter=iiter+1;
        
    #Checking which of the chains is relevant
    i=np.nonzero(ChosenSet==0)[0];
    ind=epsilon1[i,-1]<epsilon2[i,-1];
#    ind=np.logical_and(epsilon1[i,-1]<epsilon2[i,-1],np.logical_not(np.isnan(epsilon2[i,-1])));
    ChosenSet[i[ind]]=1;
    ChosenSet[i[np.logical_not(ind)]]=2;
    
    # Output
    i=ChosenSet==1;
    alpha[i]=alpha1[i];
    beta[i]=beta1[i];
    gamma[i]=gamma1[i];
    delta[i]=delta1[i];
    
    i=ChosenSet==2;
    alpha[i]=alpha2[i];
    beta[i]=beta2[i];
    gamma[i]=gamma2[i];
    delta[i]=delta2[i];
    
    i=ChosenSet>0;
    x1[i], x2[i]=SolveQuadratic(np.ones(np.sum(i)),alpha[i],beta[i]);
    x3[i], x4[i]=SolveQuadratic(np.ones(np.sum(i)),gamma[i],delta[i]);

    return np.array([x1,x2,x3,x4])
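
# A hedged usage sketch (requires the helper routines referenced above, e.g.
# AnalyticalSolution and SolveQuadratic): solve one quartic and cross-check
# against numpy's companion-matrix solver.
import numpy as np

a, b, c, d, e = (np.array([v]) for v in (1.0, -3.0, 3.0, -3.0, 2.0))
roots = QuarticSolverVec(a, b, c, d, e)               # shape (4, 1)
print(np.sort_complex(roots[:, 0]))                   # 1, 2, +/-1j
print(np.sort_complex(np.roots([1.0, -3.0, 3.0, -3.0, 2.0])))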
Example #58
# Author: Gael Varoquaux <*****@*****.**>
# Copyright (c) 2008, Enthought, Inc.
# License: BSD Style.

from mayavi import mlab
import numpy as np

# Calculate the Julia set on a grid
x, y = np.ogrid[-1.5:0.5:500j, -1:1:500j]
z = x + 1j * y

julia = np.zeros(z.shape)

for i in range(50):
    z = z**2 - 0.70176 - 0.3842j
    # z * conj(z) is |z|**2, so this accumulates weights for points that
    # have escaped the radius-2 circle; take .real for a valid comparison
    julia += 1 / float(2 + i) * ((z * np.conj(z)).real > 4)

# Display it
mlab.figure(size=(400, 300))
mlab.surf(julia, colormap='gist_earth', warp_scale='auto', vmax=1.5)

# A view into the "Canyon"
mlab.view(65, 27, 322, [30., -13.7, 136])
mlab.show()
Example #59
def power(H, n, e, u):
    """Return H**n from the eigendecomposition H = u @ diag(e) @ u^H.

    Note that H itself is unused; only its eigenvalues e and eigenvectors u
    enter the computation.
    """
    diag = np.power(e, n)
    return np.dot(u, np.dot(np.diag(diag), np.conj(np.transpose(u))))
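
# A hedged usage sketch: square a random Hermitian matrix via
# numpy.linalg.eigh and compare with direct multiplication.
import numpy as np

A = np.random.randn(4, 4) + 1j * np.random.randn(4, 4)
H = A + np.conj(A.T)                           # Hermitian test matrix
e, u = np.linalg.eigh(H)                       # eigenvalues, eigenvectors
print(np.allclose(power(H, 2, e, u), H @ H))   # True up to roundoff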
Example #60
def coherent(N, alpha, offset=0, method=None):
    """Generates a coherent state with eigenvalue alpha.

    Constructed using displacement operator on vacuum state.

    Parameters
    ----------
    N : int
        Number of Fock states in Hilbert space.

    alpha : float/complex
        Eigenvalue of coherent state.

    offset : int (default 0)
        The lowest number state that is included in the finite number state
        representation of the state. Using a non-zero offset will make the
        default method 'analytic'.

    method : string {'operator', 'analytic'}
        Method for generating coherent state.

    Returns
    -------
    state : qobj
        Qobj quantum object for coherent state

    Examples
    --------
    >>> coherent(5,0.25j) # doctest: +SKIP
    Quantum object: dims = [[5], [1]], shape = [5, 1], type = ket
    Qobj data =
    [[  9.69233235e-01+0.j        ]
     [  0.00000000e+00+0.24230831j]
     [ -4.28344935e-02+0.j        ]
     [  0.00000000e+00-0.00618204j]
     [  7.80904967e-04+0.j        ]]

    Notes
    -----
    Select method 'operator' (default) or 'analytic'. With the
    'operator' method, the coherent state is generated by displacing
    the vacuum state using the displacement operator defined in the
    truncated Hilbert space of size 'N'. This method guarantees that the
    resulting state is normalized. With the 'analytic' method, the coherent
    state is generated using the analytical formula for the coherent-state
    coefficients in the Fock basis. This method does not guarantee that the
    state is normalized if truncated to a small number of Fock states, but
    in that case it gives more accurate coefficients.

    """
    if offset < 0:
        raise ValueError('Offset must be non-negative')

    if method is None:
        method = "operator" if offset == 0 else "analytic"

    if method == "operator":
        x = basis(N, 0)
        a = destroy(N)
        D = (alpha * a.dag() - conj(alpha) * a).expm()
        return D * x

    elif method == "analytic":
        sqrtn = np.sqrt(np.arange(offset, offset+N, dtype=complex))
        sqrtn[0] = 1  # Get rid of divide by zero warning
        data = alpha/sqrtn
        if offset == 0:
            data[0] = np.exp(-abs(alpha)**2 / 2.0)
        else:
            s = np.prod(np.sqrt(np.arange(1, offset + 1)))  # sqrt factorial
            data[0] = np.exp(-abs(alpha)**2 / 2.0) * alpha**(offset) / s
        np.cumprod(data, out=sqrtn)  # Reuse sqrtn array
        return Qobj(sqrtn)

    else:
        raise ValueError(
            "The method option can only take values 'operator' or 'analytic'")