Example #1
def power_process(data, sfreq, toffset, modulus, integration, log_scale, norm, zscale, title):
    """ Break power by modulus and display each block. Integration here acts
    as a pure average on the power level data.
    """
    if modulus:
        block = 0
        block_size = integration*modulus
        block_toffset = toffset
        while block < len(data) / block_size:

            vblock = data[block*block_size:block*block_size+modulus]
            pblock = vblock * numpy.conjugate(vblock)

            # complete integration
            for idx in range(1,integration):

                vblock = data[block*block_size+idx*modulus:block*block_size+idx*modulus+modulus]
                pblock += vblock * numpy.conjugate(vblock)

            pblock /= integration

            power_plot(pblock, sfreq, block_toffset, log_scale, norm, zscale, title)

            block += 1
            block_toffset += block_size / sfreq

    else:
        pdata = data * numpy.conjugate(data)
        power_plot(pdata, sfreq, toffset, log_scale, norm, zscale, title)
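A minimal usage sketch for the snippet above (not from the original source; power_plot is the module's plotting helper, stubbed out here so the sketch runs standalone):

import numpy

def power_plot(pdata, sfreq, toffset, log_scale, norm, zscale, title):
    # stub: the real helper renders one block of power data
    print(title, toffset, pdata.shape)

data = numpy.exp(2j * numpy.pi * 0.1 * numpy.arange(4096))  # synthetic complex tone
power_process(data, sfreq=1e3, toffset=0.0, modulus=256, integration=4,
              log_scale=True, norm=1.0, zscale=(0, 0), title="power")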
Example #2
 def step(self, u, i):
     print(u, i)
     integrate = self.wfs.gd.integrate
     w_cG = self.w_ucG[u]
     y_cG = self.y_ucG[u]
     wold_cG = self.wold_ucG[u]
     z_cG = self.z_cG
     
     self.solver(w_cG, self.z_cG, u)
     I_c = np.reshape(integrate(np.conjugate(z_cG) * w_cG)**-0.5,
                       (self.dim, 1, 1, 1))
     z_cG *= I_c
     w_cG *= I_c
     
     if i != 0:
         b_c = 1.0 / I_c
     else:
         b_c = np.reshape(np.zeros(self.dim), (self.dim, 1, 1, 1))
 
     self.hamiltonian.apply(z_cG, y_cG, self.wfs, self.wfs.kpt_u[u])
     a_c = np.reshape(integrate(np.conjugate(z_cG) * y_cG), (self.dim, 1, 1, 1))
     wnew_cG = (y_cG - a_c * w_cG - b_c * wold_cG)
     wold_cG[:] = w_cG
     w_cG[:] = wnew_cG
     self.a_uci[u, :, i] = a_c[:, 0, 0, 0]
     self.b_uci[u, :, i] = b_c[:, 0, 0, 0]
Example #3
def get_moments(v,m,n=100):
  """ Get the first 2n moments of a given vector
  using the Chebyshev recursion relations"""
  mus = np.array([0.0j for i in range(2*n)]) # empty array for the moments
  a = v.copy() # vector number 0
  am = v.copy() # will hold the previous vector
  a = m*v  # vector number 1
  bk = (np.transpose(np.conjugate(v))*v)[0,0] # scalar product
  bk1 = (np.transpose(np.conjugate(v))*a)[0,0] # scalar product
  mus[0] = bk  # mu0
  mus[1] = bk1 # mu1
  for i in range(1,n):
    ap = 2*m*a - am # Chebyshev recursion relation
    bk = (np.transpose(np.conjugate(a))*a)[0,0] # scalar product
    bk1 = (np.transpose(np.conjugate(ap))*a)[0,0] # scalar product
    mus[2*i] = 2.*bk
    mus[2*i+1] = 2.*bk1
    am = a + 0. # shift the vectors for the next iteration
    a = ap + 0.
  mu0 = mus[0] # first moment
  mu1 = mus[1] # second moment
  for i in range(1,n):
    mus[2*i] -= mu0
    mus[2*i+1] -= mu1
  return mus
Example #4
def idft(dft_vec, dt=1.0):
        """
        computes the inverse DFT of vec
        takes in the one-sided spectrum
        """
        N = len(dft_vec) ### if N is even, then n is even
                         ### if N is odd, then n is odd

        if N%2: ### if N is odd, n is odd
                n = 2*N-1
        else: ### if N is even, n is even
                n = 2*N

        seglen = n*dt ### length of time series

        vec = np.empty((n,), complex)
        vec[:N] = dft_vec
        if n%2: ### odd number of points
                vec[N:] = np.conjugate(dft_vec[1:])[::-1]
        else: ### even number of points
                vec[N:] = np.conjugate(dft_vec)[::-1]

        vec = np.fft.ifft( vec ) / seglen
        time = np.arange(0, seglen, dt)

        return vec, time
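A small smoke test for the reconstruction above (not from the original source): build a short one-sided spectrum by hand and check that the two-sided output has the expected length.

import numpy as np

dft_vec = np.array([1.0 + 0.0j, 0.5 - 0.2j, 0.1 + 0.3j])  # N = 3 (odd), so n = 2*3-1 = 5
vec, time = idft(dft_vec, dt=0.5)
assert len(vec) == 5 and len(time) == 5
# the assembled two-sided spectrum is Hermitian, so vec is real up to rounding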
Example #5
def FFT_Correlation(x,y):
    """
    FFT-based correlation, much faster than numpy autocorr.
    x and y are row-based vectors of arbitrary lengths.
    This is a vectorized implementation of O(N*log(N)) flops.
    """

    lengthx = x.shape[0]
    lengthy = y.shape[0]

    length = min(lengthx, lengthy)

    # truncate both inputs to a common length, then make them row vectors
    x = np.reshape(x[:length], (1, length))
    y = np.reshape(y[:length], (1, length))

    fftx = np.fft.fft(x, 2 * length - 1, axis=1)  # pad with zeros
    ffty = np.fft.fft(y, 2 * length - 1, axis=1)

    corr_xy = np.fft.ifft(fftx * np.conjugate(ffty), axis=1)
    corr_xy = np.real(np.fft.fftshift(corr_xy, axes=1))  # imaginary part should vanish

    corr_yx = np.fft.ifft(ffty * np.conjugate(fftx), axis=1)
    corr_yx = np.real(np.fft.fftshift(corr_yx, axes=1))

    # average the two one-sided parts and normalize by the overlap count per lag
    corr = 0.5 * (corr_xy[:, length:] + corr_yx[:, length:]) / np.arange(length - 1, 0, -1)
    return np.reshape(corr, corr.shape[1])
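A quick property check (not from the original source): for a constant signal every lag has full normalized overlap, so every returned value is 1.

import numpy as np

x = np.ones(256)
c = FFT_Correlation(x, x)
assert c.shape == (255,) and np.allclose(c, 1.0)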
Example #6
def MaxInnerProd(ser1, ser2, PSD):
  size = Numeric.shape(ser1)[0]
  pdlen = size//2
  nyquistf = 0.5/15.0   #   !!! hardcoded !!!!
  freqs = Numeric.arange(0,pdlen+1,dtype='d') * (nyquistf / pdlen)
  if(Numeric.shape(ser2)[0] != size):
     print("size of time series must be the same")
     sys.exit(1)
  if(Numeric.shape(PSD)[0] != pdlen):
     print("wrong size of psd:", pdlen, Numeric.shape(PSD))
     sys.exit(1)
  fourier1 = FFT.fft(ser1)
  fourier2 = FFT.fft(ser2)
  prod1 = Numeric.zeros(pdlen+1, dtype='d')
  prod2 = Numeric.zeros(pdlen+1, dtype='d')
  prod1[0] = 0.0
  # for real input series f[N-k] == conj(f[k]), so these sums are real
  prod1[1:pdlen] = numpy.real(numpy.multiply(fourier1[1:pdlen],numpy.conjugate(fourier2[1:pdlen])) + numpy.multiply(fourier1[-1:pdlen:-1],numpy.conjugate(fourier2[-1:pdlen:-1])))
  prod1[pdlen] = numpy.real(fourier1[pdlen]*fourier2[pdlen])
  prod2[0] = 0.0
  prod2[1:pdlen] = numpy.real(numpy.multiply(fourier1[1:pdlen],numpy.conjugate(fourier2[1:pdlen]*1.j)) + numpy.multiply((fourier1[-1:pdlen:-1]),numpy.conjugate(fourier2[-1:pdlen:-1]*(-1.j))))
  prod2[pdlen] = numpy.real(fourier1[pdlen]*fourier2[pdlen])
  Numeric.divide(prod1[1:], PSD, prod1[1:])
  Numeric.divide(prod2[1:], PSD, prod2[1:])
  olap0 = 0.0
  olappiby2 = 0.0
  for i in range(pdlen):
      if (freqs[i] > fLow and freqs[i] <= fHigh):   # fLow/fHigh are module globals
          olap0 += prod1[i]
          olappiby2 += prod2[i]
  olap0 = 2.0*olap0/float(size)
  olappiby2 = 2.0*olappiby2/float(size)
#  olap0 =  2.0*(numpy.sum(prod1[1:]))/float(size) #it must be scaled by dt
#  olappiby2 =  2.0*(numpy.sum(prod2[1:]))/float(size) #it must be scaled by dt
  print("angle of maxim. =", math.atan(olappiby2/olap0))
  return sqrt(olap0**2 + olappiby2**2)
Example #7
 def _npBatchMatmul(self, x, y, adjoint_a, adjoint_b):
   # the output shape depends on adjoint_a and adjoint_b
   if adjoint_a:
     x = np.conjugate(np.swapaxes(x, -1, -2))
   if adjoint_b:
     y = np.conjugate(np.swapaxes(y, -1, -2))
   return np.matmul(x, y)
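A quick check (not from the original test suite; the body does not use self, so None can stand in when the method is called as a plain function):

import numpy as np

x = np.array([[1 + 1j, 2 - 1j], [0 + 3j, 4 + 0j]])
y = np.eye(2, dtype=complex)
out = _npBatchMatmul(None, x, y, adjoint_a=True, adjoint_b=False)
assert np.allclose(out, np.conjugate(x).T)   # adjoint_a applies the conjugate transpose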
Example #8
    def get_alm(self,l=None,m=None,lms=None):
        """
        hp.map2alm only returns the positive m coefficients - we need
        to derive the negative ones ourselves if we are going to
        do anything with them outside healpy. See
        http://stackoverflow.com/questions/30888908/healpy-map2alm-function-does-not-return-expected-number-of-alm-values?lq=1
        for discussion.
        """
        if (l is None or m is None) and lms is None:
            return None
        
        elif l is None and m is None:
            ay = np.zeros(len(lms),dtype=np.complex128)
            for i in lms:
                if i[1] >= 0:
                    index = hp.Alm.getidx(self.lmax, i[0], i[1])
                    prefactor = 1.0
                    value = self.alm[index]
                else:
                    index = hp.Alm.getidx(self.lmax, i[0], -i[1])
                    prefactor = (-1.0)**i[1]
                    value = np.conjugate(self.alm[index])
                ay[i[0]**2+i[0]+i[1]-(lms[0][0])**2] = prefactor * value
            return ay

        elif m >= 0:
            index = hp.Alm.getidx(self.lmax, l, m)
            prefactor = 1.0
            value = self.alm[index]
        else:
            index = hp.Alm.getidx(self.lmax, l, -m)
            prefactor = (-1.0)**m
            value = np.conjugate(self.alm[index])

        return prefactor * value
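The conjugation relation used above can be checked directly with healpy, independent of the class (a minimal sketch, assuming healpy is installed; the map and lmax are arbitrary):

import numpy as np
import healpy as hp

lmax = 8
alm = hp.map2alm(np.random.randn(hp.nside2npix(16)), lmax=lmax)
l, m = 3, 2
a_lm = alm[hp.Alm.getidx(lmax, l, m)]
# for a real map, a_{l,-m} = (-1)^m * conj(a_{l,m})
a_l_minus_m = (-1.0)**m * np.conjugate(a_lm)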
Example #9
    def mix_parameters(self, Pibra, Piket):
        r"""Mix the two parameter sets :math:`\Pi_i` and :math:`\Pi_j`
        from the 'bra' and the 'ket' wavepackets :math:`\Phi\left[\Pi_i\right]`
        and :math:`\Phi^\prime\left[\Pi_j\right]`.

        :param Pibra: The parameter set :math:`\Pi_i` from the bra part wavepacket.
        :param Piket: The parameter set :math:`\Pi_j` from the ket part wavepacket.
        :return: The mixed parameters :math:`q_0` and :math:`Q_S`. (See the theory for details.)
        """
        # <Pibra | ... | Piket>
        qr, pr, Qr, Pr = Pibra
        qc, pc, Qc, Pc = Piket

        # Mix the parameters
        Gr = dot(Pr, inv(Qr))
        Gc = dot(Pc, inv(Qc))

        r = imag(Gc - conjugate(Gr.T))
        s = imag(dot(Gc, qc) - dot(conjugate(Gr.T), qr))

        q0 = dot(inv(r), s)
        Q0 = 0.5 * r

        # Here we can not avoid the matrix root by using svd
        Qs = inv(sqrtm(Q0))

        return (q0, Qs)
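A usage sketch with 1x1 "matrices" (not from the source; the bare names dot, inv, imag, conjugate and sqrtm come from the module's imports, reproduced here, and self is unused so None stands in):

from numpy import array, dot, imag, conjugate
from numpy.linalg import inv
from scipy.linalg import sqrtm

Pi = (array([[0.5]]), array([[0.0]]), array([[1.0]]), array([[1.0j]]))
q0, Qs = mix_parameters(None, Pi, Pi)   # mixing a parameter set with itself
# q0 recovers the common position q = 0.5 and Qs is the identity here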
Example #10
    def perform_quadrature(self, row, col):
        r"""Evaluates the integral :math:`\langle \Phi_i | \Phi^\prime_j \rangle`
        by an exact symbolic formula.

        :param row: The index :math:`i` of the component :math:`\Phi_i` of :math:`\Psi`.
        :param col: The index :math:`j` of the component :math:`\Phi^\prime_j` of :math:`\Psi^\prime`.
        :return: A single complex floating point number.
        """
        eps = self._packet.get_eps()

        Pibra = self._pacbra.get_parameters(component=row)
        Piket = self._packet.get_parameters(component=col)
        cbra = self._pacbra.get_coefficient_vector(component=row)
        cket = self._packet.get_coefficient_vector(component=col)
        Kbra = self._pacbra.get_basis_shapes(component=row)
        Kket = self._packet.get_basis_shapes(component=col)

        self._cache_factors(Pibra[:4], Piket[:4], Kbra, Kket, eps)

        result = array([[0.0j]], dtype=complexfloating)

        for r in Kbra:
            for c in Kket:
                cr = cbra[Kbra[r],0]
                cc = cket[Kket[c],0]
                i = self.exact_result_higher(Pibra[:4], Piket[:4], eps, r[0], c[0])
                result = result + conjugate(cr) * cc * i

        phase = exp(1.0j/eps**2 * (Piket[4]-conjugate(Pibra[4])))
        return phase * result
Example #11
def make_topo(a, A, B, C, D, M, E, t, sigma_0, mixing, iterations, N, n_layers):
	"""
	Creates a topological insulator from given parameters.
	"""	
	E_s = C + M - 4*((B+D)/(a**2))
	E_p = C - M - 4*((D-B)/(a**2))	
	V_ss = (B+D)/(a**2)
	V_pp = (D-B)/(a**2)
	V_sp = (-1)*(1.j)*(A)/(2*a)
	h_topo = np.diag([E_s,E_p,E_s,E_p])
	t_y = np.zeros((4,4), dtype=np.complex_)
	t_y[0,0] = V_ss
	t_y[0,1] = (1.j)*V_sp
	t_y[1,0] = (1.j)*np.conjugate(V_sp)
	t_y[1,1] = V_pp
	t_y[2,2] = V_ss
	t_y[2,3] = (-1)*t_y[1,0]
	t_y[3,2] = (-1)*t_y[0,1]
	t_y[3,3] = V_pp
	t_x = np.zeros((4,4), dtype=np.complex_)
	t_x[0,0] = V_ss
	t_x[0,1] = 1*V_sp
	t_x[1,0] = (-1)*np.conjugate(V_sp)
	t_x[1,1] = V_pp
	t_x[2,2] = V_ss
	t_x[2,3] = 1*np.conjugate(V_sp)
	t_x[3,2] = (-1)*V_sp
	t_x[3,3] = V_pp
	# reassigning a loop variable would not modify the originals, so swap explicitly
	h_topo = swap_rows(swap_cols(h_topo, 1, 2), 1, 2)
	t_x = swap_rows(swap_cols(t_x, 1, 2), 1, 2)
	t_y = swap_rows(swap_cols(t_y, 1, 2), 1, 2)
	topo = constructor.Constructor(E,h_topo,t_y,sigma_0,mixing,iterations,N, n_layers, t_x)
	return (topo, t_x)
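swap_cols and swap_rows are not shown in the snippet; a plausible stand-in, assuming each returns a copy with the two columns (rows) exchanged:

import numpy as np

def swap_cols(arr, i, j):
    out = arr.copy()
    out[:, [i, j]] = out[:, [j, i]]
    return out

def swap_rows(arr, i, j):
    out = arr.copy()
    out[[i, j], :] = out[[j, i], :]
    return out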
Example #12
def xcorr(x,y,**kwargs):
	"""cross correlation by rfft"""
	x = np.asarray(x)
	y = np.asarray(y)
	if np.ndim(x) == np.ndim(y):
		shape=kwargs.get('shape',np.max((x.shape, y.shape), axis = 0))
		return np.fft.irfftn(np.conjugate(np.fft.rfftn(x,s=shape))*np.fft.rfftn(y,s=shape))
	elif np.ndim(y) == 1:
		axis = kwargs.get('axis', 0)
		shape=kwargs.get('shape', max(x.shape[axis], len(y)))
		shape+=shape%2
		outshape = np.array(x.shape[:])
		outshape[axis] = shape
		out = np.zeros(outshape)
		y = np.fft.ifftshift(np.pad(y, pad_width = (int((shape-len(y)+1)/2), int((shape-len(y))/2)), mode = 'constant'))
		y_fft = np.fft.rfft(y, n=shape)
		x_fft = np.fft.rfft(x, n=shape, axis=axis)
		if axis == 0:
			for ii in range(len(x_fft[0])):
				out[:,ii] = np.fft.irfft(x_fft[:,ii]*np.conjugate(y_fft))
		else:
			for ii in range(len(x_fft)):
				out[ii] = np.fft.irfft(x_fft[ii]*np.conjugate(y_fft))
		return out
	else:
		raise ValueError('Only inputs with dimensions of 1 or 2 can be processed.')
Example #13
    def exact_result_ground(self, Pibra, Piket, eps):
        r"""Compute the overlap integral :math:`\langle \phi_0 | \phi_0 \rangle` of
        the groundstate :math:`\phi_0` by using the symbolic formula:

        .. math::
            \langle \phi_0 | \phi_0 \rangle =
            \sqrt{\frac{-2 i}{Q_2 \overline{P_1} - P_2 \overline{Q_1}}} \cdot
              \exp \Biggl(
                \frac{i}{2 \varepsilon^2}
                \frac{Q_2 \overline{Q_1} \left(p_2-p_1\right)^2 + P_2 \overline{P_1} \left(q_2-q_1\right)^2}
                      {\left(Q_2 \overline{P_1} - P_2 \overline{Q_1}\right)}
              \\
              -\frac{i}{\varepsilon^2}
              \frac{\left(q_2-q_1\right) \left( Q_2 \overline{P_1} p_2 - P_2 \overline{Q_1} p_1\right)}
                   {\left(Q_2 \overline{P_1} - P_2 \overline{Q_1}\right)}
              \Biggr)

        Note that this is an internal method and usually there is no
        reason to call it from outside.

        :param Pibra: The parameter set :math:`\Pi = \{q_1,p_1,Q_1,P_1\}` of the bra :math:`\langle \phi_0 |`.
        :param Piket: The parameter set :math:`\Pi^\prime = \{q_2,p_2,Q_2,P_2\}` of the ket :math:`| \phi_0 \rangle`.
        :param eps: The semi-classical scaling parameter :math:`\varepsilon`.
        :return: The value of the integral :math:`\langle \phi_0 | \phi_0 \rangle`.
        """
        q1, p1, Q1, P1 = Pibra
        q2, p2, Q2, P2 = Piket
        hbar = eps**2
        X = Q2*conjugate(P1) - P2*conjugate(Q1)
        I = sqrt(-2.0j/X) * exp( 1.0j/(2*hbar) * (Q2*conjugate(Q1)*(p2 - p1)**2 + P2*conjugate(P1)*(q2 - q1)**2) / X
                                -1.0j/hbar *     ((q2 - q1)*(Q2*conjugate(P1)*p2 - P2*conjugate(Q1)*p1)) / X
                               )
        return I
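A sanity check (not from the source; sqrt, exp and conjugate are the module-level numpy imports the snippet relies on, and self is unused): for identical parameter sets with q_1 = q_2 and p_1 = p_2 the overlap is exactly 1.

from numpy import sqrt, exp, conjugate

Pi = (0.0, 0.0, 1.0, 1.0j)   # q, p, Q, P with Q*conj(P) - P*conj(Q) = -2j
assert abs(exact_result_ground(None, Pi, Pi, eps=0.1) - 1.0) < 1e-12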
Example #14
 def myzpk2tf(self, z, p, k):
         z = np.atleast_1d(z)
         k = np.atleast_1d(k)
         if len(z.shape) > 1:
                 temp = np.poly(z[0])
                 b = np.zeros((z.shape[0], z.shape[1] + 1), temp.dtype.char)
                 if len(k) == 1:
                         k = [k[0]] * z.shape[0]
                 for i in range(z.shape[0]):
                         b[i] = k[i] * np.poly(z[i])
         else:
                 b = k * np.poly(z)
         a = np.atleast_1d(np.poly(p))
         # Use real output if possible. Copied from numpy.poly, since
         # we can't depend on a specific version of numpy.
         if issubclass(b.dtype.type, np.complexfloating):
                 # if the complex roots all come in conjugate pairs, the coefficients are real.
                 roots = np.asarray(z, complex)
                 pos_roots = np.compress(roots.imag > 0, roots)
                 neg_roots = np.conjugate(np.compress(roots.imag < 0, roots))
                 if len(pos_roots) == len(neg_roots):
                         if np.all(np.sort_complex(neg_roots) == np.sort_complex(pos_roots)):
                                 b = b.real.copy()
         if issubclass(a.dtype.type, np.complexfloating):
                 # if the complex roots all come in conjugate pairs, the coefficients are real.
                 roots = np.asarray(p, complex)
                 pos_roots = np.compress(roots.imag > 0, roots)
                 neg_roots = np.conjugate(np.compress(roots.imag < 0, roots))
                 if len(pos_roots) == len(neg_roots):
                         if np.all(np.sort_complex(neg_roots) == np.sort_complex(pos_roots)):
                                 a = a.real.copy()
         return b, a
Example #15
    def demodulate_data(self, data):
        """
        Demodulate the data from the FFT bin

        This function assumes that self.select_fft_bins was called to set up the necessary class attributes

        data : array of complex data

        returns : demodulated data in an array of the same shape and dtype as *data*
        """
        bank = self.bank
        hardware_delay = self.hardware_delay_estimate*1e6
        demod = np.zeros_like(data)
        t = np.arange(data.shape[0])
        for n, ich in enumerate(self.readout_selection):
            phi0 = self.phases[ich]
            k = self.tone_bins[bank, ich]
            m = self.fft_bins[bank, ich]
            if m >= self.nfft // 2:
                sign = -1.0
            else:
                sign = 1.0
            nfft = self.nfft
            ns = self.tone_nsamp
            f_tone = k * self.fs / float(ns)
            foffs = (2 * k * nfft - m * ns) / float(ns)
            wc = self._window_response(foffs / 2.0) * (self.tone_nsamp / 2.0 ** 18)
            #print "chan",m,"tone",k,"sign",sign,"foffs",foffs
            demod[:, n] = (wc * np.exp(sign * 1j * (2 * np.pi * foffs * t + phi0) - sign *
                                       2j*np.pi*f_tone*hardware_delay)
                           * data[:, n])
            if m >= self.nfft // 2:
                demod[:, n] = np.conjugate(demod[:, n])
        return self.wavenorm*np.conjugate(demod)
Example #16
def toeplitz(c,r=None):
    """ Construct a toeplitz matrix (i.e. a matrix with constant diagonals).

        Description:

           toeplitz(c,r) is a non-symmetric Toeplitz matrix with c as its first
           column and r as its first row.

           toeplitz(c) is a symmetric (Hermitian) Toeplitz matrix (r=c).

        See also: hankel
    """
    isscalar = numpy.isscalar
    if isscalar(c) or isscalar(r):
        return c
    if r is None:
        r = c
        r[0] = conjugate(r[0])
        c = conjugate(c)
    r,c = map(asarray_chkfinite,(r,c))
    r,c = map(ravel,(r,c))
    rN,cN = map(len,(r,c))
    if r[0] != c[0]:
        print "Warning: column and row values don't agree; column value used."
    vals = r_[r[rN-1:0:-1], c]
    cols = mgrid[0:cN]
    rows = mgrid[rN:0:-1]
    indx = cols[:,newaxis]*ones((1,rN),dtype=int) + \
           rows[newaxis,:]*ones((cN,1),dtype=int) - 1
    return take(vals, indx, 0)
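Usage sketch (the snippet relies on names such as r_, mgrid and take from a star import of numpy, reproduced here):

import numpy
from numpy import conjugate, asarray_chkfinite, ravel, r_, mgrid, ones, newaxis, take

print(toeplitz([1.0, 2.0, 3.0]))
# [[1. 2. 3.]
#  [2. 1. 2.]
#  [3. 2. 1.]]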
Example #17
    def calclogI(self):
        """
        The logarithm of the intensity function.

        Returns:
        A complex value representing the logarithm of the intensity function.
        """
        ret = complex(0., 0.)
        for n in range(0, len(self.alphaList)-1, 1):
            argret = complex(0., 0.)
            for wave1 in self.waves:
                for wave2 in self.waves:
                    if len(self.productionAmplitudes) != 0:
                        # guard against a logarithmic domain error below
                        arg = (self.productionAmplitudes[self.waves.index(wave1)]
                               * numpy.conjugate(self.productionAmplitudes[self.waves.index(wave2)])
                               * wave1.complexamplitudes[n]
                               * numpy.conjugate(wave2.complexamplitudes[n])
                               * spinDensity(self.beamPolarization, self.alphaList[n])[wave1.epsilon, wave2.epsilon])
                        argret += arg
            argret = argret.real
            if self.debugPrinting == 1:
                print("loop#", n, "="*10)
                print("argval:", arg)
                print("argtype:", type(arg))
                print("productionAmps1:", self.productionAmplitudes[self.waves.index(wave1)])
                print("productionAmps2*:", numpy.conjugate(self.productionAmplitudes[self.waves.index(wave2)]))
                print("spinDensityValue:", spinDensity(self.beamPolarization, self.alphaList[n])[wave1.epsilon, wave2.epsilon])
                print("A1:", wave1.complexamplitudes[n])
                print("A2*:", numpy.conjugate(wave2.complexamplitudes[n]))
            if argret > 0.:
                ret += log(argret)

            self.iList.append(argret)
        return ret
Example #18
def build_eh_nonh(hin,c1=None,c2=None):
  """Creates an electron-hole matrix from an input matrix, coupling
     electrons and holes
      - hin is the hamiltonian for electrons, which has the usual common form
      - c1 (c2) is the matrix which gives the coupling between an electron
        on state i and a hole on state j; for example, with s-wave pairing
        the non-vanishing elements are (0,1),(2,3),(4,5) and so on..."""
  n = len(hin)  # dimension of input
  nn = 2*n  # dimension of output
  hout = np.matrix(np.zeros((nn,nn),dtype=complex))  # output hamiltonian
  for i in range(n):
    for j in range(n):
      hout[2*i,2*j] = hin[i,j]  # electron term
      hout[2*i+1,2*j+1] = -np.conjugate(hin[i,j])  # hole term
  if c1 is not None: # if there is coupling
    for i in range(n):
      for j in range(n):
        # couples electron in i with hole in j
        hout[2*i,2*j+1] = c1[i,j]  # electron hole term
  if c2 is not None: # if there is coupling
    for i in range(n):
      for j in range(n):
        # couples hole in i with electron in j
        hout[2*j+1,2*i] = np.conjugate(c2[i,j])  # hole electron term
  return hout 
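A usage sketch (not from the source): one electron site with on-site energy 1 coupled to its hole partner by an s-wave pairing of 0.2.

import numpy as np

h = np.matrix([[1.0]])
delta = np.matrix([[0.2]])
print(build_eh_nonh(h, c1=delta, c2=delta))
# electron block +1, hole block -1, off-diagonal pairing 0.2 (complex dtype)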
Example #19
    def func(self, X, V):
        k = self.C.TFdata.k
        v1 = self.C.TFdata.v1
        w1 = self.C.TFdata.w1

        if k >=0:
            J_coords = self.F.sysfunc.J_coords
            w = sqrt(k)

            q = v1 - (1j/w)*matrixmultiply(self.F.sysfunc.J_coords,v1)
            p = w1 + (1j/w)*matrixmultiply(transpose(self.F.sysfunc.J_coords),w1)

            p /= linalg.norm(p)
            q /= linalg.norm(q)

            p = reshape(p,(p.shape[0],))
            q = reshape(q,(q.shape[0],))

            direc = conjugate(1/matrixmultiply(transpose(conjugate(p)),q))
            p = direc*p

            l1 = firstlyapunov(X, self.F.sysfunc, w, J_coords=J_coords, p=p, q=q)

            return array([l1])
        else:
            return array([1])
Example #20
def MakeEpsilonScreen(Nx, Ny, rngseed = 0):
    if rngseed != 0:
        np.random.seed( rngseed )

    epsilon = np.random.normal(loc=0.0, scale=1.0/math.sqrt(2), size=(Nx,Ny)) + 1j * np.random.normal(loc=0.0, scale=1.0/math.sqrt(2), size=(Nx,Ny))
    epsilon[0][0] = 0.0

    #Now let's ensure that it has the necessary conjugation symmetry
    for x in range(Nx):
        if x > (Nx-1)/2:
            epsilon[0][x] = np.conjugate(epsilon[0][Nx-x])
        for y in range((Ny-1)//2, Ny):
            x2 = Nx - x
            y2 = Ny - y
            if x2 == Nx:
                x2 = 0
            if y2 == Ny:
                y2 = 0
            epsilon[y][x] = np.conjugate(epsilon[y2][x2])

    if no_linear_shift:  # module-level flag
        epsilon[0,0] = 0
        epsilon[1,0] = 0
        epsilon[0,1] = 0
        epsilon[-1,0] = 0
        epsilon[0,-1] = 0

    return epsilon
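A property check (not from the original source): no_linear_shift is a module-level flag the snippet expects, so it is defined here; numpy and math are the imports the snippet itself relies on.

import math
import numpy as np

no_linear_shift = False
eps = MakeEpsilonScreen(8, 8, rngseed=42)
# conjugation symmetry epsilon[-y][-x] == conj(epsilon[y][x]) (indices mod N)
assert eps[0][5] == np.conjugate(eps[0][3])
assert eps[7][7] == np.conjugate(eps[1][1])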
Example #21
    def __init__(self, A = None, eps = 1e-14):

        if A is None:
            self.core = 0
            self.u = [0, 0, 0]
            self.n = [0, 0, 0]
            self.r = [0, 0, 0]
            return

        N1, N2, N3 = A.shape

        B1 = np.reshape(A, (N1, -1), order='F')
        B2 = np.reshape(np.transpose(A, [1, 0, 2]), (N2, -1), order='F' )
        B3 = np.reshape(np.transpose(A, [2, 0, 1]), (N3, -1), order='F')

        U1, V1, r1 = svd_trunc(B1, eps)
        U2, V2, r2 = svd_trunc(B2, eps)
        U3, V3, r3 = svd_trunc(B3, eps)

        G = np.tensordot(A, np.conjugate(U3), (2,0))
        G = np.transpose(G, [2, 0, 1])
        G = np.tensordot(G, np.conjugate(U2), (2,0))
        G = np.transpose(G, [0, 2, 1])
        G = np.tensordot(G, np.conjugate(U1), (2,0))
        G = np.transpose(G, [2, 1, 0])

        self.n = [N1, N2, N3]
        self.r = G.shape
        self.u = [U1, U2, U3]
        self.core = G
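svd_trunc is not shown in the snippet; a plausible stand-in, assuming it returns the leading left singular vectors, the corresponding right factors and the eps-truncation rank:

import numpy as np

def svd_trunc(B, eps=1e-14):
    U, s, Vh = np.linalg.svd(B, full_matrices=False)
    # discard the tail of singular values whose norm is below eps (relative)
    tail = np.sqrt(np.cumsum(s[::-1]**2)[::-1])   # tail[i] = ||s[i:]||
    r = max(1, int(np.sum(tail > eps * np.linalg.norm(s))))
    return U[:, :r], Vh[:r].conj().T, r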
Example #22
def dft2d(img, flags):
    if flags == 1:
        return Fdft2d(img)
    elif flags == -1:
        # inverse transform via the conjugation trick, assuming Fdft2d is an
        # unnormalized forward DFT: IDFT(x) = conj(DFT(conj(x))) / N
        return np.conjugate(Fdft2d(np.conjugate(img))) / img.size
Example #23
def get_moments(v,m,n=100,use_fortran=use_fortran):
  """ Get the first 2n moments of a given vector
  using the Chebyshev recursion relations"""
  if use_fortran:
    from kpmf90 import get_momentsf90 # fortran routine
    mo = coo_matrix(m) # convert to coo matrix
    vo = v.todense() # convert to conventional vector
    vo = np.array([vo[i,0] for i in range(len(vo))])
# call the fortran routine
    mus = get_momentsf90(mo.row+1,mo.col+1,mo.data,vo,n)
    return mus # return fortran result
  else:
    mus = np.array([0.0j for i in range(2*n)]) # empty array for the moments
    a = v.copy() # vector number 0
    am = v.copy() # will hold the previous vector
    a = m*v  # vector number 1
    bk = (np.transpose(np.conjugate(v))*v)[0,0] # scalar product
    bk1 = (np.transpose(np.conjugate(v))*a)[0,0] # scalar product
    mus[0] = bk  # mu0
    mus[1] = bk1 # mu1
    for i in range(1,n):
      ap = 2*m*a - am # Chebyshev recursion relation
      bk = (np.transpose(np.conjugate(a))*a)[0,0] # scalar product
      bk1 = (np.transpose(np.conjugate(ap))*a)[0,0] # scalar product
      mus[2*i] = 2.*bk
      mus[2*i+1] = 2.*bk1
      am = a + 0. # shift the vectors for the next iteration
      a = ap + 0.
    mu0 = mus[0] # first moment
    mu1 = mus[1] # second moment
    for i in range(1,n):
      mus[2*i] -= mu0
      mus[2*i+1] -= mu1
    return mus
Example #24
 def testDelta_simmetric(self):
     m = 1
     c1 = 2; n1 = 4
     c2 = 3; n2 = 4
     func = lambda x: pro_ang1(m, n1, c1, x)[0] * numpy.conjugate(pro_ang1(m, n2, c2, x)[0])
     func2 = lambda x: pro_ang1(m, n2, c2, x)[0] * numpy.conjugate(pro_ang1(m, n1, c1, x)[0])
     # quad returns (value, abserr); compare only the integral values
     self.assertAlmostEqual(quad(func, -1, 1)[0], quad(func2, -1, 1)[0], places=7)
Example #25
    def perform_quadrature(self, row, col):
        r"""Evaluates the integral :math:`\langle \Phi_i | \Phi^\prime_j \rangle`
        by an exact symbolic formula.

        .. warning:: This method does only take into account the ground state
                     basis components :math:`\phi_{\underline{0}}` from both,
                     the 'bra' and the 'ket'. If the wavepacket :math:`\Phi`
                     contains higher order basis functions :math:`\phi_{\underline{k}}`
                     with non-zero coefficients :math:`c_{\underline{k}}`, the inner products
                     computed are wrong! There is also no warning about that.

        :param row: The index :math:`i` of the component :math:`\Phi_i` of :math:`\Psi`.
        :param col: The index :math:`j` of the component :math:`\Phi^\prime_j` of :math:`\Psi^\prime`.
        :return: A single complex floating point number.
        """
        eps = self._packet.get_eps()
        D = self._packet.get_dimension()

        Pibra = self._pacbra.get_parameters(component=row)
        Piket = self._packet.get_parameters(component=col)
        cbra = self._pacbra.get_coefficient_vector(component=row)
        cket = self._packet.get_coefficient_vector(component=col)
        Kbra = self._pacbra.get_basis_shapes(component=row)
        Kket = self._packet.get_basis_shapes(component=col)

        phase = exp(1.0j/eps**2 * (Piket[4]-conjugate(Pibra[4])))

        z = tuple(D*[0])
        cr = cbra[Kbra[z],0]
        cc = cket[Kket[z],0]
        i = self.exact_result_gauss(Pibra[:4], Piket[:4], D, eps)
        result = phase * conjugate(cr) * cc * i

        return result
Example #26
def verify_gaus_sum_complex_conj(N):

    ALMOST_ZERO = 0.001

    # get dirich_char_mat, which is a 2D matrix
    dirich_char_mat, phi_n, coprime_list, prim_char_stat_all = dirichi_char_for_n(N)

    # loop over rows. Note: the 1st row is the principal character
    for i in range(0, phi_n):

        chi = dirich_char_mat[i]

        gaus_sum = gaus_sum_for_dirich_char(chi)
        chi_conj = np.conjugate(chi)
        gaus_sum_for_chi_conj = gaus_sum_for_dirich_char(chi_conj)

        gaus_sum_conj = np.conjugate(gaus_sum)

        # chi(-1) = chi(N-1)
        # It seems this value is always 1 or -1, why?
        chi_minus_1 = chi[N-1]

        tmp = chi_minus_1*gaus_sum_conj

        assert(np.absolute(gaus_sum_for_chi_conj - tmp) < ALMOST_ZERO)
        # print 'chi      = ', chi
        # print 'chi_conj = ', chi_conj
        # print 'N = %d, i = %d'%(N, i), ', gaus_sum_for_chi_conj = ', gaus_sum_for_chi_conj, ', tmp = ', tmp, ', chi_minus_1 = ', chi_minus_1

    print('verify_gaus_sum_complex_conj() passed!')
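The identity being verified above is the standard conjugation relation for Gauss sums of Dirichlet characters,

$$ g(\overline{\chi}) = \chi(-1)\,\overline{g(\chi)}, \qquad g(\chi) = \sum_{a \bmod N} \chi(a)\, e^{2\pi i a / N}, $$

which also explains why chi(-1) is always 1 or -1: it is a root of chi(-1)^2 = chi(1) = 1.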
Example #27
    def quadrature(self, lcket, operator=None, component=None):
        r"""Delegates the evaluation of :math:`\langle\Upsilon|f|\Upsilon\rangle` for a general
        function :math:`f(x)` with :math:`x \in \mathbb{R}^D`.

        :param lcket: The linear combination :math:`\Upsilon` with :math:`J` summands :math:`\Psi_j`.
        :param operator: A matrix-valued function :math:`f(x): \mathbb{R}^D \rightarrow \mathbb{R}^{N \times N}`.
        :return: The value of :math:`\langle\Upsilon|f|\Upsilon\rangle`.
        :type: An :py:class:`ndarray`.
        """
        J = lcket.get_number_packets()
        packets = lcket.get_wavepackets()

        M = zeros((J, J), dtype=complexfloating)

        # Elements below the diagonal
        for row, pacbra in enumerate(packets):
            for col, packet in enumerate(packets[:row]):
                if self._obey_oracle:
                    if self._oracle.is_not_zero(pacbra, packet):
                        # TODO: Handle multi-component packets
                        M[row, col] = self._quad.quadrature(pacbra, packet, operator=operator, component=0)
                else:
                    # TODO: Handle multi-component packets
                    M[row, col] = self._quad.quadrature(pacbra, packet, operator=operator, component=0)

        M = M + conjugate(transpose(M))

        # Diagonal Elements
        for d, packet in enumerate(packets):
            # TODO: Handle multi-component packets
            M[d, d] = self._quad.quadrature(packet, packet, operator=operator, component=0)

        c = lcket.get_coefficients()

        return dot(conjugate(transpose(c)), dot(M, c))
Example #28
    def mfunc(uv, p, d):
        crd,t,(i,j) = p
        p1,p2 = a.miriad.pol2str[uv['pol']]
        #if i == j and (p1,p2) == ('y','x'): return p, None, None

#        if is_run1(t):
        ni = rewire_run1[i][p1]
        nj = rewire_run1[j][p2]
        if t!= curtime:
            aa.set_jultime(t)
            uvo['lst'] = aa.sidereal_time()
            uvo['ra'] = aa.sidereal_time()
            uvo['obsra'] = aa.sidereal_time()
#        else: return p, None
        if ni > nj:
            ni,nj = nj,ni
            
            if (p1,p2) != ('y','x'): d = n.conjugate(d)
        elif ni < nj and (p1,p2) == ('y','x'):
            d = n.conjugate(d)

        p = crd,t,(ni,nj)
#        print t,i,j,a.miriad.pol2str[uv['pol']],'->',ni,nj,'xx'
        d[orbchan].mask = 1
        return p,d
Example #29
  def update_expectation_values(self):
    """Calculate the expectation values of the different operators"""
    # this conjugate comes from an inconsistency
    # in the routines that calculate expectation values
    voccs = np.conjugate(self.wavefunctions) # get wavefunctions
    ks = self.kvectors # kpoints
    mode = self.correlator_mode # which correlator scheme to use
#    mode = "1by1"
    if mode=="plain": # conventional mode
      for v in self.interactions:
        v.vav = (voccs*v.a*voccs.H).trace()[0,0]/self.kfac # <vAv>
        v.vbv = (voccs*v.b*voccs.H).trace()[0,0]/self.kfac # <vBv>
    elif mode=="1by1": # conventional mode
      for v in self.interactions:
        phis = [self.hamiltonian.geometry.bloch_phase(v.dir,k*0.) for k in ks]
        v.vav = meanfield.expectation_value(voccs,v.a,np.conjugate(phis))/self.kfac # <vAv>
        v.vbv = meanfield.expectation_value(voccs,v.b,phis)/self.kfac # <vBv>
      self.v2cij() # update the v vector
    elif mode=="multicorrelator": # multicorrelator mode
      numc = len(self.interactions)*2 # number of correlators
      if self.bloch_multicorrelator:
        cs = multicorrelator_bloch(voccs,ks,self.lamb,self.ijk,self.dir,numc)
      else: cs = multicorrelator(voccs,self.lamb,self.ijk,numc)
      self.cij = cs/self.kfac # store in the object, already normalized
      self.cij2v() # update the expectation values
    else: raise ValueError("unknown correlator mode: " + str(mode))
Example #30
def MakeEpsilonScreenFromList(EpsilonList, N):
    epsilon = np.zeros((N,N),dtype=complex)
    #There are (N^2-1)/2 real parts followed by (N^2-1)/2 imaginary parts

    #The first (N-1)/2 fill the top row
    N_re = (N*N-1)//2
    i = 0
    for x in range(1,(N+1)//2):
        epsilon[0][x] = EpsilonList[i] + 1j * EpsilonList[i+N_re]
        epsilon[0][N-x] = np.conjugate(epsilon[0][x])
        i=i+1

    #The next N(N-1)/2 fill the following rows
    for y in range(1,(N+1)//2):
        for x in range(N):
            epsilon[y][x] = EpsilonList[i] + 1j * EpsilonList[i+N_re]

            x2 = N - x
            y2 = N - y
            if x2 == N:
                x2 = 0
            if y2 == N:
                y2 = 0

            epsilon[y2][x2] = np.conjugate(epsilon[y][x])
            i=i+1    

    if no_linear_shift:  # module-level flag
        epsilon[0,0] = 0
        epsilon[1,0] = 0
        epsilon[0,1] = 0
        epsilon[-1,0] = 0
        epsilon[0,-1] = 0

    return epsilon
Example #31
def C_polynomial(roots):
    s = sympy.symbols("s")
    polynomial = sympy.Poly(functools.reduce(
        lambda a, b: a * b, [s - np.conjugate(root) for root in roots]),
                            domain="CC")
    return [complex(c) for c in polynomial.coeffs()]
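Usage sketch: conjugating the single root 1j gives the factor s - conj(1j) = s + 1j (note that sympy's Poly.coeffs() returns only the nonzero coefficients):

import functools
import numpy as np
import sympy

print(C_polynomial([1j]))   # [(1+0j), 1j]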
Example #32
matrix = np.zeros((num_ms, lstop + 1))

output = []

for l in np.arange(lstop + 1):
    for j in np.arange(0, (2 * l + 1), 1):
        m = -l + j

        if (m >= 0):
            index = hp.sphtfunc.Alm.getidx(lmax, l, m)
            alm = alms[index]

        else:
            index = hp.sphtfunc.Alm.getidx(
                lmax, l, -m)  ## For a real map, a_{l -m} = (-1)^m a_{lm} *
            alm = np.conjugate(alms[index]) * (-1.)**m

        matrix[l - m][l] = (alm.real**2. + alm.imag**2.) / Cls[l]

        output.append([l, m, alm, np.absolute(alm), Cls[l]])

        for k in np.arange(num_ms):
            if (k >= (2 * l + 1)):
                matrix[k][l] = np.nan

        print(l, m, index, alm, matrix[l - m][l])

output = np.array(output, dtype=complex)
## output = np.array(output, dtype=[('ell', np.float32), ('m', np.float32), ('alm', np.float32), ('mod_alm', np.float32), ('Cl', np.float32)])
## output = np.sort(output,  order='mod_alm')
Example #33
def GetFreeCoupling_Eq(MADTwiss, FilesX, FilesY, Qh, Qv, Qx, Qy, psih_ac2bpmac,
                       psiv_ac2bpmac, bd, acdipole, oa):

    #-- Details of this algorithms is in http://www.agsrhichome.bnl.gov/AP/ap_notes/ap_note_410.pdf

    #-- Check linx/liny files, may be redundant
    if len(FilesX) != len(FilesY): return [{}, []]

    #-- Select common BPMs
    bpm = Utilities.bpm.model_intersect(
        Utilities.bpm.intersect(FilesX + FilesY), MADTwiss)
    bpm = [(b[0], str.upper(b[1])) for b in bpm]

    #-- Last BPM on the same turn to fix the phase shift by Q for exp data of LHC
    #if op=="1" and bd== 1: s_lastbpm=MADTwiss.S[MADTwiss.indx['BPMSW.1L2.B1']]
    #if op=="1" and bd==-1: s_lastbpm=MADTwiss.S[MADTwiss.indx['BPMSW.1L8.B2']]

    #-- Determine the BPM closest to the AC dipole and its position
    #BPMYB.6L4.B1 BPMYA.5L4.B1
    # BPMWA.B5L4.B1

    bpmac1_h = list(psih_ac2bpmac.keys())[0]
    bpmac2_h = list(psih_ac2bpmac.keys())[1]

    bpmac1_v = list(psiv_ac2bpmac.keys())[0]
    bpmac2_v = list(psiv_ac2bpmac.keys())[1]

    try:
        k_bpmac_h = list(zip(*bpm))[1].index(bpmac1_h)
        bpmac_h = bpmac1_h
    except ValueError:
        try:
            k_bpmac_h = list(zip(*bpm))[1].index(bpmac2_h)
            bpmac_h = bpmac2_h
        except ValueError:
            print('WARN: BPMs next to AC dipoles or ADT missing. AC or ADT dipole effects not calculated with analytic eqs for coupling', file=sys.stderr)
            return [{}, []]
    #      if 'B5R4' in b: bpmac1=b
    #if 'A5R4' in b: bpmac2=b
    try:
        k_bpmac_v = list(zip(*bpm))[1].index(bpmac1_v)
        bpmac_v = bpmac1_v
    except ValueError:
        try:
            k_bpmac_v = list(zip(*bpm))[1].index(bpmac2_v)
            bpmac_v = bpmac2_v
        except ValueError:
            print('WARN: BPMs next to AC dipoles or ADT missing. AC dipole or ADT effects not calculated with analytic eqs for coupling', file=sys.stderr)
            return [{}, []]
    print(k_bpmac_v, bpmac_v)
    print(k_bpmac_h, bpmac_h)
    #-- Global parameters of the driven motion
    dh = Qh - Qx
    dv = Qv - Qy
    rh = sin(np.pi * (Qh - Qx)) / sin(np.pi * (Qh + Qx))
    rv = sin(np.pi * (Qv - Qy)) / sin(np.pi * (Qv + Qy))
    rch = sin(np.pi * (Qh - Qy)) / sin(np.pi * (Qh + Qy))
    rcv = sin(np.pi * (Qx - Qv)) / sin(np.pi * (Qx + Qv))

    #-- Loop for files
    f1001Abs = np.zeros((len(bpm), len(FilesX)))
    f1010Abs = np.zeros((len(bpm), len(FilesX)))
    f1001xArg = np.zeros((len(bpm), len(FilesX)))
    f1001yArg = np.zeros((len(bpm), len(FilesX)))
    f1010xArg = np.zeros((len(bpm), len(FilesX)))
    f1010yArg = np.zeros((len(bpm), len(FilesX)))
    for i in range(len(FilesX)):

        #-- Read amplitudes and phases
        amph = np.array([FilesX[i].AMPX[FilesX[i].indx[b[1]]] for b in bpm])
        ampv = np.array([FilesY[i].AMPY[FilesY[i].indx[b[1]]] for b in bpm])
        amph01 = np.array([FilesX[i].AMP01[FilesX[i].indx[b[1]]] for b in bpm])
        ampv10 = np.array([FilesY[i].AMP10[FilesY[i].indx[b[1]]] for b in bpm])
        psih = 2 * np.pi * np.array(
            [FilesX[i].MUX[FilesX[i].indx[b[1]]] for b in bpm])
        psiv = 2 * np.pi * np.array(
            [FilesY[i].MUY[FilesY[i].indx[b[1]]] for b in bpm])
        psih01 = 2 * np.pi * np.array(
            [FilesX[i].PHASE01[FilesX[i].indx[b[1]]] for b in bpm])
        psiv10 = 2 * np.pi * np.array(
            [FilesY[i].PHASE10[FilesY[i].indx[b[1]]] for b in bpm])
        #-- I'm not sure this is correct for the coupling so I comment out this part for now (by RM 9/30/11).
        #for k in range(len(bpm)):
        #       try:
        #               if bpm[k][0]>s_lastbpm:
        #                       psih[k]  +=bd*2*np.pi*Qh  #-- To fix the phase shift by Qh
        #                       psiv[k]  +=bd*2*np.pi*Qv  #-- To fix the phase shift by Qv
        #                       psih01[k]+=bd*2*np.pi*Qv  #-- To fix the phase shift by Qv
        #                       psiv10[k]+=bd*2*np.pi*Qh  #-- To fix the phase shift by Qh
        #       except: pass

        #-- Construct Fourier components
        #   * be careful for that the note is based on x+i(alf*x*bet*x')).
        #   * Calculating Eqs (87)-(92) by using Eqs (47) & (48) (but in the Fourier space) in the note.
        #   * Note that amph(v)01 is normalized by amph(v) and it is un-normalized in the following.
        dpsih = np.append(psih[1:], 2 * np.pi * Qh + psih[0]) - psih
        dpsiv = np.append(psiv[1:], 2 * np.pi * Qv + psiv[0]) - psiv
        dpsih01 = np.append(psih01[1:], 2 * np.pi * Qv + psih01[0]) - psih01
        dpsiv10 = np.append(psiv10[1:], 2 * np.pi * Qh + psiv10[0]) - psiv10

        X_m10 = 2 * amph * np.exp(-1j * psih)
        Y_0m1 = 2 * ampv * np.exp(-1j * psiv)
        X_0m1 = amph * np.exp(-1j * psih01) / (1j * sin(dpsih)) * (
            amph01 * np.exp(1j * dpsih) -
            np.append(amph01[1:], amph01[0]) * np.exp(-1j * dpsih01))
        X_0p1 = amph * np.exp(1j * psih01) / (1j * sin(dpsih)) * (
            amph01 * np.exp(1j * dpsih) -
            np.append(amph01[1:], amph01[0]) * np.exp(1j * dpsih01))
        Y_m10 = ampv * np.exp(-1j * psiv10) / (1j * sin(dpsiv)) * (
            ampv10 * np.exp(1j * dpsiv) -
            np.append(ampv10[1:], ampv10[0]) * np.exp(-1j * dpsiv10))
        Y_p10 = ampv * np.exp(1j * psiv10) / (1j * sin(dpsiv)) * (
            ampv10 * np.exp(1j * dpsiv) -
            np.append(ampv10[1:], ampv10[0]) * np.exp(1j * dpsiv10))

        #-- Construct f1001hv, f1001vh, f1010hv (these include math.sqrt(betv/beth) or math.sqrt(beth/betv))
        f1001hv = -np.conjugate(
            1 / (2j) * Y_m10 / X_m10)  #-- - sign from the different def
        f1001vh = -1 / (2j) * X_0m1 / Y_0m1  #-- - sign from the different def
        f1010hv = -1 / (2j) * Y_p10 / np.conjugate(
            X_m10)  #-- - sign from the different def
        f1010vh = -1 / (2j) * X_0p1 / np.conjugate(
            Y_0m1)  #-- - sign from the different def
        ##              f1001hv=conjugate(1/(2j)*Y_m10/X_m10)
        ##              f1001vh=1/(2j)*X_0m1/Y_0m1
        ##              f1010hv=1/(2j)*Y_p10/conjugate(X_m10)
        ##              f1010vh=1/(2j)*X_0p1/conjugate(Y_0m1)

        #-- Construct phases psih, psiv, Psih, Psiv w.r.t. the AC dipole
        psih = psih - (psih[k_bpmac_h] - psih_ac2bpmac[bpmac_h])
        psiv = psiv - (psiv[k_bpmac_v] - psiv_ac2bpmac[bpmac_v])
        print('the phase to the device', k_bpmac_h, psih[k_bpmac_h], bpmac_h,
              (psih[k_bpmac_h] - psih_ac2bpmac[bpmac_h]))
        Psih = psih - np.pi * Qh
        Psih[:k_bpmac_h] = Psih[:k_bpmac_h] + 2 * np.pi * Qh
        Psiv = psiv - np.pi * Qv
        Psiv[:k_bpmac_v] = Psiv[:k_bpmac_v] + 2 * np.pi * Qv

        Psix = np.arctan((1 - rh) / (1 + rh) * np.tan(Psih)) % np.pi
        Psiy = np.arctan((1 - rv) / (1 + rv) * np.tan(Psiv)) % np.pi
        for k in range(len(bpm)):
            if Psih[k] % (2 * np.pi) > np.pi: Psix[k] = Psix[k] + np.pi
            if Psiv[k] % (2 * np.pi) > np.pi: Psiy[k] = Psiy[k] + np.pi

        psix = Psix - np.pi * Qx
        psix[k_bpmac_h:] = psix[k_bpmac_h:] + 2 * np.pi * Qx
        psiy = Psiy - np.pi * Qy
        psiy[k_bpmac_v:] = psiy[k_bpmac_v:] + 2 * np.pi * Qy

        #-- Construct f1001h, f1001v, f1010h, f1010v (these include math.sqrt(betv/beth) or math.sqrt(beth/betv))
        f1001h = 1 / math.sqrt(1 -
                               rv**2) * (np.exp(-1j *
                                                (Psiv - Psiy)) * f1001hv +
                                         rv * np.exp(1j *
                                                     (Psiv + Psiy)) * f1010hv)
        f1010h = 1 / math.sqrt(1 -
                               rv**2) * (np.exp(1j * (Psiv - Psiy)) * f1010hv +
                                         rv * np.exp(-1j *
                                                     (Psiv + Psiy)) * f1001hv)
        f1001v = 1 / math.sqrt(1 - rh**2) * (
            np.exp(1j * (Psih - Psix)) * f1001vh +
            rh * np.exp(-1j * (Psih + Psix)) * np.conjugate(f1010vh))
        f1010v = 1 / math.sqrt(1 - rh**2) * (
            np.exp(1j * (Psih - Psix)) * f1010vh +
            rh * np.exp(-1j * (Psih + Psix)) * np.conjugate(f1001vh))

        #-- Construct f1001 and f1010 from h and v BPMs (these include math.sqrt(betv/beth) or math.sqrt(beth/betv))
        g1001h = np.exp(-1j * (
            (psih - psih[k_bpmac_h]) -
            (psiy - psiy[k_bpmac_v]))) * (ampv / amph * amph[k_bpmac_h] /
                                          ampv[k_bpmac_v]) * f1001h[k_bpmac_h]
        g1001h[:k_bpmac_h] = 1 / (np.exp(2 * np.pi * 1j * (Qh - Qy)) -
                                  1) * (f1001h - g1001h)[:k_bpmac_h]
        g1001h[k_bpmac_h:] = 1 / (1 - np.exp(-2 * np.pi * 1j *
                                             (Qh - Qy))) * (f1001h -
                                                            g1001h)[k_bpmac_h:]

        g1010h = np.exp(-1j * (
            (psih - psih[k_bpmac_h]) +
            (psiy - psiy[k_bpmac_v]))) * (ampv / amph * amph[k_bpmac_h] /
                                          ampv[k_bpmac_v]) * f1010h[k_bpmac_h]
        g1010h[:k_bpmac_h] = 1 / (np.exp(2 * np.pi * 1j * (Qh + Qy)) -
                                  1) * (f1010h - g1010h)[:k_bpmac_h]
        g1010h[k_bpmac_h:] = 1 / (1 - np.exp(-2 * np.pi * 1j *
                                             (Qh + Qy))) * (f1010h -
                                                            g1010h)[k_bpmac_h:]

        g1001v = np.exp(-1j * (
            (psix - psix[k_bpmac_h]) -
            (psiv - psiv[k_bpmac_v]))) * (amph / ampv * ampv[k_bpmac_v] /
                                          amph[k_bpmac_h]) * f1001v[k_bpmac_v]
        g1001v[:k_bpmac_v] = 1 / (np.exp(2 * np.pi * 1j * (Qx - Qv)) -
                                  1) * (f1001v - g1001v)[:k_bpmac_v]
        g1001v[k_bpmac_v:] = 1 / (1 - np.exp(-2 * np.pi * 1j *
                                             (Qx - Qv))) * (f1001v -
                                                            g1001v)[k_bpmac_v:]

        g1010v = np.exp(-1j * (
            (psix - psix[k_bpmac_h]) +
            (psiv - psiv[k_bpmac_v]))) * (amph / ampv * ampv[k_bpmac_v] /
                                          amph[k_bpmac_h]) * f1010v[k_bpmac_v]
        g1010v[:k_bpmac_v] = 1 / (np.exp(2 * np.pi * 1j * (Qx + Qv)) -
                                  1) * (f1010v - g1010v)[:k_bpmac_v]
        g1010v[k_bpmac_v:] = 1 / (1 - np.exp(-2 * np.pi * 1j *
                                             (Qx + Qv))) * (f1010v -
                                                            g1010v)[k_bpmac_v:]

        f1001x = np.exp(1j * (psih - psix)) * f1001h
        f1001x = f1001x - rh * np.exp(
            -1j * (psih + psix)) / rch * np.conjugate(f1010h)
        f1001x = f1001x - 2j * sin(np.pi * dh) * np.exp(1j *
                                                        (Psih - Psix)) * g1001h
        f1001x = f1001x - 2j * sin(np.pi * dh) * np.exp(
            -1j * (Psih + Psix)) / rch * np.conjugate(g1010h)
        f1001x = 1 / math.sqrt(1 - rh**2) * sin(np.pi * (Qh - Qy)) / sin(
            np.pi * (Qx - Qy)) * f1001x

        f1010x = np.exp(1j * (psih - psix)) * f1010h
        f1010x = f1010x - rh * np.exp(
            -1j * (psih + psix)) * rch * np.conjugate(f1001h)
        f1010x = f1010x - 2j * sin(np.pi * dh) * np.exp(1j *
                                                        (Psih - Psix)) * g1010h
        f1010x = f1010x - 2j * sin(np.pi * dh) * np.exp(
            -1j * (Psih + Psix)) * rch * np.conjugate(g1001h)
        f1010x = 1 / math.sqrt(1 - rh**2) * sin(np.pi * (Qh + Qy)) / sin(
            np.pi * (Qx + Qy)) * f1010x

        f1001y = np.exp(-1j * (psiv - psiy)) * f1001v
        f1001y = f1001y + rv * np.exp(1j * (psiv + psiy)) / rcv * f1010v
        f1001y = f1001y + 2j * sin(np.pi * dv) * np.exp(-1j *
                                                        (Psiv - Psiy)) * g1001v
        f1001y = f1001y - 2j * sin(np.pi * dv) * np.exp(
            1j * (Psiv + Psiy)) / rcv * g1010v
        f1001y = 1 / math.sqrt(1 - rv**2) * sin(np.pi * (Qx - Qv)) / sin(
            np.pi * (Qx - Qy)) * f1001y

        f1010y = np.exp(1j * (psiv - psiy)) * f1010v
        f1010y = f1010y + rv * np.exp(-1j * (psiv + psiy)) * rcv * f1001v
        f1010y = f1010y - 2j * sin(np.pi * dv) * np.exp(1j *
                                                        (Psiv - Psiy)) * g1010v
        f1010y = f1010y + 2j * sin(np.pi * dv) * np.exp(
            -1j * (Psiv + Psiy)) * rcv * g1001v
        f1010y = 1 / math.sqrt(1 - rv**2) * sin(np.pi * (Qx + Qv)) / sin(
            np.pi * (Qx + Qy)) * f1010y

        #-- For B2, must be double checked
        if bd == -1:
            f1001x = -np.conjugate(f1001x)
            f1001y = -np.conjugate(f1001y)
            f1010x = -np.conjugate(f1010x)
            f1010y = -np.conjugate(f1010y)

        #-- Separate to amplitudes and phases, amplitudes averaged to cancel math.sqrt(betv/beth) and math.sqrt(beth/betv)
        for k in range(len(bpm)):
            f1001Abs[k][i] = math.sqrt(abs(f1001x[k] * f1001y[k]))
            f1010Abs[k][i] = math.sqrt(abs(f1010x[k] * f1010y[k]))
            f1001xArg[k][i] = np.angle(f1001x[k]) % (2 * np.pi)
            f1001yArg[k][i] = np.angle(f1001y[k]) % (2 * np.pi)
            f1010xArg[k][i] = np.angle(f1010x[k]) % (2 * np.pi)
            f1010yArg[k][i] = np.angle(f1010y[k]) % (2 * np.pi)

    #-- Output
    fwqw = {}
    goodbpm = []
    for k in range(len(bpm)):

        #-- Bad BPM flag based on phase
        badbpm = 0
        f1001xArgAve = phase.calc_phase_mean(f1001xArg[k], 2 * np.pi)
        f1001yArgAve = phase.calc_phase_mean(f1001yArg[k], 2 * np.pi)
        f1010xArgAve = phase.calc_phase_mean(f1010xArg[k], 2 * np.pi)
        f1010yArgAve = phase.calc_phase_mean(f1010yArg[k], 2 * np.pi)
        # This seems to be too conservative or something...
        if min(abs(f1001xArgAve - f1001yArgAve),
               2 * np.pi - abs(f1001xArgAve - f1001yArgAve)) > np.pi / 2:
            badbpm = 1
        if min(abs(f1010xArgAve - f1010yArgAve),
               2 * np.pi - abs(f1010xArgAve - f1010yArgAve)) > np.pi / 2:
            badbpm = 1

        badbpm = 0  # NB: overrides the phase-based flag above, so all BPMs are kept
        #-- Output
        if badbpm == 0:
            f1001AbsAve = np.mean(f1001Abs[k])
            f1010AbsAve = np.mean(f1010Abs[k])
            f1001ArgAve = phase.calc_phase_mean(
                np.append(f1001xArg[k], f1001yArg[k]), 2 * np.pi)
            f1010ArgAve = phase.calc_phase_mean(
                np.append(f1010xArg[k], f1010yArg[k]), 2 * np.pi)
            f1001Ave = f1001AbsAve * np.exp(1j * f1001ArgAve)
            f1010Ave = f1010AbsAve * np.exp(1j * f1010ArgAve)
            f1001AbsStd = math.sqrt(np.mean((f1001Abs[k] - f1001AbsAve)**2))
            f1010AbsStd = math.sqrt(np.mean((f1010Abs[k] - f1010AbsAve)**2))
            f1001ArgStd = phase.calc_phase_std(
                np.append(f1001xArg[k], f1001yArg[k]), 2 * np.pi)
            f1010ArgStd = phase.calc_phase_std(
                np.append(f1010xArg[k], f1010yArg[k]), 2 * np.pi)
            fwqw[bpm[k][1]] = [[f1001Ave, f1001AbsStd, f1010Ave, f1010AbsStd],
                               [
                                   f1001ArgAve / (2 * np.pi),
                                   f1001ArgStd / (2 * np.pi),
                                   f1010ArgAve / (2 * np.pi),
                                   f1010ArgStd / (2 * np.pi)
                               ]]  #-- Phases renormalized to [0,1)
            goodbpm.append(bpm[k])

    #-- Global parameters not implemented yet
    fwqw['Global'] = ['"null"', '"null"']

    return [fwqw, goodbpm]
Example #34
def find_lts(iq, thresh=0.8, us=1, cp=32, flip=False, lts_seq=[]):
    """
		Find the indices of LTSs in the input "iq" signal (upsampled by a factor of "up").
		"thresh" sets sensitivity.

		Inputs:
			iq: IQ samples
			thresh: threshold to detect peak
			us: upsampling factor, needed for generate_training_seq() function
			cp: cyclic prefix
			flip: Flag to specify order or LTS sequence.
			lts_seq: if transmitted lts sequence is provided, use it, otherwise generate it

		Returns:
			best_pk: highest LTS peak,
			lts_pks: the list of all detected LTSs, and
			lts_corr: the correlated signal, multiplied by itself delayed by 1/2 an LTS
	"""
    debug = False

    # If original signal not provided, generate LTS
    lts_seq = np.asarray(lts_seq)
    if lts_seq.size == 0:
        # full lts contains 2.5 64-sample-LTS sequences, we need only one symbol
        lts, lts_f = generate_training_seq(preamble_type='lts',
                                           cp=cp,
                                           upsample=us)
        peak_spacing = 64
    else:
        # If provided...
        lts = lts_seq
        # Special case - If half lts used
        if len(lts_seq) == 80:
            peak_spacing = 80
        else:
            peak_spacing = 64

    lts_tmp = lts[-64:]
    if flip:
        lts_flip = lts_tmp[::-1]
    else:
        lts_flip = lts_tmp

    lts_flip_conj = np.conjugate(lts_flip)
    sign_fct = iq / abs(iq)  # Equivalent to Matlab's sign function (X/abs(X))
    sign_fct = np.nan_to_num(sign_fct)  # Replace NaN values
    lts_corr = np.abs(np.convolve(lts_flip_conj, sign_fct))

    lts_pks = np.where(lts_corr > (thresh * np.max(lts_corr)))
    lts_pks = np.squeeze(lts_pks)
    x_vec, y_vec = np.meshgrid(lts_pks, lts_pks)

    # second_peak_idx, y = np.where((y_vec - x_vec) == len(lts_tmp))
    second_peak_idx, y = np.where((y_vec - x_vec) == peak_spacing)

    # To save mat files
    # sio.savemat('rx_iq_pilot.mat', {'iq_pilot': iq})

    if not second_peak_idx.any():
        if debug:
            print("NO LTS FOUND!")
        best_pk = []
    else:
        best_pk = lts_pks[
            second_peak_idx[0]]  # Grab only the first packet we have received

    if debug:
        # print("LTS: {}, BEST: {}".format(lts_pks, lts_pks[second_peak_idx]))
        if lts_pks.size > 1:
            fig = plt.figure()
            ax1 = fig.add_subplot(2, 1, 1)
            ax1.grid(True)
            ax1.plot(np.abs(iq))
            ax2 = fig.add_subplot(2, 1, 2)
            ax2.grid(True)
            ax2.stem(np.abs(lts_corr))
            ax2.scatter(lts_pks, 2 * np.ones(len(lts_pks)))
            plt.show()

    return best_pk, lts_pks, lts_corr
Example #35
D = np.zeros((len(kappa), len(tau)))
U = np.zeros(np.shape(D))
D0 = np.zeros(np.shape(D))
difference = np.zeros(np.shape(D))
dt = tau/750
Ts = np.ones(len(tau), dtype="int")*int(500)
T = 750 

nu = 1.2
F0 = 12/nu
Pe = 1
Dm  = 1
for i in range(len(tau)):
	omega = 2*np.pi/tau[i]
	rho = np.sqrt(1j*omega/Dm)
	rho_c = np.conjugate(rho)
	gamma = np.sqrt(1j*omega/nu)
	gamma_c = np.conjugate(gamma)
	D0[:,i] = 1 + Pe*Pe*F0*F0*np.tanh(gamma)*np.tanh(gamma_c)/(4*gamma*gamma_c*(gamma**4 - rho**4))*(1/(gamma*gamma)*(gamma/np.tanh(gamma) - gamma_c/np.tanh(gamma_c)) - 1/(rho*rho)*(rho/np.tanh(rho) - rho_c/np.tanh(rho_c)))


plt.figure(1)
kappa_cont = np.linspace(min(kappa), max(kappa), int(1e4))

for i in range(len(kappa)):
	for j in range(len(tau)):
		data = np.loadtxt(base+"Lx" +  str(Lx[i]) +"_tau"+ str(round(tau[j], 3)) +"_eps"+epsilon+"_nu1.2_D1.0_fzero0.0_fone12.0_res150_dt" + str(round(dt[j], 6)) + "/tdata.dat")
		print(kappa[i], tau[j], np.shape(data), np.shape(D))
		D[i, j] = sci.trapz(  data[:, 8][-T:],  data[:, 0][-T:] )/tau[j]
		difference[i, j] = abs(D[i, j] - sci.trapz(  np.trim_zeros(data[:, 8])[-2*T:-T],  np.trim_zeros(data[:, 0])[-2*T:-T] )/tau[j])/D[i,j]
		U[i, j] = sci.trapz(  data[:, 4][-T:],  data[:, 0][-T:] )/tau[j]
Example #36
 def getpower(self):
     """
     returns square of wavelet coefficient array
     """
     return (self.cwt* NP.conjugate(self.cwt)).real
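The same quantity can be written as np.abs(self.cwt)**2, since z * conj(z) == |z|**2 for complex z:

import numpy as np

z = np.array([1 + 2j, 3 - 4j])
assert np.allclose((z * np.conjugate(z)).real, np.abs(z)**2)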
Example #37
def _epoch_spectral_connectivity(data,
                                 sig_idx,
                                 tmin_idx,
                                 tmax_idx,
                                 sfreq,
                                 mode,
                                 window_fun,
                                 eigvals,
                                 wavelets,
                                 freq_mask,
                                 mt_adaptive,
                                 idx_map,
                                 block_size,
                                 psd,
                                 accumulate_psd,
                                 con_method_types,
                                 con_methods,
                                 n_signals,
                                 n_times,
                                 accumulate_inplace=True):
    """Connectivity estimation for one epoch see spectral_connectivity."""
    n_cons = len(idx_map[0])

    if wavelets is not None:
        n_times_spectrum = n_times
        n_freqs = len(wavelets)
    else:
        n_times_spectrum = 0
        n_freqs = np.sum(freq_mask)

    if not accumulate_inplace:
        # instantiate methods only for this epoch (used in parallel mode)
        con_methods = [
            mtype(n_cons, n_freqs, n_times_spectrum)
            for mtype in con_method_types
        ]

    if len(sig_idx) == n_signals:
        # we use all signals: use a slice for faster indexing
        sig_idx = slice(None, None)

    # compute tapered spectra
    if mode in ['multitaper', 'fourier']:
        x_mt = list()
        this_psd = list()
        sig_pos_start = 0
        for this_data in data:
            this_n_sig = this_data.shape[0]
            sig_pos_end = sig_pos_start + this_n_sig
            if not isinstance(sig_idx, slice):
                this_sig_idx = sig_idx[(sig_idx >= sig_pos_start) &
                                       (sig_idx < sig_pos_end)] - sig_pos_start
            else:
                this_sig_idx = sig_idx
            if isinstance(this_data, _BaseSourceEstimate):
                _mt_spectra_partial = partial(_mt_spectra,
                                              dpss=window_fun,
                                              sfreq=sfreq)
                this_x_mt = this_data.transform_data(_mt_spectra_partial,
                                                     idx=this_sig_idx,
                                                     tmin_idx=tmin_idx,
                                                     tmax_idx=tmax_idx)
            else:
                this_x_mt, _ = _mt_spectra(
                    this_data[this_sig_idx, tmin_idx:tmax_idx], window_fun,
                    sfreq)

            if mt_adaptive:
                # compute PSD and adaptive weights
                _this_psd, weights = _psd_from_mt_adaptive(this_x_mt,
                                                           eigvals,
                                                           freq_mask,
                                                           return_weights=True)

                # only keep freqs of interest
                this_x_mt = this_x_mt[:, :, freq_mask]
            else:
                # do not use adaptive weights
                this_x_mt = this_x_mt[:, :, freq_mask]
                if mode == 'multitaper':
                    weights = np.sqrt(eigvals)[np.newaxis, :, np.newaxis]
                else:
                    # hack so we can sum over axis=-2
                    weights = np.array([1.])[:, None, None]

                if accumulate_psd:
                    _this_psd = _psd_from_mt(this_x_mt, weights)

            x_mt.append(this_x_mt)
            if accumulate_psd:
                this_psd.append(_this_psd)

            # advance position before the next data chunk
            sig_pos_start = sig_pos_end

        x_mt = np.concatenate(x_mt, axis=0)
        if accumulate_psd:
            this_psd = np.concatenate(this_psd, axis=0)

    elif mode == 'cwt_morlet':
        # estimate spectra using CWT
        x_cwt = list()
        this_psd = list()
        sig_pos_start = 0
        for this_data in data:
            this_n_sig = this_data.shape[0]
            sig_pos_end = sig_pos_start + this_n_sig
            if not isinstance(sig_idx, slice):
                this_sig_idx = sig_idx[(sig_idx >= sig_pos_start) &
                                       (sig_idx < sig_pos_end)] - sig_pos_start
            else:
                this_sig_idx = sig_idx
            if isinstance(this_data, _BaseSourceEstimate):
                cwt_partial = partial(cwt,
                                      Ws=wavelets,
                                      use_fft=True,
                                      mode='same')
                this_x_cwt = this_data.transform_data(cwt_partial,
                                                      idx=this_sig_idx,
                                                      tmin_idx=tmin_idx,
                                                      tmax_idx=tmax_idx)
            else:
                this_x_cwt = cwt(this_data[this_sig_idx, tmin_idx:tmax_idx],
                                 wavelets,
                                 use_fft=True,
                                 mode='same')

            if accumulate_psd:
                this_psd.append((this_x_cwt * this_x_cwt.conj()).real)

            x_cwt.append(this_x_cwt)

            # advance position
            sig_pos_start = sig_pos_end

        x_cwt = np.concatenate(x_cwt, axis=0)
        if accumulate_psd:
            this_psd = np.concatenate(this_psd, axis=0)
    else:
        raise RuntimeError('invalid mode')

    # accumulate or return psd
    if accumulate_psd:
        if accumulate_inplace:
            psd += this_psd
        else:
            psd = this_psd
    else:
        psd = None

    # tell the methods that a new epoch starts
    for method in con_methods:
        method.start_epoch()

    # accumulate connectivity scores
    if mode in ['multitaper', 'fourier']:
        for i in range(0, n_cons, block_size):
            con_idx = slice(i, i + block_size)
            if mt_adaptive:
                csd = _csd_from_mt(x_mt[idx_map[0][con_idx]],
                                   x_mt[idx_map[1][con_idx]],
                                   weights[idx_map[0][con_idx]],
                                   weights[idx_map[1][con_idx]])
            else:
                csd = _csd_from_mt(x_mt[idx_map[0][con_idx]],
                                   x_mt[idx_map[1][con_idx]], weights, weights)

            for method in con_methods:
                method.accumulate(con_idx, csd)
    else:
        # cwt_morlet mode
        for i in range(0, n_cons, block_size):
            con_idx = slice(i, i + block_size)

            csd = x_cwt[idx_map[0][con_idx]] * \
                np.conjugate(x_cwt[idx_map[1][con_idx]])
            for method in con_methods:
                method.accumulate(con_idx, csd)

    return con_methods, psd
Beispiel #38
0
def PureQubits_to_InnerSquared(pureQubitA, pureQubitB):
    amplitude = np.inner(pureQubitA, np.conjugate(pureQubitB).T)
    return amplitude * np.conjugate(amplitude)
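A minimal usage sketch with hypothetical test states: the squared overlap of |+> and |0> should come out to 1/2.

import numpy as np

plus = np.array([1.0, 1.0], dtype=complex) / np.sqrt(2)  # |+>
zero = np.array([1.0, 0.0], dtype=complex)               # |0>

overlap_sq = PureQubits_to_InnerSquared(plus, zero)
print(overlap_sq.real)  # ~0.5, i.e. |<+|0>|^2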
Beispiel #39
0
 def multipliers(x, y):
     # real-valued signals have conjugate-symmetric spectra, so the
     # cross-spectrum x * conj(y) carries the full spectral information
     return x * np.conjugate(y)
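A sketch of how such a multiplier is typically used to build a cross-power spectrum of two real signals; the sample rate and test tones below are illustrative assumptions:

import numpy as np

fs = 100.0                              # sample rate (illustrative)
t = np.arange(0, 1, 1 / fs)
x = np.sin(2 * np.pi * 10 * t)          # 10 Hz test tone
y = np.sin(2 * np.pi * 10 * t + 0.5)    # same tone, shifted by 0.5 rad

X, Y = np.fft.rfft(x), np.fft.rfft(y)
cross = X * np.conjugate(Y)             # what multipliers(X, Y) computes
print(np.angle(cross[10]))              # phase of x relative to y: about -0.5 rad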
Beispiel #40
0
 def scalarProduct(n, m):
     return 0.5 * np.trace(np.dot(m, np.conjugate(n).T))
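With the 1/2 normalization this is the Hilbert-Schmidt inner product under which the Pauli matrices are orthonormal; a quick check, with the Pauli matrices defined inline and the product written out explicitly:

import numpy as np

sx = np.array([[0, 1], [1, 0]], dtype=complex)   # Pauli x
sz = np.array([[1, 0], [0, -1]], dtype=complex)  # Pauli z

# 0.5 * trace(m n^H): unit norm on the diagonal, zero off it
print(0.5 * np.trace(np.dot(sx, np.conjugate(sx).T)))  # (1+0j)
print(0.5 * np.trace(np.dot(sz, np.conjugate(sx).T)))  # 0j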
Beispiel #41
0
def plotSynDimBandStructGenTB(omega, delta, epsilon, tx, S, m0,c=c,kList=np.linspace(-1.0,1.0,600),save=False,plots=True):
    i=0  
        
    s=int(2*S+1)
    E=np.zeros((kList.size,s))
    m=np.zeros((kList.size,s))
    pops=np.zeros((kList.size,s,s))
    ktot=np.zeros((kList.size,s))
    
    mFvect=np.array([float(j-S) for j in range(s)])
  #  mFvect=np.arange(s)
    magInUnitCell=np.zeros((kList.size,s-2))
    for k in kList:
        H,kstates=RamanLatHamTB(k, omega, delta, epsilon, tx, S,m0,c=c)
        Energies, eigenstates = LA.eig(H)
        sort = np.argsort(Energies)
        Esorted, eVsorted = Energies[sort], eigenstates[:,sort]
        E[i]=Esorted
        ev=eVsorted.transpose()
        evA=ev.reshape(s,s)
        popsA=evA*np.conjugate(evA)
        pops[i]=popsA
        
        evB=ev.reshape(s,s)
        popsB=evB*np.conjugate(evB)
        ktot[i]=np.einsum('i,ji->j',kstates,popsB)
        
        m[i]=np.dot(popsA,mFvect)
        for j in range(s-2):
            magInUnitCell[i,j]=(-popsA[0,j]+popsA[0,j+2])/(popsA[0,j]+popsA[0,j+1]+popsA[0,j+2])
        i=i+1
   
    if plots:
        figure=plt.figure()
        panel=figure.add_subplot(1,1,1)
      #  panel.set_title(r'$\Omega$ = '+str(np.round(omega,2))+r'$E_L$, $\delta$ = '+str(np.round(delta,3))+r'$E_L$, $\epsilon$ = '+str(np.round(epsilon,3))+r'$E_L$, U = '+str(np.round(U,2))+r'$E_L$')
        for i in range(s):   
            d=panel.scatter(kList,E[:,i],c=m[:,i],vmin=-S,vmax=S, marker='_')
        cbar = figure.colorbar(d,ticks=np.array([-S,0,S]))
      #  cbar.ax.tick_params(labelsize=15) 
        cbar.set_label(r'$\langle m \rangle$', size=16)
        panel.set_xlabel(r'Crystal momentum [$k_L$]',size=16)
        panel.set_ylabel(r'Energy [$E_L$]',size=16)
        if saveBandStruct:
            plt.savefig('Z:/My Documents/papers etc/talks and posters/largeBS.jpg',dpi=400)

    
#    fig2=plt.figure()
#    pan2=fig2.add_subplot(1,1,1)    
#    pan2.set_title(r'$\Omega$ = '+str(np.round(omega,2))+r'$E_L$, $\delta$ = '+str(np.round(delta,3))+r'$E_L$, $\epsilon$ = '+str(np.round(epsilon,3))+r'$E_L$, U = '+str(np.round(U,2))+r'$E_L$')
#
#    for i in range(s):   
#        pan2.plot(kList,ktot[:,i],label='band '+str(i))
#    pan2.set_xlabel(r'$q/k_L$')
#    pan2.set_ylabel(r'Total momentum $[k_L]$')
#    plt.legend()
    
    
    kM=kList[np.where(pops[:,0,S-1]==np.max(pops[:,0,S-1]))]
    k0=kList[np.where(pops[:,0,S]==np.max(pops[:,0,S]))]
    kP=kList[np.where(pops[:,0,S+1]==np.max(pops[:,0,S+1]))]
    print(kM, k0, kP)
    
    chern=2.0*2.0/(3.0*(kP-kM))
    print('chern number from slope lowest band = '+str(chern))
    mM=m[kList.size//6,0]
    mP=m[-kList.size//6-1,0]
    print(mM, mP)
    print('chern number from displacement lowest band = '+str((mP-mM)/2.0))
    
    if plots:
        fig3=plt.figure()
    #    fig3.suptitle(r'F= '+str(S)+r', $\Omega$ = '+str(np.round(omega,2))+r'$E_L$, $\delta$ = '+str(np.round(delta,3))+r'$E_L$, $\epsilon$ = '+str(np.round(epsilon,3))+r'$E_L$, U = '+str(np.round(U,2))+r'$E_L$')
        pan3=fig3.add_subplot(1,1,1)    
        pan3.set_title('Lowest band')    
        for i in range(s):    
            pan3.scatter(kList,[i for j in range(kList.size)],c=pops[:,0,i],cmap='Blues',vmin=0.0,vmax=1.0/S,marker='_',linewidths=10)
        
        pan3.set_ylabel('Synthetic lattice site')
        pan3.set_xlabel(r'Crystal momentum [$k_L$]')
        
        fig3=plt.figure()
#        fig3.suptitle(r'F= '+str(S)+r', $\Omega$ = '+str(np.round(omega,2))+r'$E_L$, $\delta$ = '+str(np.round(delta,3))+r'$E_L$, $\epsilon$ = '+str(np.round(epsilon,3))+r'$E_L$, U = '+str(np.round(U,2))+r'$E_L$')
        pan3=fig3.add_subplot(1,1,1)    
        pan3.set_title('Lowest bands')  
    
    popAvg=np.zeros(pops[:,0,:].shape)
    for ind,k in enumerate(kList):
        for i in range(s):
            popAvg[ind,i]=np.average(pops[ind,0:int(np.ceil(s/q)),i])
    if plots:   
        for i in range(s):    
            pan3.scatter(kList,[i for j in range(kList.size)],c=popAvg[:,i],cmap='Blues',vmin=0.0,vmax=1.0/S,marker='_',linewidths=10)
    
    kM=kList[np.where(popAvg[:,S-1]==np.max(popAvg[:,S-1]))]
    k0=kList[np.where(popAvg[:,S]==np.max(popAvg[:,S]))]
    kP=kList[np.where(popAvg[:,S+1]==np.max(popAvg[:,S+1]))]
    #print kM, k0, kP
    
    chern2=2.0*2.0/(3.0*(kP-kM))
    print('chern number from slope lowest '+str(int(np.ceil(s/q)))+' bands = '+str(chern2))

    if plots:
        pan3.set_ylabel('Synthetic lattice site')
        pan3.set_xlabel(r'Crystal momentum [$k_L$]')
    
        fig3=plt.figure()
 #       fig3.suptitle(r'F= '+str(S)+r', $\Omega$ = '+str(np.round(omega,2))+r'$E_L$, $\delta$ = '+str(np.round(delta,3))+r'$E_L$, $\epsilon$ = '+str(np.round(epsilon,3))+r'$E_L$, U = '+str(np.round(U,2))+r'$E_L$')
        pan3=fig3.add_subplot(1,1,1)    
        pan3.set_title('Lowest bands no edges')      
    
    popAvg2=np.zeros(pops[:,0,:].shape)
    for ind,k in enumerate(kList):
        for i in range(s):
            popAvg2[ind,i]=np.average(pops[ind,0:int(np.ceil(s/q))-1,i])
            
    if plots:
        for i in range(s):    
            pan3.scatter(kList,[i for j in range(kList.size)],c=popAvg2[:,i],cmap='Blues',vmin=0.0,vmax=1.0/S,marker='_',linewidths=10)
            
        fig3=plt.figure()
#        fig3.suptitle(r'F= '+str(S)+r', $\Omega$ = '+str(np.round(omega,2))+r'$E_L$, $\delta$ = '+str(np.round(delta,3))+r'$E_L$, $\epsilon$ = '+str(np.round(epsilon,3))+r'$E_L$, U = '+str(np.round(U,2))+r'$E_L$')
        pan3=fig3.add_subplot(1,1,1)    
        pan3.set_title('Edge band')      
            
        for i in range(s):    
            pan3.scatter(kList,[i for j in range(kList.size)],c=pops[:,int(np.ceil(s/q))-1,i],cmap='Blues',vmin=0.0,vmax=1.0/S,marker='_',linewidths=10)

#    maxSite=np.zeros(kList.size)
#    for i in np.arange(kList.size):
#        maxSite[i]=np.where(pops[i,0,:]==np.max(pops[i,0,:]))[0][0]
#        
#    fig4=plt.figure()
#    pan4=fig4.add_subplot(1,1,1)
#    pan4.plot(kList,maxSite)
#    pan4.set_xlabel(r'$q/k_L$')
#    pan4.set_ylabel('synthetic site with maximum population')
    
#    pan4=fig3.add_subplot(2,1,2)    
#    pan4.set_title('Second band')    
#    for i in range(s):    
#        pan4.scatter(kList,[i for j in range(kList.size)],c=pops[:,1,i],vmin=0,vmax=1.0, cmap='Blues', marker='_',linewidths=10)
#    pan4.set_ylabel('Synthetic lattice site')
#    pan5=fig3.add_subplot(3,1,3)    
#    pan5.set_title('Average of First and Second band')    
#    for i in range(s):    
#        pan5.scatter(kList,[i for j in range(kList.size)],c=(pops[:,1,i]+pops[:,0,i])/2.0,vmin=0,vmax=1.0,cmap='Blues', marker='_',linewidths=10)
#    pan5.set_ylabel('Synthetic lattice site')
#    pan5.set_xlabel(r'$k/k_L$')
    mBands=np.dot(popAvg,mFvect)
    
    mM=mBands[kList.size//6]
    mP=mBands[-kList.size//6-1]
    print(mM, mP)
    print('chern number from displacement lowest band = '+str((mP-mM)/2.0))
    
    if plots:
        fig4=plt.figure()
        pan4=fig4.add_subplot(1,1,1)
        pan4.plot(kList,m[:,0],'k-',lw=2, label = 'lowest band')
        pan4.plot(kList,mBands,'b-',lw=2, label = 'lowest '+str(int(np.ceil(s/q))) +' bands' )
    #    for j in range(s-2):
    #        pan4.plot(kList,magInUnitCell[:,j])
        pan4.set_xlabel(r'Crystal momentum [$k_L$]')
        pan4.set_ylabel('Magnetization')
        plt.legend()
        cDict={}
        cDict[-2]='m-'
        cDict[-1]='r-'
        cDict[0]='g-'
        cDict[1]='b-'
        cDict[2]='c-'
    #
        fig5=plt.figure()
        pan5=fig5.add_subplot(1,1,1)
        for mF in np.arange(-S,S+1):
            pan5.plot(kList,pops[:,0,mF+S], cDict[mF], label=r'$m_F$='+str(mF))
            pan5.plot(kList+2.0,pops[:,1,mF+S],cDict[mF])
        pan5.set_xlabel(r'Crystal momentum [$k_L$]')
        pan5.set_ylabel('Fractional populations')
        pan5.set_title('Lowest band to second band')
        plt.legend()
    #
    #    fig6=plt.figure()
    #    pan6=fig6.add_subplot(1,1,1)
    #    for mF in np.arange(-S,S+1):
    #        pan6.plot(kList,pops[:,1,mF+S],label=r'$m_F$='+str(mF))
    #    pan6.set_xlabel(r'Crystal momentum [$k_L$]')
    #    pan6.set_ylabel('Fractional populations')
    #    pan6.set_title('Second band')
    #    plt.legend()    
    
    if save:
        filename='SynDimBandStructure_F'+str(S)+'_n'+str(n)+'_Chern'+str(int(c))
        if Flat:
            filename=filename+'Flat'
        print(filename)
        np.savez(filename, omega=omega,delta=delta,epsilon=epsilon,U=U,kList=kList,E=E,m=m,pops=pops,popAvg=popAvg)
#    
#    a=np.arange(2*S+1)
#    nbar=np.dot(m1pops,a)
#    fig7=plt.figure()
#    pan7=fig7.add_subplot(1,1,1)
#    pan7.plot(kList,nbar)
#    pan7.set_xlabel(r'Crystal momentum [$k_L$]')
#    pan7.set_ylabel('Center of mass')
#    plt.legend()   
    return kList, E, m, chern, chern2
Beispiel #42
0
def PureQubit_to_QubitDM(pureQubit):
    return np.outer(pureQubit, np.conjugate(pureQubit).T)
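A minimal check of the outer-product construction, with |+> as an illustrative input: a pure-state density matrix must have unit trace and be idempotent.

import numpy as np

plus = np.array([1.0, 1.0], dtype=complex) / np.sqrt(2)  # |+>
rho = np.outer(plus, np.conjugate(plus).T)               # what PureQubit_to_QubitDM builds
assert np.isclose(np.trace(rho).real, 1.0)               # unit trace
assert np.allclose(rho @ rho, rho)                       # rho^2 == rho for a pure state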
Beispiel #43
0
 def dag(a: np.ndarray) -> np.ndarray:
     return np.transpose(np.conjugate(a))
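Two properties worth checking on an arbitrary test matrix: the dagger is an involution, and it agrees with NumPy's shorthand a.conj().T.

import numpy as np

a = np.array([[1 + 2j, 3.0], [0.0, 4 - 1j]])
adag = np.transpose(np.conjugate(a))                     # what dag(a) computes
assert np.allclose(np.transpose(np.conjugate(adag)), a)  # involution
assert np.allclose(adag, a.conj().T)                     # same as the shorthand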
Beispiel #44
0
def _transpose(a, perm=None, conjugate=False, name='transpose'):  # pylint: disable=unused-argument
  x = np.transpose(a, perm)
  return np.conjugate(x) if conjugate else x
Beispiel #45
0
def outerpr(v1, v2):
    v1 = np.conjugate(v1)
    return np.outer(v1, v2)
Beispiel #46
0
def plotSynDimBandStructGenTBClean(omega, delta, epsilon, tx, S, m0,c=c,kList=np.linspace(-1.0,1.0,600),save=False,plots=True):
    i=0  
        
    s=int(2*S+1)
    E=np.zeros((kList.size,s))
    m=np.zeros((kList.size,s))
    pops=np.zeros((kList.size,s,s))
    ktot=np.zeros((kList.size,s))
    
    mFvect=np.array([float(j-S) for j in range(s)])
  #  mFvect=np.arange(s)
    magInUnitCell=np.zeros((kList.size,s-2))
    for k in kList:
        H,kstates=RamanLatHamTB(k, omega, delta, epsilon, tx, S,m0,c=c)
        Energies, eigenstates = LA.eig(H)
        sort = np.argsort(Energies)
        Esorted, eVsorted = Energies[sort], eigenstates[:,sort]
        E[i]=Esorted
        ev=eVsorted.transpose()
        evA=ev.reshape(s,s)
        popsA=evA*np.conjugate(evA)
        pops[i]=popsA
        
        evB=ev.reshape(s,s)
        popsB=evB*np.conjugate(evB)
        ktot[i]=np.einsum('i,ji->j',kstates,popsB)
        
        m[i]=np.dot(popsA,mFvect)
        for j in range(s-2):
            magInUnitCell[i,j]=(-popsA[0,j]+popsA[0,j+2])/(popsA[0,j]+popsA[0,j+1]+popsA[0,j+2])
        i=i+1
   
    if plots:
        figure=plt.figure()
        panel=figure.add_subplot(1,1,1)
      #  panel.set_title(r'$\Omega$ = '+str(np.round(omega,2))+r'$E_L$, $\delta$ = '+str(np.round(delta,3))+r'$E_L$, $\epsilon$ = '+str(np.round(epsilon,3))+r'$E_L$, U = '+str(np.round(U,2))+r'$E_L$')
        for i in range(s):   
            d=panel.scatter(kList,E[:,i],c=m[:,i],vmin=-S,vmax=S, marker='_')
        cbar = figure.colorbar(d,ticks=np.array([-S,0,S]))
      #  cbar.ax.tick_params(labelsize=15) 
        cbar.set_label(r'$\langle m \rangle$', size=16)
        panel.set_xlabel(r'Crystal momentum [$k_L$]',size=16)
        panel.set_ylabel(r'Energy [$E_L$]',size=16)
        if saveBandStruct:
            plt.savefig('Z:/My Documents/papers etc/talks and posters/largeBS.jpg',dpi=400)


    
    if plots:
        fig3=plt.figure()
    #    fig3.suptitle(r'F= '+str(S)+r', $\Omega$ = '+str(np.round(omega,2))+r'$E_L$, $\delta$ = '+str(np.round(delta,3))+r'$E_L$, $\epsilon$ = '+str(np.round(epsilon,3))+r'$E_L$, U = '+str(np.round(U,2))+r'$E_L$')
        pan3=fig3.add_subplot(1,1,1)    
        pan3.set_title('Lowest band')    
        for i in range(s):    
            pan3.scatter(kList,[i for j in range(kList.size)],c=pops[:,0,i],cmap='Blues',vmin=0.0,vmax=1.0/S,marker='_',linewidths=10)
        
        pan3.set_ylabel('Synthetic lattice site')
        pan3.set_xlabel(r'Crystal momentum [$k_L$]')
        
        fig3=plt.figure()
#        fig3.suptitle(r'F= '+str(S)+r', $\Omega$ = '+str(np.round(omega,2))+r'$E_L$, $\delta$ = '+str(np.round(delta,3))+r'$E_L$, $\epsilon$ = '+str(np.round(epsilon,3))+r'$E_L$, U = '+str(np.round(U,2))+r'$E_L$')
        pan3=fig3.add_subplot(1,1,1)    
        pan3.set_title('Lowest bands')  
    
    popAvg=np.zeros(pops[:,0,:].shape)
    for ind,k in enumerate(kList):
        for i in range(s):
            popAvg[ind,i]=np.average(pops[ind,0:int(np.ceil(s/q)),i])
    if plots:   
        for i in range(s):    
            pan3.scatter(kList,[i for j in range(kList.size)],c=popAvg[:,i],cmap='Blues',vmin=0.0,vmax=1.0/S,marker='_',linewidths=10)
    

    if plots:
        pan3.set_ylabel('Synthetic lattice site')
        pan3.set_xlabel(r'Crystal momentum [$k_L$]')
    
        fig3=plt.figure()
 #       fig3.suptitle(r'F= '+str(S)+r', $\Omega$ = '+str(np.round(omega,2))+r'$E_L$, $\delta$ = '+str(np.round(delta,3))+r'$E_L$, $\epsilon$ = '+str(np.round(epsilon,3))+r'$E_L$, U = '+str(np.round(U,2))+r'$E_L$')
        pan3=fig3.add_subplot(1,1,1)    
        pan3.set_title('Lowest bands no edges')      
    
    popAvg2=np.zeros(pops[:,0,:].shape)
    for ind,k in enumerate(kList):
        for i in range(s):
            popAvg2[ind,i]=np.average(pops[ind,0:int(np.ceil(s/q))-1,i])
            
    if plots:
        for i in range(s):    
            pan3.scatter(kList,[i for j in range(kList.size)],c=popAvg2[:,i],cmap='Blues',vmin=0.0,vmax=1.0/S,marker='_',linewidths=10)
            
        fig3=plt.figure()
#        fig3.suptitle(r'F= '+str(S)+r', $\Omega$ = '+str(np.round(omega,2))+r'$E_L$, $\delta$ = '+str(np.round(delta,3))+r'$E_L$, $\epsilon$ = '+str(np.round(epsilon,3))+r'$E_L$, U = '+str(np.round(U,2))+r'$E_L$')
        pan3=fig3.add_subplot(1,1,1)    
        pan3.set_title('Edge band')      
            
        for i in range(s):    
            pan3.scatter(kList,[i for j in range(kList.size)],c=pops[:,int(np.ceil(s/q))-1,i],cmap='Blues',vmin=0.0,vmax=1.0/S,marker='_',linewidths=10)


    return kList, E, m, pops
Beispiel #47
0
from numpy import conjugate,angle
z = complex(input("Enter a complex number: "))
print(abs(z))
print(conjugate(z))
print(angle(z))
Beispiel #48
0
 def conjugate(self):
     """Return the conjugate of the operator."""
     ret = self.copy()
     ret._coeff = np.conjugate(self.coeff)
     return ret
Beispiel #49
0
# multiply matrix A by matrix B
B = np.matrix([[4., 2.], [-1., 3.], [1., 5.]])
print("AB= \n", np.matmul(A, B))
print("AB= \n", np.einsum("mk,kn->mn", A, B))

# Hadamard (element-wise) product of matrices A and B
B = np.matrix([[-1., 2., 4.], [1., 8., 6.]])
print("A*B= \n", np.multiply(A, B))

# transpose of matrix A
print("A^T= \n", A.T)
print("A^T= \n", np.transpose(A, axes=(1, 0)))
print("A^T= \n", np.swapaxes(A, 1, 0))

# Hermitian transpose of a complex matrix
A = np.matrix([[3., 1. + 2.j, 2. + 3.j], [2., 3. - 4.j, 1. + 3.j]])
print("A^H= \n", A.H)
print("A^H= \n", np.swapaxes(np.conjugate(A), 1, 0))

# Hermitian transpose of a matrix product: (AB)^H = B^H A^H
A = np.matrix([[3., 1. + 2.j, 2. + 3.j], [2., 3. - 4.j, 1. + 3.j]])
B = np.matrix([[4. + 4.j, 2. - 3.j], [-1. + 1.j, 3. - 2.j],
               [1. + 3.j, 5. + 5.j]])
print("(AB)^H= \n", (np.matmul(A, B)).H)
print("B^H A^H= \n", np.matmul(B.H, A.H))

# definition of the identity matrix
I = np.eye(N=3)
print("I = \n", I)
Beispiel #50
0
def dotpr(v1, v2):
    v2 = np.conjugate(v2)
    return np.inner(v1, v2)
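Note the convention: dotpr conjugates its second argument, whereas NumPy's np.vdot conjugates its first, so the two agree with the arguments swapped. An illustrative check:

import numpy as np

u = np.array([1 + 1j, 2 + 0j])
v = np.array([1j, 1 - 1j])

# sum_i u_i * conj(v_i), written both ways
assert np.isclose(np.inner(u, np.conjugate(v)), np.vdot(v, u))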
Beispiel #51
0
def symmetric_strength_of_connection(A, theta=0):
    """Symmetric Strength Measure.

    Compute strength of connection matrix using the standard symmetric measure

    An off-diagonal connection A[i,j] is strong iff::

        abs(A[i,j]) >= theta * sqrt( abs(A[i,i]) * abs(A[j,j]) )

    Parameters
    ----------
    A : csr_matrix
        Matrix graph defined in sparse format.  Entry A[i,j] describes the
        strength of edge [i,j]
    theta : float
        Threshold parameter (positive).

    Returns
    -------
    S : csr_matrix
        Matrix graph defining strong connections.  S[i,j]=1 if vertex i
        is strongly influenced by vertex j.

    See Also
    --------
    classical_strength_of_connection : classical strength measure
    evolution_strength_of_connection : relaxation based strength measure

    Notes
    -----
        - For vector problems, standard strength measures may produce
          undesirable aggregates.  A "block approach" from Vanek et al. is used
          to replace vertex comparisons with block-type comparisons.  A
          connection between nodes i and j in the block case is strong if::

          ||AB[i,j]|| >= theta * sqrt( ||AB[i,i]||*||AB[j,j]|| ) where AB[k,l]

          is the matrix block (degrees of freedom) associated with nodes k and
          l, and ||.|| is a matrix norm, such as the Frobenius norm.

        - See [1996bVaMaBr]_ for more details.

    References
    ----------
    .. [1996bVaMaBr] Vanek, P. and Mandel, J. and Brezina, M.,
       "Algebraic Multigrid by Smoothed Aggregation for
       Second and Fourth Order Elliptic Problems",
       Computing, vol. 56, no. 3, pp. 179--196, 1996.
       http://citeseer.ist.psu.edu/vanek96algebraic.html

    Examples
    --------
    >>> import numpy as np
    >>> from pyamg.gallery import stencil_grid
    >>> from pyamg.strength import symmetric_strength_of_connection
    >>> n=3
    >>> stencil = np.array([[-1.0,-1.0,-1.0],
    ...                        [-1.0, 8.0,-1.0],
    ...                        [-1.0,-1.0,-1.0]])
    >>> A = stencil_grid(stencil, (n,n), format='csr')
    >>> S = symmetric_strength_of_connection(A, 0.0)

    """
    if theta < 0:
        raise ValueError('expected a positive theta')

    if sparse.isspmatrix_csr(A):
        # if theta == 0:
        #     return A

        Sp = np.empty_like(A.indptr)
        Sj = np.empty_like(A.indices)
        Sx = np.empty_like(A.data)

        fn = amg_core.symmetric_strength_of_connection
        fn(A.shape[0], theta, A.indptr, A.indices, A.data, Sp, Sj, Sx)

        S = sparse.csr_matrix((Sx, Sj, Sp), shape=A.shape)

    elif sparse.isspmatrix_bsr(A):
        M, N = A.shape
        R, C = A.blocksize

        if R != C:
            raise ValueError('matrix must have square blocks')

        if theta == 0:
            data = np.ones(len(A.indices), dtype=A.dtype)
            S = sparse.csr_matrix((data, A.indices.copy(), A.indptr.copy()),
                                  shape=(int(M / R), int(N / C)))
        else:
            # the strength of connection matrix is based on the
            # Frobenius norms of the blocks
            data = (np.conjugate(A.data) * A.data).reshape(-1, R * C)
            data = data.sum(axis=1)
            A = sparse.csr_matrix((data, A.indices, A.indptr),
                                  shape=(int(M / R), int(N / C)))
            return symmetric_strength_of_connection(A, theta)
    else:
        raise TypeError('expected csr_matrix or bsr_matrix')

    # Strength represents "distance", so take the magnitude
    S.data = np.abs(S.data)

    # Scale S by the largest magnitude entry in each row
    S = scale_rows_by_largest_entry(S)

    return S
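The threshold rule from the docstring is easy to sanity-check by hand on a dense toy matrix (values chosen arbitrarily for illustration):

import numpy as np

A = np.array([[4.0, -1.0], [-1.0, 2.0]])
theta = 0.3
# entry (0, 1) is strong iff |A[0,1]| >= theta * sqrt(|A[0,0]| * |A[1,1]|)
print(abs(A[0, 1]) >= theta * np.sqrt(abs(A[0, 0]) * abs(A[1, 1])))
# 1.0 >= 0.3 * sqrt(8) ~ 0.849, so this prints True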
Beispiel #52
0
def create_awterm_convolutionfunction(im,
                                      make_pb=None,
                                      nw=1,
                                      wstep=1e15,
                                      oversampling=8,
                                      support=6,
                                      use_aaf=True,
                                      maxsupport=512):
    """ Fill AW projection kernel into a GridData.

    :param im: Image template
    :param make_pb: Function to make the primary beam model image (hint: use a partial)
    :param nw: Number of w planes
    :param wstep: Step in w (wavelengths)
    :param oversampling: Oversampling of the convolution function in uv space
    :param support: Support of the convolution function in uv cells
    :param use_aaf: If True, apply the anti-aliasing (PSWF) correction
    :param maxsupport: Lower bound on the size of the internal transform grid
    :return: griddata correction Image, griddata kernel as GridData
    """
    d2r = numpy.pi / 180.0

    # We only need the griddata correction function for the PSWF so we make
    # it for the shape of the image
    nchan, npol, ony, onx = im.data.shape

    assert isinstance(im, Image)
    # Calculate the template convolution kernel.
    cf = create_convolutionfunction_from_image(im,
                                               oversampling=oversampling,
                                               support=support)

    cf_shape = list(cf.data.shape)
    cf_shape[2] = nw
    cf.data = numpy.zeros(cf_shape).astype('complex')

    cf.grid_wcs.wcs.crpix[4] = nw // 2 + 1.0
    cf.grid_wcs.wcs.cdelt[4] = wstep
    cf.grid_wcs.wcs.ctype[4] = 'WW'
    if numpy.abs(wstep) > 0.0:
        w_list = cf.grid_wcs.sub([5]).wcs_pix2world(range(nw), 0)[0]
    else:
        w_list = [0.0]

    assert isinstance(oversampling, int)
    assert oversampling > 0

    nx = max(maxsupport, 2 * oversampling * support)
    ny = max(maxsupport, 2 * oversampling * support)

    qnx = nx // oversampling
    qny = ny // oversampling

    cf.data[...] = 0.0

    subim = copy_image(im)
    ccell = onx * numpy.abs(d2r * subim.wcs.wcs.cdelt[0]) / qnx

    subim.data = numpy.zeros([nchan, npol, qny, qnx])
    subim.wcs.wcs.cdelt[0] = -ccell / d2r
    subim.wcs.wcs.cdelt[1] = +ccell / d2r
    subim.wcs.wcs.crpix[0] = qnx // 2 + 1.0
    subim.wcs.wcs.crpix[1] = qny // 2 + 1.0

    if use_aaf:
        this_pswf_gcf, _ = create_pswf_convolutionfunction(subim,
                                                           oversampling=1,
                                                           support=6)
        norm = 1.0 / this_pswf_gcf.data
    else:
        norm = 1.0

    if make_pb is not None:
        pb = make_pb(subim)
        rpb, footprint = reproject_image(pb, subim.wcs, shape=subim.shape)
        rpb.data[footprint.data < 1e-6] = 0.0
        norm *= rpb.data

    # We might need to work with a larger image
    padded_shape = [nchan, npol, ny, nx]
    thisplane = copy_image(subim)
    thisplane.data = numpy.zeros(thisplane.shape, dtype='complex')
    for z, w in enumerate(w_list):
        thisplane.data[...] = 0.0 + 0.0j
        thisplane = create_w_term_like(thisplane, w, dopol=True)
        thisplane.data *= norm
        paddedplane = pad_image(thisplane, padded_shape)
        paddedplane = fft_image(paddedplane)

        ycen, xcen = ny // 2, nx // 2
        for y in range(oversampling):
            ybeg = y + ycen + (support * oversampling) // 2 - oversampling // 2
            yend = y + ycen - (support * oversampling) // 2 - oversampling // 2
            # vv = range(ybeg, yend, -oversampling)
            for x in range(oversampling):
                xbeg = x + xcen + (support *
                                   oversampling) // 2 - oversampling // 2
                xend = x + xcen - (support *
                                   oversampling) // 2 - oversampling // 2

                # uu = range(xbeg, xend, -oversampling)
                cf.data[..., z, y,
                        x, :, :] = paddedplane.data[...,
                                                    ybeg:yend:-oversampling,
                                                    xbeg:xend:-oversampling]
                # for chan in range(nchan):
                #     for pol in range(npol):
                #         cf.data[chan, pol, z, y, x, :, :] = paddedplane.data[chan, pol, :, :][vv, :][:, uu]

    cf.data /= numpy.sum(
        numpy.real(cf.data[0, 0, nw // 2, oversampling // 2,
                           oversampling // 2, :, :]))
    cf.data = numpy.conjugate(cf.data)

    if use_aaf:
        pswf_gcf, _ = create_pswf_convolutionfunction(im,
                                                      oversampling=1,
                                                      support=6)
    else:
        pswf_gcf = create_empty_image_like(im)
        pswf_gcf.data[...] = 1.0

    return pswf_gcf, cf
Beispiel #53
0
 def __mul__(self, other):
     if isinstance(other, Wave):
         # Frobenius inner product: <A, B> = trace(A B^H) = sum_ij A_ij * conj(B_ij)
         return np.trace(np.dot(self.wave, np.conjugate(other.wave).T))
     if isinstance(other, float):
         return np.multiply(self.wave, other)
     return NotImplemented
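The trace form used above equals the element-wise sum over conjugated entries; a quick self-contained check with arbitrary test matrices:

import numpy as np

A = np.array([[1 + 2j, 3.0], [0.0, 4 - 1j]])
B = np.array([[2.0, 1j], [1.0, 1 + 1j]])

frob_trace = np.trace(np.dot(A, np.conjugate(B).T))  # trace(A B^H)
frob_sum = np.sum(A * np.conjugate(B))               # sum_ij A_ij * conj(B_ij)
assert np.isclose(frob_trace, frob_sum)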
Beispiel #54
0
def evolution_strength_of_connection(A,
                                     B=None,
                                     epsilon=4.0,
                                     k=2,
                                     proj_type="l2",
                                     block_flag=False,
                                     symmetrize_measure=True):
    """Evolution Strength Measure.

    Construct strength of connection matrix using an Evolution-based measure

    Parameters
    ----------
    A : csr_matrix, bsr_matrix
        Sparse NxN matrix
    B : None, array
        If B is None, then the near nullspace vector used is all ones.  If B
        is an (NxK) array, then B is taken to be the near nullspace vectors.
    epsilon : scalar
        Drop tolerance
    k : integer
        Number of ODE time steps; the step size is taken to be 1/rho(DinvA)
    proj_type : {'l2','D_A'}
        Define norm for constrained min prob, i.e. define projection
    block_flag : boolean
        If True, use a block D inverse as preconditioner for A during
        weighted-Jacobi

    Returns
    -------
    Atilde : csr_matrix
        Sparse matrix of strength values

    See [2008OlScTu]_ for more details.

    References
    ----------
    .. [2008OlScTu] Olson, L. N., Schroder, J., Tuminaro, R. S.,
       "A New Perspective on Strength Measures in Algebraic Multigrid",
       submitted, June, 2008.

    Examples
    --------
    >>> import numpy as np
    >>> from pyamg.gallery import stencil_grid
    >>> from pyamg.strength import evolution_strength_of_connection
    >>> n=3
    >>> stencil =  np.array([[-1.0,-1.0,-1.0],
    ...                        [-1.0, 8.0,-1.0],
    ...                        [-1.0,-1.0,-1.0]])
    >>> A = stencil_grid(stencil, (n,n), format='csr')
    >>> S = evolution_strength_of_connection(A,  np.ones((A.shape[0],1)))

    """
    # local imports for evolution_strength_of_connection
    from pyamg.util.utils import scale_rows, get_block_diag, scale_columns
    from pyamg.util.linalg import approximate_spectral_radius

    # ====================================================================
    # Check inputs
    if epsilon < 1.0:
        raise ValueError("expected epsilon > 1.0")
    if k <= 0:
        raise ValueError("number of time steps must be > 0")
    if proj_type not in ['l2', 'D_A']:
        raise ValueError("proj_type must be 'l2' or 'D_A'")
    if (not sparse.isspmatrix_csr(A)) and (not sparse.isspmatrix_bsr(A)):
        raise TypeError("expected csr_matrix or bsr_matrix")

    # ====================================================================
    # Format A and B correctly.
    # B must be in mat format, this isn't a deep copy
    if B is None:
        Bmat = np.ones((A.shape[0], 1), dtype=A.dtype)
    else:
        Bmat = np.asarray(B)

    # Pre-process A.  We need A in CSR, to be devoid of explicit 0's and have
    # sorted indices
    if (not sparse.isspmatrix_csr(A)):
        csrflag = False
        numPDEs = A.blocksize[0]
        D = A.diagonal()
        # Calculate Dinv*A
        if block_flag:
            Dinv = get_block_diag(A, blocksize=numPDEs, inv_flag=True)
            Dinv = sparse.bsr_matrix(
                (Dinv, np.arange(Dinv.shape[0]), np.arange(Dinv.shape[0] + 1)),
                shape=A.shape)
            Dinv_A = (Dinv * A).tocsr()
        else:
            Dinv = np.zeros_like(D)
            mask = (D != 0.0)
            Dinv[mask] = 1.0 / D[mask]
            Dinv[D == 0] = 1.0
            Dinv_A = scale_rows(A, Dinv, copy=True)
        A = A.tocsr()
    else:
        csrflag = True
        numPDEs = 1
        D = A.diagonal()
        Dinv = np.zeros_like(D)
        mask = (D != 0.0)
        Dinv[mask] = 1.0 / D[mask]
        Dinv[D == 0] = 1.0
        Dinv_A = scale_rows(A, Dinv, copy=True)

    A.eliminate_zeros()
    A.sort_indices()

    # Handle preliminaries for the algorithm
    dimen = A.shape[1]
    NullDim = Bmat.shape[1]

    # Get spectral radius of Dinv*A, this will be used to scale the time step
    # size for the ODE
    rho_DinvA = approximate_spectral_radius(Dinv_A)

    # Calculate D_A for later use in the minimization problem
    if proj_type == "D_A":
        D_A = sparse.spdiags([D], [0], dimen, dimen, format='csr')
    else:
        D_A = sparse.eye(dimen, dimen, format="csr", dtype=A.dtype)

    # Calculate (I - delta_t Dinv A)^k
    #      In order to later access columns, we calculate the transpose in
    #      CSR format so that columns will be accessed efficiently
    # Calculate the number of time steps that can be done by squaring, and
    # the number of time steps that must be done incrementally
    nsquare = int(np.log2(k))
    ninc = k - 2**nsquare

    # Calculate one time step
    Id = sparse.eye(dimen, dimen, format="csr", dtype=A.dtype)
    Atilde = (Id - (1.0 / rho_DinvA) * Dinv_A)
    Atilde = Atilde.T.tocsr()

    # Construct a sparsity mask for Atilde that will restrict Atilde^T to the
    # nonzero pattern of A, with the added constraint that row i of Atilde^T
    # retains only the nonzeros that are also in the same PDE as i.
    mask = A.copy()

    # Restrict to same PDE
    if numPDEs > 1:
        row_length = np.diff(mask.indptr)
        my_pde = np.mod(np.arange(dimen), numPDEs)
        my_pde = np.repeat(my_pde, row_length)
        mask.data[np.mod(mask.indices, numPDEs) != my_pde] = 0.0
        del row_length, my_pde
        mask.eliminate_zeros()

    # If the total number of time steps is a power of two, then there is
    # a very efficient computational short-cut.  Otherwise, we support
    # other numbers of time steps, through an inefficient algorithm.
    if ninc > 0:
        warn("The most efficient time stepping for the Evolution Strength "
             "Method is done in powers of two.\nYou have chosen " + str(k) +
             " time steps.")

        # Calculate (Atilde^nsquare)^T = (Atilde^T)^nsquare
        for i in range(nsquare):
            Atilde = Atilde * Atilde

        JacobiStep = (Id - (1.0 / rho_DinvA) * Dinv_A).T.tocsr()
        for i in range(ninc):
            Atilde = Atilde * JacobiStep
        del JacobiStep

        # Apply mask to Atilde, zeros in mask have already been eliminated at
        # start of routine.
        mask.data[:] = 1.0
        Atilde = Atilde.multiply(mask)
        Atilde.eliminate_zeros()
        Atilde.sort_indices()

    elif nsquare == 0:
        if numPDEs > 1:
            # Apply mask to Atilde, zeros in mask have already been eliminated
            # at start of routine.
            mask.data[:] = 1.0
            Atilde = Atilde.multiply(mask)
            Atilde.eliminate_zeros()
            Atilde.sort_indices()

    else:
        # Use computational short-cut for case (ninc == 0) and (nsquare > 0)
        # Calculate Atilde^k only at the sparsity pattern of mask.
        for i in range(nsquare - 1):
            Atilde = Atilde * Atilde

        # Call incomplete mat-mat mult
        AtildeCSC = Atilde.tocsc()
        AtildeCSC.sort_indices()
        mask.sort_indices()
        Atilde.sort_indices()
        amg_core.incomplete_mat_mult_csr(Atilde.indptr, Atilde.indices,
                                         Atilde.data, AtildeCSC.indptr,
                                         AtildeCSC.indices, AtildeCSC.data,
                                         mask.indptr, mask.indices, mask.data,
                                         dimen)

        del AtildeCSC, Atilde
        Atilde = mask
        Atilde.eliminate_zeros()
        Atilde.sort_indices()

    del Dinv, Dinv_A, mask

    # Calculate strength based on constrained min problem of
    # min( z - B*x ), such that
    # (B*x)|_i = z|_i, i.e. they are equal at point i
    # z = (I - (t/k) Dinv A)^k delta_i
    #
    # Strength is defined as the relative point-wise approx. error between
    # B*x and z.  We don't use the full z in this problem, only that part of
    # z that is in the sparsity pattern of A.
    #
    # Either the D-norm and inner product, or the l2-norm and inner product,
    # can be used to solve the constrained min problem.  Using D gives scale
    # invariance.
    #
    # This is a quadratic minimization problem with a linear constraint, so
    # we can build a linear system and solve it to find the critical point,
    # i.e. minimum.
    #
    # We exploit a known shortcut for the case of NullDim = 1.  The shortcut is
    # mathematically equivalent to the longer constrained min. problem

    if NullDim == 1:
        # Use shortcut to solve constrained min problem if B is only a vector
        # Strength(i,j) = | 1 - (z(i)/b(j))/(z(j)/b(i)) |
        # These ratios can be calculated by diagonal row and column scalings

        # Create necessary vectors for scaling Atilde
        #   It's not clear what to do where B == 0.  This is an
        #   easy programming solution that may make sense.
        Bmat_forscaling = np.ravel(Bmat)
        Bmat_forscaling[Bmat_forscaling == 0] = 1.0
        DAtilde = Atilde.diagonal()
        DAtildeDivB = np.ravel(DAtilde) / Bmat_forscaling

        # Calculate best approximation, z_tilde, in span(B)
        #   Importantly, scale_rows and scale_columns leave zero entries
        #   in the matrix.  For previous implementations this was useful
        #   because we assume data and Atilde.data are the same length below
        data = Atilde.data.copy()
        Atilde.data[:] = 1.0
        Atilde = scale_rows(Atilde, DAtildeDivB)
        Atilde = scale_columns(Atilde, np.ravel(Bmat_forscaling))

        # If angle in the complex plane between z and z_tilde is
        # greater than 90 degrees, then weak.  We can just look at the
        # dot product to determine if angle is greater than 90 degrees.
        angle = np.multiply(np.real(Atilde.data), np.real(data)) +\
            np.multiply(np.imag(Atilde.data), np.imag(data))
        angle = angle < 0.0
        angle = np.array(angle, dtype=bool)

        # Calculate Approximation ratio
        Atilde.data = Atilde.data / data

        # If approximation ratio is less than tol, then weak connection
        weak_ratio = (np.abs(Atilde.data) < 1e-4)

        # Calculate Approximation error
        Atilde.data = abs(1.0 - Atilde.data)

        # Set small ratios and large angles to weak
        Atilde.data[weak_ratio] = 0.0
        Atilde.data[angle] = 0.0

        # Set near perfect connections to 1e-4
        Atilde.eliminate_zeros()
        Atilde.data[Atilde.data < np.sqrt(np.finfo(float).eps)] = 1e-4

        del data, weak_ratio, angle

    else:
        # For use in computing local B_i^H*B, precompute the element-wise
        # multiply of each column of B with each other column.  We also scale
        # by 2.0 to account for BDB's eventual use in a constrained
        # minimization problem
        BDBCols = int(np.sum(np.arange(NullDim + 1)))
        BDB = np.zeros((dimen, BDBCols), dtype=A.dtype)
        counter = 0
        for i in range(NullDim):
            for j in range(i, NullDim):
                BDB[:, counter] = 2.0 *\
                    (np.conjugate(np.ravel(Bmat[:, i])) * np.ravel(D_A * Bmat[:, j]))
                counter = counter + 1

        # Choose tolerance for dropping "numerically zero" values later
        t = Atilde.dtype.char
        eps = np.finfo(np.float64).eps
        feps = np.finfo(np.float32).eps
        geps = np.finfo(np.float128).eps
        _array_precision = {'f': 0, 'd': 1, 'g': 2, 'F': 0, 'D': 1, 'G': 2}
        tol = {0: feps * 1e3, 1: eps * 1e6, 2: geps * 1e6}[_array_precision[t]]

        # Use constrained min problem to define strength
        amg_core.evolution_strength_helper(Atilde.data, Atilde.indptr,
                                           Atilde.indices, Atilde.shape[0],
                                           np.ravel(Bmat),
                                           np.ravel((D_A * Bmat.conj()).T),
                                           np.ravel(BDB), BDBCols, NullDim,
                                           tol)

        Atilde.eliminate_zeros()

    # All of the strength values are real by this point, so ditch the complex
    # part
    Atilde.data = np.array(np.real(Atilde.data), dtype=float)

    # Apply drop tolerance
    if epsilon != np.inf:
        amg_core.apply_distance_filter(dimen, epsilon, Atilde.indptr,
                                       Atilde.indices, Atilde.data)
        Atilde.eliminate_zeros()

    # Symmetrize
    if symmetrize_measure:
        Atilde = 0.5 * (Atilde + Atilde.T)

    # Set diagonal to 1.0, as each point is strongly connected to itself.
    Id = sparse.eye(dimen, dimen, format="csr")
    Id.data -= Atilde.diagonal()
    Atilde = Atilde + Id

    # If converted BSR to CSR, convert back and return amalgamated matrix,
    #   i.e. the sparsity structure of the blocks of Atilde
    if not csrflag:
        Atilde = Atilde.tobsr(blocksize=(numPDEs, numPDEs))

        n_blocks = Atilde.indices.shape[0]
        blocksize = Atilde.blocksize[0] * Atilde.blocksize[1]
        CSRdata = np.zeros((n_blocks, ))
        amg_core.min_blocks(n_blocks, blocksize,
                            np.ravel(np.asarray(Atilde.data)), CSRdata)
        # Atilde = sparse.csr_matrix((data, row, col), shape=(*,*))
        Atilde = sparse.csr_matrix((CSRdata, Atilde.indices, Atilde.indptr),
                                   shape=(int(Atilde.shape[0] / numPDEs),
                                          int(Atilde.shape[1] / numPDEs)))

    # Standardized strength values require small values be weak and large
    # values be strong.  So, we invert the algebraic distances computed here
    Atilde.data = 1.0 / Atilde.data

    # Scale C by the largest magnitude entry in each row
    Atilde = scale_rows_by_largest_entry(Atilde)

    return Atilde
Beispiel #55
0
def PEAK_MIMO(w_start, w_end, error_poles_direction, wr, deadtime_if=0):
    '''
    Multivariable controllability analysis of a system.

    Gives the minimum peak values on S and T, with or without dead time.
    R is the expected worst-case reference change, with the condition that
    ||R||2 <= 2. wr is the frequency up to which reference tracking is
    required. Pass deadtime_if=1 if the system has dead time.

    Parameters
    ----------
    w_start, w_end : float
        Start and end of the frequency range, as exponents for
        np.logspace(w_start, w_end, 100).
    error_poles_direction : float
        Small offset added to the poles and zeros to keep the SVD-based
        direction calculations from breaking down numerically.
    wr : float
        Frequency up to which reference tracking is required.
    deadtime_if : int
        1 if the system has dead time, 0 otherwise.

    Returns
    -------
    Ms_min : float
        Minimum peak value on S (equal to the minimum peak on T).
    '''

    # TODO use mimotf functions
    Zeros_G = zeros(G)
    Poles_G = poles(G)
    print('Zeros: ', Zeros_G)
    print('Poles: ', Poles_G)

    #skip calculations that are not needed
    #sensitivity peak of closed loop, eq 6-8 pg 224 Skogestad

    if np.sum(Zeros_G) != 0:
        if np.sum(Poles_G) != 0:

            #two matrices to save all the RHP zeros and poles directions
            yz_direction = np.matrix(
                np.zeros([G(0.001).shape[0], len(Zeros_G)]))
            yp_direction = np.matrix(
                np.zeros([G(0.001).shape[0], len(Poles_G)]))

            for i in range(len(Zeros_G)):

                [U, S,
                 V] = np.linalg.svd(G(Zeros_G[i] + error_poles_direction))
                yz_direction[:, i] = U[:, -1]

            for i in range(len(Poles_G)):
                #error_poles_direction is to prevent the numerical method from breaking
                [U, S,
                 V] = np.linalg.svd(G(Poles_G[i] + error_poles_direction))
                yp_direction[:, i] = U[:, 0]

            yz_mat1 = np.matrix(np.diag(Zeros_G)) * np.matrix(
                np.ones([len(Zeros_G), len(Zeros_G)]))
            yz_mat2 = yz_mat1.T

            Qz = (yz_direction.H * yz_direction) / (yz_mat1 + yz_mat2)

            yp_mat1 = np.matrix(np.diag(Poles_G)) * np.matrix(
                np.ones([len(Poles_G), len(Poles_G)]))
            yp_mat2 = yp_mat1.T

            Qp = (yp_direction.H * yp_direction) / (yp_mat1 + yp_mat2)

            yzp_mat1 = np.matrix(np.diag(Zeros_G)) * np.matrix(
                np.ones([len(Zeros_G), len(Poles_G)]))
            yzp_mat2 = np.matrix(np.ones(
                [len(Zeros_G), len(Poles_G)])) * np.matrix(np.diag(Poles_G))

            Qzp = yz_direction.H * yp_direction / (yzp_mat1 - yzp_mat2)

            if deadtime_if == 0:
                #this matrix is the matrix from which the SVD is going to be done to determine the final minimum peak
                pre_mat = (sc_lin.sqrtm((np.linalg.inv(Qz))) * Qzp *
                           (sc_lin.sqrtm(np.linalg.inv(Qp))))

                #final calculation for the peak value
                Ms_min = np.sqrt(1 + (np.max(np.linalg.svd(pre_mat)[1]))**2)
                print('')
                print('Minimum peak values on T and S without deadtime')
                print('Ms_min = Mt_min = ', Ms_min)
                print('')

            #Skogestad eq 6-16 pg 226 using maximum deadtime per output channel to give tightest lowest bounds
            if deadtime_if == 1:
                #create vector to be used for the diagonal deadtime matrix containing each output's maximum dead time
                #this would ensure tighter bounds on T and S
                #the minimum function is used because all stable systems have dead time with a negative sign

                dead_time_vec_max_row = np.zeros(deadtime()[0].shape[0])

                for i in range(deadtime()[0].shape[0]):
                    dead_time_vec_max_row[i] = np.max(deadtime()[0][i, :])

                def Dead_time_matrix(s, dead_time_vec_max_row):

                    dead_time_matrix = np.diag(
                        np.exp(np.multiply(dead_time_vec_max_row, s)))
                    return dead_time_matrix

                Q_dead = np.zeros([G(0.0001).shape[0], G(0.0001).shape[0]])

                for i in range(len(Poles_G)):
                    for j in range(len(Poles_G)):
                        numerator_mat = np.transpose(
                            np.conjugate(
                                yp_direction[:, i])) * Dead_time_matrix(
                                    Poles_G[i],
                                    dead_time_vec_max_row) * Dead_time_matrix(
                                        Poles_G[j], dead_time_vec_max_row
                                    ) * yp_direction[:, j]
                        denominator_mat = Poles_G[i] + Poles_G[j]

                        Q_dead[i, j] = numerator_mat / denominator_mat

                #calculating the Mt_min with dead time
                lambda_mat = sc_lin.sqrtm(np.linalg.pinv(Q_dead)) * (
                    Qp + Qzp * np.linalg.pinv(Qz) *
                    (np.transpose(np.conjugate(Qzp)))) * sc_lin.sqrtm(
                        np.linalg.pinv(Q_dead))

                Ms_min = np.real(np.max(np.linalg.eig(lambda_mat)[0]))
                print('')
                print('Minimum peak values on T and S with dead time')
                print(
                    'Dead time per output channel is for the worst case dead time in that channel'
                )
                print('Ms_min = Mt_min = ', Ms_min)
                print('')

        else:
            print('')
            print('Minimum peak values on T and S')
            print('No limits on minimum peak values')
            print('')

    #check for dead time
    #dead_G = deadtime[0]
    #dead_gd = deadtime[1]

    #if np.sum(dead_G)!= 0:
    #therefore deadtime is present in the system therefore extra precautions need to be taken
    #manually set up the dead time matrix

    #    dead_m = np.zeros([len(Poles_G), len(Poles_G)])

    #    for i in range(len(Poles_G)):
    #        for j in range(len(Poles_G))
    #            dead_m

    #eq 6-48 pg 239 for plant with RHP zeros
    #checking alignment of disturbances and RHP zeros
    RHP_alignment = [
        np.abs(
            np.linalg.svd(G(RHP_Z + error_poles_direction))[0][:, 0].H *
            np.linalg.svd(Gd(RHP_Z + error_poles_direction))[1][0] *
            np.linalg.svd(Gd(RHP_Z + error_poles_direction))[0][:, 0])
        for RHP_Z in Zeros_G
    ]

    print('Checking alignment of process output zeros to disturbances')
    print('These values should be less than 1')
    print(RHP_alignment)
    print('')

    #checking peak values of KS eq 6-24 pg 229 np.linalg.svd(A)[2][:, 0]
    #done with less tight lower bounds
    KS_PEAK = [
        np.linalg.norm(
            np.linalg.svd(G(RHP_p + error_poles_direction))[2][:, 0].H *
            np.linalg.pinv(G(RHP_p + error_poles_direction)), 2)
        for RHP_p in Poles_G
    ]
    KS_max = np.max(KS_PEAK)

    print('Lower bound on K')
    print('KS needs to larger than ', KS_max)
    print('')

    #eq 6-50 pg 240 from Skogestad
    #eq 6-50 pg 240 from Skogestad for the simultaneous disturbance matrix
    #Checking input saturation for perfect control for disturbance rejection
    #checking for maximum disturbance just at steady state

    [U_gd, S_gd, V_gd] = np.linalg.svd(Gd(0.000001))
    y_gd_max = np.max(S_gd) * U_gd[:, 0]
    mod_G_gd_ss = np.max(np.linalg.inv(G(0.000001)) * y_gd_max)

    print('Perfect control input saturation from disturbances')
    print('Needs to be less than 1 ')
    print('Max Norm method')
    print('Checking input saturation at steady state')
    print('This is done by the worst output direction of Gd')
    print(mod_G_gd_ss)
    print('')

    #
    #
    #

    print('Figure 1 is for perfect control for simultaneous disturbances')
    print('All values on each of the graphs should be smaller than 1')
    print('')

    print('Figure 2 is the plot of inv(G) * gd')
    print('The values of this plot needs to be smaller or equal to 1')
    print('')

    w = np.logspace(w_start, w_end, 100)

    mod_G_gd = np.zeros(len(w))
    mod_G_Gd = np.zeros([np.shape(G(0.0001))[0], len(w)])

    for i in range(len(w)):
        [U_gd, S_gd, V_gd] = np.linalg.svd(Gd(1j * w[i]))
        gd_m = np.max(S_gd) * U_gd[:, 0]
        mod_G_gd[i] = np.max(np.linalg.pinv(G(1j * w[i])) * gd_m)

        mat_G_Gd = np.linalg.pinv(G(1j * w[i])) * Gd(1j * w[i])
        for j in range(np.shape(mat_G_Gd)[0]):
            mod_G_Gd[j, i] = np.max(mat_G_Gd[j, :])

    #def for subplotting all the possible variations of mod_G_Gd

    plot_freq_subplot(plt, w, np.ones([2, len(w)]), 'Perfect control Gd', 'r',
                      1)
    plot_freq_subplot(plt, w, mod_G_Gd, 'Perfect control Gd', 'b', 1)

    plt.figure(2)
    plt.title('Input Saturation for perfect control |inv(G)*gd|<= 1')
    plt.xlabel('w')
    plt.ylabel('|inv(G)* gd|')
    plt.semilogx(w, mod_G_gd)
    plt.semilogx([w[0], w[-1]], [1, 1])
    plt.semilogx(w[0], 1.1)

    #def G_gd(w):
    #    [U_gd, S_gd, V_gd] = np.linalg.svd(Gd(1j*w))
    #    gd_m = U_gd[:, 0]
    #    mod_G_gd[i] = np.max(np.linalg.inv(G(1j*w))*gd_m)-1
    #    return mod_G_gd

    #w_mod_G_gd_1 = sc_opt.fsolve(G_gd, 0.001)

    #print 'frequencies up to which input saturation would not occur'
    #print w_mod_G_gd_1

    print('Figure 3 is disturbance condition number')
    print(
        'A large number indicates that the disturbance is in a bad direction')
    print('')
    #eq 6-43 pg 238 disturbance condition number
    #this in done over a frequency range to see if there are possible problems at higher frequencies
    #finding yd

    dist_condition_num = [
        np.linalg.svd(G(w_i))[1][0] * np.linalg.svd(
            np.linalg.svd(np.linalg.pinv(G(w_i)))[1][0] *
            np.linalg.svd(Gd(w_i))[1][0] *
            np.linalg.svd(Gd(w_i))[0][:, 0])[1][0] for w_i in w
    ]

    plt.figure(3)
    plt.title('yd Condition number')
    plt.ylabel('condition number')
    plt.xlabel('w')
    plt.loglog(w, dist_condition_num)

    #
    #
    #

    print(
        'Figure 4 is the singular value of a specific output with input and disturbance direction vector'
    )
    print('The solid blue line needs to be larger than the red line')
    print('This only needs to be checked up to frequencies where |u**H gd| >1')
    print('')

    #checking input saturation for acceptable control  disturbance rejection
    #equation 6-55 pg 241 in Skogestad
    #checking each singular values and the associated input vector with output direction vector of Gd
    #just for square systems for now

    #revised method including all the possibilities of outputs i
    store_rhs_eq = np.zeros([np.shape(G(0.0001))[0], len(w)])
    store_lhs_eq = np.zeros([np.shape(G(0.0001))[0], len(w)])

    for i in range(len(w)):
        for j in range(np.shape(G(0.0001))[0]):
            store_rhs_eq[j, i] = np.abs(
                np.linalg.svd(G(w[i]))[2][:, j].H *
                np.max(np.linalg.svd(Gd(w[i]))[1]) *
                np.linalg.svd(Gd(w[i]))[0][:, 0]) - 1
            store_lhs_eq[j, i] = sc_lin.svd(G(w[i]))[1][j]

    plot_freq_subplot(plt, w, store_rhs_eq, 'Acceptable control eq6-55', 'r',
                      4)
    plot_freq_subplot(plt, w, store_lhs_eq, 'Acceptable control eq6-55', 'b',
                      4)

    #
    #
    #

    print('Figure 5 is to check input saturation for reference changes')
    print(
        'Red line in both graphs needs to be larger than the blue line for values w < wr'
    )
    print('Shows the wr up to where control is needed')
    print('')

    #checking input saturation for perfect control with reference change
    #eq 6-52 pg 241

    #checking input saturation for perfect control with reference change
    #another equation for checking input saturation with reference change
    #eq 6-53 pg 241

    plt.figure(5)
    ref_perfect_const_plot(G, reference_change(), 0.01, w_start, w_end)

    print(
        'Figure 6 is the maximum and minimum singular values of G over a frequency range'
    )
    print(
        'Figure 6 is also the maximum and minimum singular values of Gd over a frequency range'
    )
    print('Blue is the minimum values and Red is the maximum singular values')
    print(
        'Plot of Gd should be smaller than 1 else control is needed at frequencies where Gd is bigger than 1'
    )
    print('')

    #checking input saturation for acceptable control with reference change
    #added check for controllability is the minimum and maximum singular values of system transfer function matrix
    #as a function of frequency
    #condition number added to check for how prone the system would be to uncertainty

    singular_min_G = [np.min(np.linalg.svd(G(1j * w_i))[1]) for w_i in w]
    singular_max_G = [np.max(np.linalg.svd(G(1j * w_i))[1]) for w_i in w]
    singular_min_Gd = [np.min(np.linalg.svd(Gd(1j * w_i))[1]) for w_i in w]
    singular_max_Gd = [np.max(np.linalg.svd(Gd(1j * w_i))[1]) for w_i in w]
    condition_num_G = [
        np.max(np.linalg.svd(G(1j * w_i))[1]) /
        np.min(np.linalg.svd(G(1j * w_i))[1]) for w_i in w
    ]

    plt.figure(6)
    plt.subplot(311)
    plt.title('min_S(G(jw)) and max_S(G(jw))')
    plt.loglog(w, singular_min_G, 'b')
    plt.loglog(w, singular_max_G, 'r')

    plt.subplot(312)
    plt.title('Condition number of G')
    plt.loglog(w, condition_num_G)

    plt.subplot(313)
    plt.title('min_S(Gd(jw)) and max_S(Gd(jw))')
    plt.loglog(w, singular_min_Gd, 'b')
    plt.loglog(w, singular_max_Gd, 'r')
    plt.loglog([w[0], w[-1]], [1, 1])

    plt.show()

    return Ms_min
Beispiel #56
0
def berechnung_koef_entwicklung(delta_x, EV, phi):
    """Funktion zur Berechnung der Etwicklungsfunktionen laut Vorlesung.
    Die Numpy-Funktionn conjugate und transpose werden verwendet um die
     benötigte komplexe Konjugation zu realisieren. 
    """
    return delta_x * np.dot(np.conjugate(np.transpose(EV)), phi)
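A usage sketch under illustrative assumptions (a 4-point grid and an orthonormal eigenbasis from a random Hermitian matrix): projecting a wavefunction with known composition recovers its coefficients up to the delta_x normalization.

import numpy as np

delta_x = 0.1
rng = np.random.default_rng(0)
H = rng.random((4, 4)) + 1j * rng.random((4, 4))
H = H + np.conjugate(H.T)                  # Hermitian test matrix
_, EV = np.linalg.eigh(H)                  # columns form an orthonormal basis

phi = EV[:, 0] + 2.0 * EV[:, 2]            # known mixture of eigenvectors
coeff = delta_x * np.dot(np.conjugate(np.transpose(EV)), phi)
print(np.round(coeff / delta_x, 6))        # ~ [1, 0, 2, 0]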
Beispiel #57
0
 def outf(m):
     if time_reversal: m2 = m2spin_sparse(m, np.conjugate(m))  # spinful
     else: m2 = m2spin_sparse(m)  # spinful
     #        m2 = m2spin_sparse(m) # spinful
     return build_eh(m2)  # add e-h
Beispiel #58
0
def multi_ldos(h,es=np.linspace(-1.0,1.0,100),delta=0.01,nrep=3,nk=100,numw=3,
        random=False,op=None):
  """Calculate many LDOS, by diagonalizing the Hamiltonian"""
  print("Calculating eigenvectors in LDOS")
  ps = [] # weights
  evals,ws = [],[] # empty list
  ks = klist.kmesh(h.dimensionality,nk=nk) # get grid
  hk = h.get_hk_gen() # get generator
  op = operators.tofunction(op) # turn into a function
#  if op is None: op = lambda x,k: 1.0 # dummy function
  if h.is_sparse: # sparse Hamiltonian
    from .bandstructure import smalleig
    print("SPARSE Matrix")
    for k in ks: # loop
      print("Diagonalizing in LDOS, SPARSE mode")
      if random:
        k = np.random.random(3) # random vector
        print("RANDOM vector in LDOS")
      e,w = smalleig(hk(k),numw=numw,evecs=True)
      evals += [ie for ie in e]
      ws += [iw for iw in w]
      ps += [op(iw,k=k) for iw in w] # weights
  else:
    print("Diagonalizing in LDOS, DENSE mode")
    for k in ks: # loop
      if random:
        k = np.random.random(3) # random vector
        print("RANDOM vector in LDOS")
      e,w = lg.eigh(hk(k))
      w = w.transpose()
      evals += [ie for ie in e]
      ws += [iw for iw in w]
      ps += [op(iw,k=k) for iw in w] # weights (k, not k[0], matching the sparse branch)
  ds = [(np.conjugate(v)*v).real for v in ws] # calculate densities
  del ws # remove the wavefunctions
  os.system("rm -rf MULTILDOS") # remove folder
  os.system("mkdir MULTILDOS") # create folder
  go = h.geometry.copy() # copy geometry
  go = go.supercell(nrep) # create supercell
  fo = open("MULTILDOS/MULTILDOS.TXT","w") # files with the names
  def getldosi(e):
    """Get this iteration"""
    out = np.zeros(h.intra.shape[0]) # initialize
    for (d,p,ie) in zip(ds,ps,evals): # loop over wavefunctions
      fac = delta/((e-ie)**2 + delta**2) # factor to create a delta
      out += fac*d*p # add contribution
    out /= np.pi # normalize
    return spatial_dos(h,out) # resum if necessary
  outs = parallel.pcall(getldosi,es) # get energies
  ie = 0
  for e in es: # loop over energies
    print("MULTILDOS for energy",e)
    out = outs[ie] ; ie += 1 # get and increase
    name0 = "LDOS_"+str(e)+"_.OUT" # name of the output
    name = "MULTILDOS/" + name0
    write_ldos(go.x,go.y,out.tolist()*(nrep**h.dimensionality),
                  output_file=name) # write in file
    fo.write(name0+"\n") # name of the file
    fo.flush() # flush
  fo.close() # close file
  # Now calculate the DOS
  from .dos import calculate_dos
  es2 = np.linspace(min(es),max(es),len(es)*10)
  ys = calculate_dos(evals,es2,delta,w=None) # compute DOS
  from .dos import write_dos
  write_dos(es2,ys,output_file="MULTILDOS/DOS.OUT")  
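
One step in multi_ldos worth spelling out: getldosi broadens every eigenvalue into a Lorentzian, delta / ((e - ie)**2 + delta**2), and the division by pi makes each peak integrate to one, so the spectral weight per state is preserved. A standalone check with illustrative numbers:

import numpy as np

delta = 0.01
e0 = 0.3                              # a single eigenvalue
es = np.linspace(-5.0, 5.0, 200001)   # wide, fine energy grid
lorentz = delta / ((es - e0)**2 + delta**2) / np.pi
print(lorentz.sum() * (es[1] - es[0]))  # close to 1.0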
Beispiel #59
0
def outf(m):
    if time_reversal:
        return m2spin_sparse(m, np.conjugate(m))  # spinful
    else:
        return m2spin_sparse(m)  # spinful
Beispiel #60
0
    def __init__(self, wf, feature_name):
        """
        Calculates the amplitude of the rotated worm (relies on the
        orientation, aka theta_d).

        Parameters
        ----------
        wf : worm-features object providing the options, the normalized
            worm (skeleton_x, skeleton_y, length) and the orientation
            feature
        feature_name : str

        """

        self.name = feature_name
        theta_d = self.get_feature(
            wf, 'posture.eccentricity_and_orientation').orientation

        timer = wf.timer
        timer.tic()

        options = wf.options

        nw = wf.nw
        sx = nw.skeleton_x
        sy = nw.skeleton_y
        worm_lengths = nw.length

        # TODO: Move these into posture options

        wave_options = wf.options.posture.wavelength

        # https://github.com/JimHokanson/SegwormMatlabClasses/blob/master/
        # %2Bseg_worm/%2Bfeatures/%40posture/getAmplitudeAndWavelength.m
        N_POINTS_FFT = wave_options.n_points_fft
        HALF_N_FFT = int(N_POINTS_FFT / 2)
        MIN_DIST_PEAKS = wave_options.min_dist_peaks
        WAVELENGTH_PCT_MAX_CUTOFF = wave_options.pct_max_cutoff
        WAVELENGTH_PCT_CUTOFF = wave_options.pct_cutoff

        assert sx.shape[0] <= N_POINTS_FFT  # the FFT length must be at
        # least the number of points in the skeleton

        # Rotate the worm so that it lies primarily along a single axis
        #-------------------------------------------------------------
        theta_r = theta_d * (np.pi / 180)
        wwx = sx * np.cos(theta_r) + sy * np.sin(theta_r)
        wwy = sx * -np.sin(theta_r) + sy * np.cos(theta_r)

        # Subtract mean
        #-----------------------------------------------------------------
        #??? - Why isn't this done before the rotation?
        wwx = wwx - np.mean(wwx, axis=0)
        wwy = wwy - np.mean(wwy, axis=0)

        # Calculate track amplitude
        #-----------------------------------------------------------------
        amp1 = np.amax(wwy, axis=0)
        amp2 = np.amin(wwy, axis=0)
        amplitude_max = amp1 - amp2
        amp2 = np.abs(amp2)

        # Ignore NaN division warnings
        with np.errstate(invalid='ignore'):
            amplitude_ratio = np.divide(np.minimum(amp1, amp2),
                                        np.maximum(amp1, amp2))

        # Calculate track length
        #-----------------------------------------------------------------
        # This is the x distance after rotation, and is different from the
        # worm length which follows the skeleton. This will always be smaller
        # than the worm length. If the worm were perfectly straight these
        # values would be the same.
        track_length = np.amax(wwx, axis=0) - np.amin(wwx, axis=0)

        # Wavelength calculation
        #-----------------------------------------------------------------
        dwwx = np.diff(wwx, 1, axis=0)

        # Does the sign change? This is a check to make sure that the
        # change in x always goes one way or the other: are any of the
        # signs of the differences not the same as the sign of the first,
        # indicating a "bad worm orientation"?
        #
        # NOTE: This means that within a frame, if the worm's x direction
        #       changes, then it is considered a bad worm and is not
        #       evaluated for wavelength


        with np.errstate(invalid='ignore'):
            bad_worm_orientation = np.any(np.not_equal(np.sign(dwwx),
                                                       np.sign(dwwx[0, :])),
                                          axis=0)

        n_frames = bad_worm_orientation.size
        primary_wavelength = np.full(n_frames, np.nan)
        secondary_wavelength = np.full(n_frames, np.nan)

        # NOTE: Right now this varies from worm to worm, which means the
        # spectral resolution varies as well
        spatial_sampling_frequency = (wwx.shape[0] - 1) / track_length

        ds = 1 / spatial_sampling_frequency

        frames_to_calculate = \
            (np.logical_not(bad_worm_orientation)).nonzero()[0]

        for cur_frame in frames_to_calculate:
            # Create an evenly sampled x-axis, note that ds varies
            xx = wwx[:, cur_frame]
            yy = wwy[:, cur_frame]
            if xx[0] > xx[-1]:
                # Reverse so that the x values are monotonically increasing
                xx = xx[::-1]
                yy = yy[::-1]

            iwwx = utils.colon(xx[0], ds[cur_frame], xx[-1])
            iwwy = np.interp(iwwx, xx, yy)
            iwwy = iwwy[::-1]

            temp = np.fft.fft(iwwy, N_POINTS_FFT)

            if options.mimic_old_behaviour:
                iY = temp[0:HALF_N_FFT]
                iY = iY * np.conjugate(iY) / N_POINTS_FFT
            else:
                iY = np.abs(temp[0:HALF_N_FFT])

            # Find peaks that are greater than the cutoff
            peaks, indx = utils.separated_peaks(
                iY, MIN_DIST_PEAKS, True,
                (WAVELENGTH_PCT_MAX_CUTOFF * np.amax(iY)))

            # This is what the supplemental says, not what was done in
            # the previous code. I'm not sure what was done for the actual
            # paper, but I would guess they used power.
            #
            # This gets used when determining the secondary wavelength, as
            # it must be greater than half the maximum to be considered a
            # secondary wavelength.

            # NOTE: True Amplitude = 2*abs(fft)/
            #                    (length_real_data i.e. 48 or 49, not 512)
            #
            # i.e. for a sinusoid of a given amplitude, the above formula
            # would give you the amplitude of the sinusoid

            # We sort the peaks so that the largest is at the first index
            # and will be primary; this was not done in the previous
            # version of the code
            I = np.argsort(-1 * peaks)
            indx = indx[I]

            frequency_values = (indx - 1) / N_POINTS_FFT * \
                spatial_sampling_frequency[cur_frame]

            all_wavelengths = 1 / frequency_values

            p_temp = all_wavelengths[0]

            if indx.size > 1:
                s_temp = all_wavelengths[1]
            else:
                s_temp = np.nan

            worm_wavelength_max = (WAVELENGTH_PCT_CUTOFF *
                                   worm_lengths[cur_frame])

            # Cap wavelengths ...
            if p_temp > worm_wavelength_max:
                p_temp = worm_wavelength_max

            # ??? Do we really want to keep this as well if p_temp == worm_2x?
            # i.e., should the secondary wavelength be valid if the primary is
            # also limited in this way ?????
            if s_temp > worm_wavelength_max:
                s_temp = worm_wavelength_max

            primary_wavelength[cur_frame] = p_temp
            secondary_wavelength[cur_frame] = s_temp

        if options.mimic_old_behaviour:
            # In the old code, the first peak (i.e. larger wavelength,
            # lower frequency) was always the primary wavelength, whereas
            # the new definition is based on the amplitude of the peaks,
            # not on their position along the frequency axis
            with warnings.catch_warnings():
                warnings.simplefilter('ignore')
                mask = secondary_wavelength > primary_wavelength

            temp = secondary_wavelength[mask]
            secondary_wavelength[mask] = primary_wavelength[mask]
            primary_wavelength[mask] = temp

        self.amplitude_max = amplitude_max
        self.amplitude_ratio = amplitude_ratio
        self.primary_wavelength = primary_wavelength
        self.secondary_wavelength = secondary_wavelength
        self.track_length = track_length

        timer.toc('posture.amplitude_and_wavelength')
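
A quick standalone check of the amplitude note above, namely that for a sinusoid the true amplitude is roughly 2*abs(fft)/n_real, where n_real is the number of real data points (48 or 49, not the padded length 512). All numbers below are synthetic:

import numpy as np

n_real = 49      # points actually sampled, like the skeleton
n_fft = 512      # zero-padded FFT length, like N_POINTS_FFT
amp = 1.7        # known sinusoid amplitude
t = np.arange(n_real)
y = amp * np.sin(2 * np.pi * 5 * t / n_real)  # five whole cycles

Y = np.fft.fft(y, n_fft)
half = np.abs(Y[:n_fft // 2])
print(2 * half.max() / n_real)  # close to amp (1.7)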