Example #1
 def __init__(self,s1pPath,delay=0.0):
     assert s1pPath.endswith('.s1p')
     self.title = re.split('/',s1pPath)[-1]
     self.frequencies = [] # in Hz
     self.s11Values = [] # as complex, linear values
     
     touchstoneFileHandle = open(s1pPath,'r',1)
     for line in touchstoneFileHandle:
         if line[0] == '!':
             pass # comment, ignore for the moment
         elif line[0] == '#':
             assert line.endswith('hz S db R 50\n')
         else: # should be a data line
             splittedLine = re.split('\s+',line)
             floatItems = []
             for item in splittedLine:
                 try:
                     floatItems.append(float(item))
                 except ValueError:
                     pass
             
             assert len(floatItems) == 3 # we only support S1P files for the moment
             self.frequencies.append(floatItems[0])
             
             s11angle = numpy.exp(numpy.complex(0,numpy.deg2rad(floatItems[2])))
             s11magnitude = 10.0**(floatItems[1]/20.0)
             
             self.s11Values.append(s11magnitude*s11angle)
     
     self.frequencies = numpy.array(self.frequencies)
     self.s11Values = numpy.array(self.s11Values)
     # compensate electrical delay
     self.s11Values = numpy.exp(numpy.complex(0,1)*2*2*numpy.pi*self.frequencies*delay) * self.s11Values
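NumPy deprecated np.complex in 1.20 and removed it in 1.24; on current versions the same dB/degree-to-complex conversion and electrical-delay compensation can be written with the 1j literal. A minimal standalone sketch with illustrative values (not taken from the example above):

import numpy as np

db = np.array([-3.0, -6.0])     # |S11| in dB (illustrative values)
deg = np.array([45.0, 90.0])    # phase in degrees (illustrative values)
freq = np.array([1e9, 2e9])     # Hz
delay = 1e-12                   # electrical delay in seconds

s11 = 10.0**(db / 20.0) * np.exp(1j * np.deg2rad(deg))       # dB/deg -> complex, linear
s11 = np.exp(1j * 2 * 2 * np.pi * freq * delay) * s11        # same 2*2*pi delay factor as above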
Example #2
 def xdatan(self):
     if self.is_numerical:
         return self.xdata
     else:
         return (numpy.complex(self.center),
                 numpy.complex(self.xcoefficient),
                 numpy.int(self.ramification_index))
Example #3
def getftzer(Jzer,ngrid=128,Rpix=100):
    '''
    Compute the Fourier transform of a Zernike mode.

    ngrid = 128 : grid half-size, pixels
    Rpix = 100 : pupil radius in pixels

    :param Jzer: Zernike mode index
    :return: complex Fourier transform of the mode on a (2*ngrid, 2*ngrid) grid
    '''

    x = np.arange(-ngrid,ngrid)
    y = np.arange(-ngrid,ngrid)
    theta = np.arctan2(x,y)

    n,m = zern_num(Jzer)
    f = np.roll(np.roll(dist(2*ngrid),
                        ngrid,
                        axis=0),
                ngrid,
                axis=1)/(2*ngrid)*Rpix
    f[ngrid][ngrid] = 1e-3

    ftmod = np.sqrt(n+1.0)*jv(n+1,2*np.pi*f)/(np.pi*f)

    if m == 0:
        zz = ftmod*np.complex(0,1.)**(n/2.)
    else:
        if (Jzer%2 == 0):
            fact=np.sqrt(2.)*np.cos(m*theta)
        else:
            fact=np.sqrt(2.)*np.sin(m*theta)
        zz = ftmod*fact*(-1)**((n-m/2.))*np.complex(0,1.)**m

    return zz
Example #4
    def pherr(self, i, phi):
        """add phase error to antenna i
        """

        for j in range(self.na):
            self.data[i,j] = self.data[i,j] * n.exp(n.complex(0.,n.sin(phi)))
            self.data[j,i] = self.data[j,i] * n.exp(n.complex(0.,n.sin(-phi)))
Example #5
    def plot_error_map(self, point_error, ele_pos):
        """
        Creates plot of mean error calculated separately for every point of
        estimation space

        Parameters
        ----------
        point_error: numpy array
            Error of reconstruction calculated at every point of reconstruction
            space.
        ele_pos: numpy array
            Positions of electrodes.

        Returns
        -------
        mean_error: numpy array
            Accuracy mask.
        """
        ele_x, ele_y = ele_pos[:, 0], ele_pos[:, 1]
        x, y = np.mgrid[self.kcsd_xlims[0]:self.kcsd_xlims[1]:
                        np.complex(0, point_error.shape[1]),
                        self.kcsd_ylims[0]:self.kcsd_ylims[1]:
                        np.complex(0, point_error.shape[2])]
        mean_error = self.sigmoid_mean(point_error)
        plt.figure(figsize=(12, 7))
        ax1 = plt.subplot(111, aspect='equal')
        levels = np.linspace(0, 1., 25)
        im = ax1.contourf(x, y, mean_error, cmap='Greys')
        plt.colorbar(im)#im, fraction=0.046, pad=0.06)
        plt.scatter(ele_x, ele_y, 10, c='k')
        ax1.set_xlabel('Depth x [mm]')
        ax1.set_ylabel('Depth y [mm]')
        ax1.set_title('Sigmoidal mean point error')
        plt.show()
        return mean_error
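The np.complex(0, n) values used as np.mgrid steps above are just imaginary numbers n*1j: a complex step tells np.mgrid to return that many samples with the endpoint included, like np.linspace. A standalone illustration:

import numpy as np

x, y = np.mgrid[0.0:1.0:5j, 0.0:2.0:3j]                    # complex step = number of samples
print(x.shape)                                             # (5, 3)
print(np.allclose(x[:, 0], np.linspace(0.0, 1.0, 5)))      # True: endpoint is included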
Example #6
 def get_transfer_matrix_mine(self, index, energy):
     if energy == self.data[index + 1].height:
         km_ratio = np.sqrt(
             self.data[index].mass
             * np.complex(energy - self.data[index].height)
             / self.data[index + 1].mass
             / FAKE_ZERO
         )
     else:
         km_ratio = np.sqrt(
             self.data[index].mass
             * np.complex(energy - self.data[index].height)
             / self.data[index + 1].mass
             / np.complex(energy - self.data[index + 1].height)
         )
     kcp = 1 + km_ratio
     kcm = 1 - km_ratio
     d_this = self.get_delta(index, energy)
     d_next = self.get_delta(index + 1, energy)
     kd = self.get_wave_number(index, energy) * self.data[index].width
     # print "d_this =", d_this, "d_next =", d_next, "kd", kd, "1j * ( d_this + d_next - kd) =", 1j * ( d_this + d_next - kd)
     return np.matrix(
         [
             [kcp / 2 * np.exp(1j * (d_this - d_next - kd)), kcm / 2 * np.exp(1j * (d_this + d_next - kd))],
             [kcp / 2 * np.exp(-1j * (d_this + d_next - kd)), kcm / 2 * np.exp(-1j * (d_this - d_next - kd))],
         ]
     )
Example #7
    def calclogI(self):
        """
        The logarithm of the intensity function.

        Returns:
        Numpy.complex data type representing the value of the logarithm of the intensity function.
        """
        ret=numpy.complex(0.,0.)
        for n in range(0,len(self.alphaList)-1,1):    
            argret=numpy.complex(0.,0.)            
            for wave1 in self.waves:
                for wave2 in self.waves:
                    if len(self.productionAmplitudes)!=0:
                                #logarithmic domain error
                        arg = self.productionAmplitudes[self.waves.index(wave1)]*numpy.conjugate(self.productionAmplitudes[self.waves.index(wave2)])*wave1.complexamplitudes[n]*numpy.conjugate(wave2.complexamplitudes[n])*spinDensity(self.beamPolarization,self.alphaList[n])[wave1.epsilon,wave2.epsilon]
                        argret+=arg
            argret=argret.real
            if self.debugPrinting==1:                        
                print"loop#",n,"="*10
                print"argval:",arg
                print"argtype:",type(arg)
                print"productionAmps1:",self.productionAmplitudes[self.waves.index(wave1)]
                print"productionAmps2*:",numpy.conjugate(self.productionAmplitudes[self.waves.index(wave2)])
                print"spinDensityValue:",spinDensity(self.beamPolarization,self.alphaList[n])[wave1.epsilon,wave2.epsilon]
                print"A1:",wave1.complexamplitudes[n]                        
                print"A2*:",numpy.conjugate(wave2.complexamplitudes[n])
            if argret > 0.:                        
                ret+=log(argret)
            
            self.iList.append(argret)                           
        return ret
Example #8
def uniform_seed_grid():

    #read bvals,gradients and data   
    fimg,fbvals, fbvecs = get_data('small_64D')    
    bvals=np.load(fbvals)
    gradients=np.load(fbvecs)
    img =ni.load(fimg)    
    data=img.get_data()
    
    x,y,z,g=data.shape   

    M=np.mgrid[.5:x-.5:np.complex(0,x),.5:y-.5:np.complex(0,y),.5:z-.5:np.complex(0,z)]
    M=M.reshape(3,x*y*z).T

    print(M.shape)
    print(M.dtype)

    for m in M: 
        print(m)
    gqs = GeneralizedQSampling(data,bvals,gradients)
    iT=iter(EuDX(gqs.QA,gqs.IN,seeds=M))    
    T=[]
    for t in iT:
        T.append(t)
    
    print('lenT',len(T))
    assert_equal(len(T), 1221)
Example #9
def cubic(c, d):
    '''
    Solve x**3 + c * x + d = 0
    '''

    c = c.astype(np.complex)
    d = d.astype(np.complex)

    q = c / 3.
    r = - d / 2.

    delta = q ** 3 + r ** 2

    pos = delta >= 0.

    s = np.zeros(c.shape, dtype=np.complex)
    t = np.zeros(c.shape, dtype=np.complex)

    if np.sum(pos) > 0:
        s[pos], t[pos] = delta_pos(r[pos], delta[pos])

    if np.sum(~pos) > 0:
        s[~pos], t[~pos] = delta_neg(r[~pos], q[~pos])

    x1 = s + t
    x2 = - (s + t) / 2. + np.sqrt(3.) / 2. * (s - t) * np.complex(0., 1.)
    x3 = - (s + t) / 2. - np.sqrt(3.) / 2. * (s - t) * np.complex(0., 1.)

    return x1, x2, x3
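The helpers delta_pos and delta_neg are not shown here. As an illustrative cross-check (not part of the original code), any depressed cubic x**3 + c*x + d = 0 with scalar coefficients can also be handed to numpy's general polynomial solver:

import numpy as np

c, d = -7.0, 6.0                       # x**3 - 7x + 6 = (x - 1)(x - 2)(x + 3)
roots = np.roots([1.0, 0.0, c, d])     # coefficients of x**3 + 0*x**2 + c*x + d
print(np.sort_complex(roots))          # approximately -3, 1, 2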
Example #10
def init(f, g):
    k = 0
    while k < len(f):
        f[k] = np.complex(k, k + 1)
        g[k] = np.complex(k, 2 * k + 1)
        k += 1
    return
Example #11
def hermitian_subspace_basis(n):
    """
    returns a basis set for the real subspace of Hermitian n*n matrices
    """
    sqrt2over2 = numpy.sqrt(2.)/2.
    assert(n)
    basis = []
    for row in range(0,n):
        for col in range(row,n):
            if row == col:
                mat = numpy.zeros((n,n), dtype=complex)
                mat[row][col] = 1.
                basis.append(mat)
            else:
                mat = numpy.zeros((n,n), dtype=complex)
                mat[col][row] = sqrt2over2
                mat[row][col] = sqrt2over2
                basis.append(mat)                
                mat = numpy.zeros((n,n), dtype=complex)
                mat[row][col] = numpy.complex(0.,sqrt2over2)
                mat[col][row] = numpy.complex(0.,-sqrt2over2)
                basis.append(mat)

    # unit vectors ?
#    for M in basis:
#        for F in basis:
#           print hs.dot(F,M)
        # assert hs.dot(M,M) == 1.
              
    return basis            
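A small sanity check of the basis above, assuming the function as defined and a NumPy version that still provides numpy.complex: an n-by-n Hermitian space has n**2 real dimensions, and each basis element should be Hermitian with unit Hilbert-Schmidt norm.

import numpy

basis = hermitian_subspace_basis(2)
assert len(basis) == 4                                               # n**2 real dimensions
for M in basis:
    assert numpy.allclose(M, M.conj().T)                             # each element is Hermitian
    assert numpy.isclose(numpy.trace(M @ M.conj().T).real, 1.0)      # unit Hilbert-Schmidt norm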
Example #12
def make_invariant(points):
    print('*' * 50)
    print('make_invariant called')
    print('*' * 50)
    print()

    N = len(points)

    points = [np.complex(x[0], x[1]) for x in points]
    FD = np.fft.fft(points)
    # translational invariance
    translational_invar = FD[0]
    FD = [x - translational_invar for x in FD]
    # scalar invariance
    val = ((FD[1]) * (FD[1].conjugate())).real
    FD = [x / val for x in FD]
    # rotational invariance
    final_points = []
    for i in range(N):
        prev_ = FD[(i - 1) % N]
        next_ = FD[(i + 1) % N]
        angle = (2 * math.pi * i) / N
        cmplx1 = np.complex(math.cos(angle), math.sin(angle))
        cmplx2 = cmplx1.conjugate()
        u_i = prev_ * cmplx2 + next_ * cmplx1
        final_points.append(u_i)

    phi = math.atan(final_points[1].imag / final_points[1].real)
    cmplx3 = np.complex(math.cos(phi), math.sin(phi))

    final_points = [[(x * cmplx3).real, (x * cmplx3).imag] for x in final_points]

    print(final_points)
    return final_points
Example #13
def IntegralLapLineDipoleDis(zin, z1, z2, del0, ra, order):
    cg = np.full(order + 2, np.complex(0.0, 0.0))

    z = (2.0 * zin - (z1 + z2)) / (z2 - z1)
    zplus1 = z + 1.0
    zmin1 = z - 1.0

    # Determine coefficients of powers of Delta for [ (Delta-Delta_0)/a ] ^ p
    for m in range(0, order + 1):
        cg[m] = RBINOM[order, m] * (-del0) ** (order - m) / ra ** order

    zterm1 = np.complex(0.0, 0.0)
    zterm2 = np.complex(0.0, 0.0)
    for n in range(1, order + 1):
        zterm1 = zterm1 + cg[n] * float(n) * z ** (n - 1)
    for n in range(0, order + 1):
        zterm2 = zterm2 + cg[n] * z ** n

    qmtot = np.complex(0.0, 0.0)
    for m in range(2, order + 1):
        qm = np.complex(0.0, 0.0)
        for n in range(1, int(m / 2) + 1):
            qm = qm + float(m - 2 * n + 1) * z ** (m - 2 * n) / float(2 * n - 1)
        qmtot = qmtot + 2.0 * cg[m] * qm

    wdis = (
        zterm1 * np.log(zmin1 / zplus1) + zterm2 * (1.0 / zmin1 - 1.0 / zplus1) + qmtot
    ) / (2.0 * np.pi)
    return wdis
Example #14
def sdb2sri(Sdb, Sdeg):
	# convert DB/DEG to real/imag
	num_freqs = len(Sdb)
	Sri = np.zeros( (num_freqs, 2, 2), dtype=complex)

	for idx in range(len(Sdb)):
		db_mat = Sdb[idx]
		S11_db = db_mat[0][0]
		S12_db = db_mat[0][1]
		S21_db = db_mat[1][0]
		S22_db = db_mat[1][1]

		deg_mat = Sdeg[idx]
		S11_deg = deg_mat[0][0]
		S12_deg = deg_mat[0][1]
		S21_deg = deg_mat[1][0]
		S22_deg = deg_mat[1][1]

		S11 = 10**(S11_db/20) * np.complex( np.cos(S11_deg*np.pi/180), np.sin(S11_deg*np.pi/180) )
		S12 = 10**(S12_db/20) * np.complex( np.cos(S12_deg*np.pi/180), np.sin(S12_deg*np.pi/180) )
		S21 = 10**(S21_db/20) * np.complex( np.cos(S21_deg*np.pi/180), np.sin(S21_deg*np.pi/180) )
		S22 = 10**(S22_db/20) * np.complex( np.cos(S22_deg*np.pi/180), np.sin(S22_deg*np.pi/180) )

		Sri[idx][0][0] = S11
		Sri[idx][0][1] = S12
		Sri[idx][1][0] = S21
		Sri[idx][1][1] = S22

	return Sri
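The per-frequency loop above can be collapsed into a single vectorized expression. A sketch of an equivalent conversion using the 1j literal instead of np.complex, assuming Sdb and Sdeg are (N, 2, 2) arrays of dB magnitudes and phases in degrees:

import numpy as np

def sdb2sri_vectorized(Sdb, Sdeg):
    # magnitude from dB, phase from degrees, applied element-wise
    Sdb = np.asarray(Sdb, dtype=float)
    Sdeg = np.asarray(Sdeg, dtype=float)
    return 10.0**(Sdb / 20.0) * np.exp(1j * np.deg2rad(Sdeg))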
Example #15
def getGroundState(psi0,V,a,b,dt,omega=1.0,delta=0.0,epsilon=0.048,phi=4.0/3.0,*args,**kwargs):
    dx=(b-a)/(psi0.shape[0])
    t0=time.clock()
    eigE,eigV,eigVdagger=getEigenHam2(a,b,psi0.shape[0],V,omega=omega,delta=delta,epsilon=epsilon,phi=phi,*args,**kwargs)
    t1=time.clock()
    print('Got EigenHam in '+str(t1-t0)+' seconds!')
    psi0=psi0/np.sqrt(np.sum(psi0*np.conj(psi0)*dx))
    psi0eigB=np.einsum('ijk,ik->ij',eigVdagger,psi0)
    t0=time.clock()
    psi1eigB=splitStepPropagatorEigB2(psi0eigB,dt*np.complex(0.0,-1.0),a,b,eigE,eigV,eigVdagger)
    t1=time.clock()
    psi1eigB=psi1eigB/np.sqrt(np.sum(psi1eigB*np.conj(psi1eigB)*dx))
    t2=time.clock()
    print('Completed one time step in '+str(t1-t0)+' seconds!')
    print('Then normalized wavefunction in '+str(t2-t1)+' seconds!')
    diff=np.sum(np.abs(psi0eigB*np.conj(psi0eigB)*dx-psi1eigB*np.conj(psi1eigB)*dx))
    i=0
    while diff>0.1e-5:
        psi0eigB=psi1eigB
        psi1eigB=splitStepPropagatorEigB2(psi0eigB,dt*np.complex(0.0,-1.0),a,b,eigE,eigV,eigVdagger)
        psi1eigB=psi1eigB/np.sqrt(np.sum(psi1eigB*np.conj(psi1eigB)*dx))
        diffLast=diff
        diff=np.sum(np.abs(psi0eigB*np.conj(psi0eigB)*dx-psi1eigB*np.conj(psi1eigB)*dx))
        if diffLast<diff:
            print('Not converging! Difference went up from %f to %f' % (diffLast, diff))
            break
        i+=1
    print(i)
    print(diff)
    psi1=np.einsum('ijk,ik->ij',eigV,psi1eigB)
    psi1=psi1.transpose()
    return psi1
Example #16
def test_disbesldv():
    qxqyv = besselaesnew.disbesldv(2.0, 1.0, np.complex(-3.0, -1.0), np.complex(2.0, 2.0),
              [0.0, 2.0, 11.0], 1, 1, 3)
    assert_allclose(qxqyv[0], np.array([-0.17013114606375021, -0.18423853257632447, -0.17315784943727297]))
    assert_allclose(qxqyv[2], np.array([2.7440507429637e-002, 8.880686745447e-002, 3.426560831291e-002]))
    assert_allclose(qxqyv[1], np.array([-0.10412493484448178, -0.10844664064434061, -0.10447761803194042]))
    assert_allclose(qxqyv[3], np.array([0.10617613097471285, 0.11627387807684744, 0.10674211206906066]))
Example #17
    def test_analytic_continuation_X1(self):
        gammax = ComplexLine(1,0)
        y0 = [-1,1]
        gamma = RiemannSurfacePathPuiseux(self.X1, gammax, y0)

        y = gamma.get_y(0)
        self.assertAlmostEqual(y[0], -1)
        self.assertAlmostEqual(y[1], 1)

        y = gamma.get_y(0.5)
        self.assertAlmostEqual(y[0], -sqrt(complex(0.5)))
        self.assertAlmostEqual(y[1], sqrt(complex(0.5)))

        y = gamma.get_y(0.75)
        self.assertAlmostEqual(y[0], -sqrt(complex(0.25)))
        self.assertAlmostEqual(y[1], sqrt(complex(0.25)))

        y = gamma.get_y(1)
        self.assertAlmostEqual(y[0], 0)
        self.assertAlmostEqual(y[1], 0)

        gammax = ComplexArc(2,2,0,pi)
        y0 = [-2,2]
        gamma = RiemannSurfacePathPuiseux(self.X1, gammax, y0)

        y = gamma.get_y(0)
        self.assertAlmostEqual(y[0], -2)
        self.assertAlmostEqual(y[1], 2)

        y = gamma.get_y(1)
        self.assertAlmostEqual(y[0], 0)
        self.assertAlmostEqual(y[1], 0)
Example #18
def load_data(filename,y1_col,y2_col,sformat='realimag',phase_conversion = 1,ampformat='lin',fdata_unit=1.,delimiter=None):
    '''
    sformat = 'realimag' or 'ampphase'
    ampformat = 'lin' or 'log'
    '''
    f = open(filename)
    lines = f.readlines()
    f.close()
    z_data = []
    f_data = []

    if sformat=='realimag':
        for line in lines:
            if ((line!="\n") and (line[0]!="#") and (line[0]!="!")) :
                lineinfo = line.split(delimiter)
                f_data.append(float(lineinfo[0])*fdata_unit)
                z_data.append(np.complex(float(lineinfo[y1_col]),float(lineinfo[y2_col])))
    elif sformat=='ampphase' and ampformat=='lin':
        for line in lines:
            if ((line!="\n") and (line[0]!="#") and (line[0]!="!") and (line[0]!="M") and (line[0]!="P")):
                lineinfo = line.split(delimiter)
                f_data.append(float(lineinfo[0])*fdata_unit)
                z_data.append(float(lineinfo[y1_col])*np.exp( np.complex(0.,phase_conversion*float(lineinfo[y2_col]))))
    elif sformat=='ampphase' and ampformat=='log':
        for line in lines:
            if ((line!="\n") and (line[0]!="#") and (line[0]!="!") and (line[0]!="M") and (line[0]!="P")):
                lineinfo = line.split(delimiter)
                f_data.append(float(lineinfo[0])*fdata_unit)
                linamp = 10**(float(lineinfo[y1_col])/20.)
                z_data.append(linamp*np.exp( np.complex(0.,phase_conversion*float(lineinfo[y2_col]))))
    else:
        print "ERROR"
    return np.array(f_data), np.array(z_data)
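An illustrative call of the function above, assuming numpy is imported as np and a NumPy version that still provides np.complex: write a tiny whitespace-delimited file with columns frequency, Re(S21), Im(S21) and read it back.

from pathlib import Path

Path('example.dat').write_text("1.0e9 0.5 -0.1\n2.0e9 0.4 -0.2\n")   # hypothetical test file
f, z = load_data('example.dat', y1_col=1, y2_col=2, sformat='realimag')
print(f)   # [1.e+09 2.e+09]
print(z)   # [0.5-0.1j 0.4-0.2j]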
Example #19
    def general_work(self, input_items, output_items):
        nread = self.nitems_read(0)  # number of items read on port 0
        ninput_items = len(input_items[0])
        noutput_items = len(output_items[0])
        nitems_to_consume = min(ninput_items, noutput_items)
        in0 = input_items[0]
        out0 = output_items[0]

        for ii in range(nitems_to_consume):
            x = in0[ii].real
            y = in0[ii].imag

            if x == 0 and y == 0:
                out0[ii] = numpy.complex(0)
            else:
                r = numpy.sqrt(numpy.square(x) + numpy.square(y))
                theta = numpy.arctan2(x, y) - self.angle

                a = r * numpy.cos(theta)
                b = r * numpy.sin(theta)

                out0[ii] = numpy.complex(a, b)

        self.consume(0, nitems_to_consume)
        return nitems_to_consume
Example #20
def interpolation2D(inputdata,
                    vsize=200, hsize=400, 
                    method='cubic'):
    """
    use different 2D interpolation methods from scipy
    
    vsize and hsize are the output dimensions after interpolation
    
    methods: nearest, linear, cubic
    
    some bugs exist. This part doesn't work well.
    
    """
    inputdata = np.array(inputdata)
    datas = inputdata.shape

    grid_x, grid_y = np.mgrid[0:datas[0]:np.complex(0,vsize), 0:datas[1]:np.complex(0,hsize)]

    val = np.zeros([datas[0]*datas[1]])
    pos = np.zeros([datas[0]*datas[1], 2])

    for i in range(datas[0]):
        for j in range(datas[1]):
            pos[i*datas[1]+j,0] = i
            pos[i*datas[1]+j,1] = j
            val[i*datas[1]+j] = inputdata[i,j]

    grid_z = griddata(pos, val, (grid_x, grid_y), method=method)

    return grid_z
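An illustrative call of the function above, assuming numpy is imported as np, griddata is imported from scipy.interpolate, and np.complex is still available: upsample a small 4x6 array.

import numpy as np

data = np.arange(24, dtype=float).reshape(4, 6)
smooth = interpolation2D(data, vsize=40, hsize=60, method='cubic')
print(smooth.shape)   # (40, 60); points outside the sample hull come back as NaN with 'cubic'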
Example #21
    def check_numpy_scalar_argument_return_generic(self):
        f = PyCFunction('foo')
        f += Variable('a1', numpy.int_, 'in, out')
        f += Variable('a2', numpy.float_, 'in, out')
        f += Variable('a3', numpy.complex_, 'in, out')
        foo = f.build()
        args = 2, 1.2, 1+2j
        results = numpy.int_(2), numpy.float_(1.2), numpy.complex(1+2j)
        assert_equal(foo(*args),results)
        args = [2], [1.2], [1+2j]
        assert_equal(foo(*args),results)
        args = [2], [1.2], [1,2]
        assert_equal(foo(*args),results)

        f = PyCFunction('foo')
        f += Variable('a1', 'npy_int', 'in, out')
        f += Variable('a2', 'npy_float', 'in, out')
        f += Variable('a3', 'npy_complex', 'in, out')
        foo = f.build()
        args = 2, 1.2, 1+2j
        results = numpy.int_(2), numpy.float_(1.2), numpy.complex(1+2j)
        assert_equal(foo(*args),results)
        args = [2], [1.2], [1+2j]
        assert_equal(foo(*args),results)
        args = [2], [1.2], [1,2]
        assert_equal(foo(*args),results)
Example #22
    def test_cmult_sysgen_bit_consistency(self):
        # load test data
        sysgen_test_data = loadmat('cmult_fixed_pt_test_data.mat')

        b_real_int  = np.int64(sysgen_test_data['b_real_int'].ravel())
        b_imag_int  = np.int64(sysgen_test_data['b_imag_int'].ravel())
        w_real_int  = np.int64(sysgen_test_data['w_real_int'].ravel())
        w_imag_int  = np.int64(sysgen_test_data['w_imag_int'].ravel())
        bw_real_int = np.int64(sysgen_test_data['bw_real_int'].ravel())
        bw_imag_int = np.int64(sysgen_test_data['bw_imag_int'].ravel())

        input_fractlength = float(sysgen_test_data['input_fractlength'][0,0])
        output_fractlength = float(sysgen_test_data['output_fractlength'][0,0])

        # Check that the integer products match MATLAB's output
        self.assertTrue(np.array_equal(bw_real_int, b_real_int*w_real_int - b_imag_int*w_imag_int))
        self.assertTrue(np.array_equal(bw_imag_int, b_real_int*w_imag_int + b_imag_int*w_real_int))

        N = len(b_real_int)
        for k in range(N):
            b_val = np.complex(b_real_int[k], b_imag_int[k])* 2**(-input_fractlength)
            w_val = np.complex(w_real_int[k], w_imag_int[k])* 2**(-input_fractlength)
            bw_val = np.complex(bw_real_int[k], bw_imag_int[k])* 2**(-output_fractlength)

            dtype = (0, input_fractlength)
            b = ComplexFixedInt(b_val, dtype=dtype)
            w = ComplexFixedInt(w_val, dtype=dtype)
            output_dtype = (1, output_fractlength)
            bw = ComplexFixedInt(bw_val, dtype=output_dtype)
            # print b*w
            self.assertTrue( (bw_imag_int[k] * 2**-output_fractlength) == float((b*w).imag) and (bw_real_int[k] * 2**-output_fractlength) == float((b*w).real) )
Example #23
def getline(data, theta):

    dr = (data["rrange"][1] - data["rrange"][0]) / data["nregrid"][0]
    dz = (data["zrange"][1] - data["zrange"][0]) / data["nregrid"][2]
    ds = min(dr, dz)
    send = np.sqrt(data["rrange"][1] ** 2 + data["zrange"][1] ** 2)
    s = np.linspace(0.0, send, int(send / ds))

    r = s * np.sin(theta * np.pi / 180.0)
    z = s * np.cos(theta * np.pi / 180.0)

    ri = np.linspace(data["rrange"][0], data["rrange"][1], data["nregrid"][0])
    zi = np.linspace(data["zrange"][0], data["zrange"][1], data["nregrid"][2])

    tmp0 = np.complex(0, data["nregrid"][0])
    tmp1 = np.complex(0, data["nregrid"][2])

    grid_x, grid_y = np.mgrid[
        data["rrange"][0] : data["rrange"][1] : tmp0, data["zrange"][0] : data["zrange"][1] : tmp1
    ]

    online = griddata(
        np.array(list(zip(grid_x.flatten(), grid_y.flatten()))),
        data["data"].data.transpose().flatten(),
        list(zip(r, z)),
        method="linear",
    )

    return {"theta": theta, "r": s, "data": online}
Example #24
def velovect(u1,u2,d,minvel=1e-40,nvect=None,scalevar=None,scale=100,color='k',fig=None):
    '''Plots normalized velocity vectors'''


    if fig==None:
        ax=plt.gca()
    else:
        ax=fig.ax

    CC=d.getCenterPoints()
    n=np.sqrt(u1**2+u2**2)
    # remove zero velocity:
    m=n<minvel
    vr=np.ma.filled(np.ma.masked_array(u1/n,m),0.)
    vz=np.ma.filled(np.ma.masked_array(u2/n,m),0.)
    if scalevar != None:
        vr = vr*scalevar
        vz = vz*scalevar
    if nvect==None:
        Q=ax.quiver(CC[:,0],CC[:,1],vr,vz,pivot='middle',width=1e-3,minlength=0.,scale=scale,
                    headwidth=6)
    else:
        # regrid the data:
        tmp0=np.complex(0,nvect[0])
        tmp1=np.complex(0,nvect[1])
        grid_r, grid_z = np.mgrid[ax.get_xlim()[0]:ax.get_xlim()[1]:tmp0, ax.get_ylim()[0]:ax.get_ylim()[1]:tmp1]
        grid_vr = griddata(CC, vr, (grid_r, grid_z), method='nearest')
        grid_vz = griddata(CC, vz, (grid_r, grid_z), method='nearest')
        Q=ax.quiver(grid_r,grid_z,grid_vr,grid_vz,pivot='middle',width=2e-3,minlength=minvel,scale=scale,
                    headwidth=10,headlength=10,color=color,edgecolor=color,rasterized=True)

    plt.draw()
    return Q     
Example #25
    def __init__(self, array, epsilon=-1, resample=-1):
        try:
            self.points = np.copy(array)
            if epsilon > 0:
                self.points = cv2.approxPolyDP(self.points, epsilon, True)
            if resample > 0:
                pass
            self.moments = cv2.moments(self.points, False)
            zeroth = self.moments["m00"]
            self.x = self.moments["m10"] / zeroth
            self.y = self.moments["m01"] / zeroth
            self.area = cv2.contourArea(self.points)
            self.perim = cv2.arcLength(self.points,True)
            self.hull = cv2.convexHull(self.points)
            self.area_hull = cv2.contourArea(self.hull)
            self.perim_hull = cv2.arcLength(self.hull,True)
            rect = cv2.minAreaRect(self.points)
            box = cv.BoxPoints(rect)
            a = np.complex(box[0][0], box[0][1])
            b = np.complex(box[1][0], box[1][1])
            c = np.complex(box[2][0], box[2][1])
            self.w, self.h = sorted([np.abs(a-b), np.abs(c-b)])
            self.is_valid = True
        except ZeroDivisionError:
            self.area = 0
            self.perim = 0
            self.hull = 0
            self.area_hull = 0
            self.perim_hull = 0

            self.w, self.h = 0,0
            self.is_valid = False
Example #26
def potbesldho(x, y, z1, z2, labda, order, ilap, naq):

    # Input:
    #   x,y: Point where potential is computed
    #   z1: Complex begin point of line-doublet
    #   z2: Complex end point of line-doublet
    #   labda(naq): labda's (zero for first labda if Laplace)
    #   order: Order of the line-doublet
    #   ilap: equals 1 when first value is Laplace line-doublet and first labda equals zero
    #   naq: Number of aquifers
    #   rv(naq): Array to store return value (must be pre-allocated)
    # Output:
    #   rv(naq): Potentials. First spot is Laplace value if ilap=1

    rv = np.zeros(naq)

    # Radius of convergence
    Rconv = 7.0

    # lstype=2 means line-doublet
    lstype = 2
    zin, z1in, z2in, Lin, z, zplus1, zmin1 = prepare_z(x, y, z1, z2)

    # Laplace line-doublet
    if ilap == 1:
        comega = z ** order * np.log(zmin1 / zplus1)
        qm = np.complex(0.0, 0.0)
        for n in range(1, int((order + 1) / 2) + 1):
            qm = qm + z ** (order - 2.0 * float(n) + 1.0) / (2.0 * float(n) - 1.0)

        comega = 1.0 / (2.0 * np.pi * np.complex(0.0, 1.0)) * (comega + 2.0 * qm)
        rv[0] = np.real(comega)

    # N-1 leakage factors
    for i in range(ilap, naq):
        pot = 0.0
        # Check whether entire linedoublet is outside radius of convergence
        # Outside if |z-zc|>L/2+7lab, and thus |Z|>1+7lab*2/L, or |zeta|>1/biglab+7 (zeta is called z here)
        biglab = 2.0 * labda[i] / Lin
        z = (2.0 * zin - (z1in + z2in)) / (z2in - z1in) / biglab

        if abs(z) < (Rconv + 1.0 / biglab):
            m1, m2, NLS = findm1m2(zin, z1in, z2in, Lin, labda[i], Rconv)
            comega = np.complex(0.0, 0.0)
            if m1 > 0:  # Otherwise outside radius of convergence
                z1new = z1in + float(m1 - 1) / float(NLS) * (z2in - z1in)
                z2new = z1in + float(m2) / float(NLS) * (z2in - z1in)
                del0 = float(1 - m1 - m2 + NLS) / float(1 - m1 + m2)
                ra = float(NLS) / float(1 + m2 - m1)
                comega = IntegralLapLineDipole(zin, z1new, z2new, del0, ra, order)

            pot = IntegralF(zin, z1in, z2in, Lin, labda[i], order, Rconv, lstype)
            rv[i] = (
                np.real(comega / np.complex(0.0, 1.0)) + np.imag(z) / biglab * pot
            )  # Note that z is really zeta in analysis
        else:
            rv[i] = 0.0

    return rv
Example #27
 def testComplex128(self):
   self._testAll(
       np.complex(1, 2) *
       np.arange(-15, 15).reshape([2, 3, 5]).astype(np.complex128))
   self._testAll(
       np.complex(1, 2) *
       np.random.normal(size=30).reshape([2, 3, 5]).astype(np.complex128))
   self._testAll(np.empty((2, 0, 5)).astype(np.complex128))
Example #28
def place_electrodes_2D(nx, ny):
    '''place nx*ny electrodes next to the column - like an MEA'''
    tot_ele = nx * ny
    zz = np.ones((tot_ele, 1)) * -25.
    xx, yy = np.mgrid[-375:375:np.complex(0, nx), -2250:450:np.complex(0, ny)]
    xx = xx.reshape(tot_ele, 1)
    yy = yy.reshape(tot_ele, 1)
    return np.hstack((xx, yy, zz))
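An illustrative call of the function above (numpy imported as np, np.complex still available): a 4 x 8 grid yields 32 rows of (x, y, z) electrode positions.

ele_pos = place_electrodes_2D(4, 8)
print(ele_pos.shape)   # (32, 3)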
Example #29
 def testComplex128(self):
   self._testBoth(np.complex(1, 2) *
                  np.arange(0, 21).reshape([3, 7]).astype(np.complex128))
   self._testBoth(np.complex(1, 2) *
                  np.arange(0, 210).reshape([2, 3, 5, 7]).astype(np.complex128))
   self._testBoth(
       np.complex(1, 2) *
       np.arange(0, 1260).reshape([2, 3, 5, 7, 2, 3]).astype(np.complex128))
Example #30
def calc_mu(k, x1, x2, x3, zeta, abel):
    mu = []
    for i in range(0, 4, 1):
        mu.append(complex(
            0.25 *pi* ((jtheta(1, abel[i]*pi,  qfrom(k=k), 1) / (jtheta(1, abel[i]*pi,  qfrom(k=k), 0)) )
                       + (jtheta(3, abel[i]*pi,  qfrom(k=k), 1) / (jtheta(3, abel[i]*pi,  qfrom(k=k), 0)) )) \
            - x3 - (x2 + complex(0,1) *x1) * zeta[i]))
    return mu
"""
Created on Sat Dec 14 13:10:53 2019

@author: lukemcculloch
"""

import numpy as np
import scipy as sp
import scipy.ndimage  # needed so that sp.ndimage resolves to the submodule
laplace = sp.ndimage.filters.laplace
import matplotlib.pyplot as plt

# https://stackoverflow.com/questions/17044052/mathplotlib-imshow-complex-2d-array
from colorsys import hls_to_rgb
from matplotlib.colors import hsv_to_rgb

imag = np.complex(0.,1.)
pi = np.pi


def colorize(z):
    n,m = z.shape
    c = np.zeros((n,m,3))
    c[np.isinf(z)] = (1.0, 1.0, 1.0)
    c[np.isnan(z)] = (0.5, 0.5, 0.5)

    idx = ~(np.isinf(z) + np.isnan(z))
    A = (np.angle(z[idx]) + np.pi) / (2*np.pi)
    A = (A + 0.5) % 1.0
    
    #A = (np.angle(z[idx]) ) 
    B = 1.0 - 1.0/(1.0+abs(z[idx])**0.3)
Example #32
#args=(data,mat,ant1,ant2)
#x=np.linspace(-.98,1.2,18)
#print (corrcal2.get_chisq(gvec,*args),'chi sq of x')


for m in range(runs):
    niter=1000;
    fac=1.e9;
    normfac=1.e-8
    asdf=fmin_cg(corrcal2.get_chisq,gvec*fac,corrcal2.get_gradient,(data,mat,ant1,ant2,fac,normfac))
    fit_gains[m,:]=asdf/fac
    abs_full=np.array([])
    for i in range(2*Ndish):
        if i%2==0:
            abs_full=np.append(abs_full,np.abs(np.complex(sim_gains[i],sim_gains[1+i]))) #have no idea what abs value gain to divide by,
                                                                                        #could be absolute fit gains
    #fit_gains[m,:]=fit_gains[m,:]/(np.mean(abs_full))

            #print (np.complex(fit_gains[m,i],fit_gains[m,1+i]),'comp fit gains')
    #for i in range(Ndish):
        #fit_gains[m,:]=fit_gains[m,:]/(np.mean(np.abs(np.complex(fit_gains[m,i],fit_gains[m,9+i]))))

#np.save('fit_gains_4.npy',fit_gains)


gain_std=(np.std(fit_gains,axis=0)/np.sqrt(runs)).flatten()
gain_mean=np.mean(fit_gains,axis=0).flatten()

#print (gvec.flatten()[0::2],'sim gains with fluctuation')
#print (fit_gains.flatten()[0::2],'fit gains')
Example #33
 def test_simple_conjugate(self):
     ref = np.conj(np.sqrt(np.complex(1, 1)))
     def f(z):
         return np.sqrt(np.conj(z))
     yield check_complex_value, f, 1, 1, ref.real, ref.imag, False
Example #34
from numpy import exp, real, pi, complex, linspace, arange, conj
from numpy.fft import fft, ifft
from matplotlib import pyplot as plt


def conv(gaus, hln):
    gaus_ft = fft(gaus)
    return real(ifft(gaus_ft * hln))


x = linspace(-20, 20, 200)
sigma = 0.5
N = len(x)
dx = 50
p = arange(N)
c = complex(0, 1)
gaus = exp(-0.5 * x**2 / sigma**2)
hln = exp(2 * pi * c * p * dx / N)
l = conv(gaus, hln)
plt.plot(x, l, 'b')
T = conv(gaus, hln)


def corr(l, T):
    ft = fft(l)
    conju = conj(fft(T))
    return ifft(ft * conju)


H = corr(l, T)
H1 = real(H)
Example #35
    def update(self, event):
        if event.inaxes != self.axes4:
            return

        if event.xdata != None:
            x = (int(event.xdata) + self.imageWidth // 2) % self.imageWidth
            y = (int(event.ydata) + self.imageHeight // 2) % self.imageHeight

            plt.sca(self.axes5)
            plt.cla()
            waveImg = numpy.zeros((self.imageHeight, self.imageWidth))
            waveImg[y, x] = 1
            plt.imshow(numpy.real(fftpack.ifft2(waveImg)), cmap='gray')

            if not self.bCtrlPressed:
                bNeedUpdate = False
                if self.samples[y, x] != self.fftImage[
                        y, x] and self.mouseButton == 1:  #left button
                    bNeedUpdate = True
                    self.samples[y, x] = self.fftImage[y, x]
                    self.samplePoints[(y - self.imageHeight // 2) %
                                      self.imageHeight,
                                      (x - self.imageWidth // 2) %
                                      self.imageWidth, 0] = 1
                    self.samplePoints[(y - self.imageHeight // 2) %
                                      self.imageHeight,
                                      (x - self.imageWidth // 2) %
                                      self.imageWidth, 3] = 1
                elif self.samples[y, x] != numpy.complex(
                        0.0, 0.0) and self.mouseButton == 3:  #right button
                    bNeedUpdate = True
                    self.samples[y, x] = numpy.complex(0.0, 0.0)
                    self.samplePoints[(y - self.imageHeight // 2) %
                                      self.imageHeight,
                                      (x - self.imageWidth // 2) %
                                      self.imageWidth, 0] = 0
                    self.samplePoints[(y - self.imageHeight // 2) %
                                      self.imageHeight,
                                      (x - self.imageWidth // 2) %
                                      self.imageWidth, 3] = 0

                if bNeedUpdate:
                    plt.sca(self.axes4)
                    plt.cla()
                    p = plt.imshow(self.fftImageForPlot, cmap='gray')
                    p.set_clim(self.fftMean - self.fftStd,
                               self.fftMean + self.fftStd)
                    plt.imshow(self.samplePoints)

                    plt.sca(self.axes3)
                    plt.cla()
                    plt.imshow(numpy.real(fftpack.ifft2(self.samples)),
                               cmap='gray')

            else:
                for xi in range(x - self.imageWidth // 32,
                                x + self.imageWidth // 32):
                    for yi in range(y - self.imageWidth // 32,
                                    y + self.imageWidth // 32):
                        if xi >= self.imageWidth:
                            xx = xi - self.imageWidth
                        else:
                            xx = xi
                        if yi >= self.imageHeight:
                            yy = yi - self.imageHeight
                        else:
                            yy = yi
                        if self.mouseButton == 1:  #left button
                            self.samples[yy, xx] = self.fftImage[yy, xx]
                            self.samplePoints[(yy - self.imageHeight // 2) %
                                              self.imageHeight,
                                              (xx - self.imageWidth // 2) %
                                              self.imageWidth, 0] = 1
                            self.samplePoints[(yy - self.imageHeight // 2) %
                                              self.imageHeight,
                                              (xx - self.imageWidth // 2) %
                                              self.imageWidth, 3] = 0.7
                        elif self.mouseButton == 3:  #right button
                            self.samples[yy, xx] = numpy.complex(0.0, 0.0)
                            self.samplePoints[(yy - self.imageHeight // 2) %
                                              self.imageHeight,
                                              (xx - self.imageWidth // 2) %
                                              self.imageWidth, 0] = 0
                            self.samplePoints[(yy - self.imageHeight // 2) %
                                              self.imageHeight,
                                              (xx - self.imageWidth // 2) %
                                              self.imageWidth, 3] = 0
                plt.sca(self.axes4)
                plt.cla()
                plt.imshow(self.samplePoints)

                plt.sca(self.axes3)
                plt.cla()
                plt.imshow(numpy.real(fftpack.ifft2(self.samples)),
                           cmap='gray')

            self.fig.canvas.draw()
Example #36
    def getimage(self, z):
        '''

        :param z: the Zernike vector in microns, starting from Z=2 (tip)
                    z[0] is seeing in arcseconds
        :return:
        '''
        #COMMON imagedata, uampl, filter2, seeing

        fact = 2. * np.pi / self.alambda

        nzer = len(z)
        phase = np.zeros_like(self.zgrid[0])  # empty array for phase

        for j in range(1, nzer):
            phase += fact * z[j] * ztools.zernike_estim(j + 1, self.zgrid)
        # # log.debug('GETIMAGE: %s %s'%(phase[0],phase[-1]))
        # exit(0)
        uampl = np.zeros((self.ngrid * 2, self.ngrid * 2), dtype=np.complex)
        #uampl = np.complex(tmp, tmp)
        self.uampl = uampl

        uampl[self.inside] += np.cos(phase)  #,
        uampl[self.inside] += (np.sin(phase) * np.complex(0, 1))

        #uampl[np.bitwise_not(self.inside)] = 0.

        self.seeing = z[0]

        #---------  compute the image ----------------------
        # imh = np.abs(ztools.shift(np.fft.ifft2(ztools.shift(uampl,self.ngrid+self.fovpix/2,self.ngrid+self.fovpix/2)),self.ngrid+self.fovpix/2,self.ngrid+self.fovpix/2))**2.
        imh = np.abs(
            ztools.shift(
                np.fft.ifft2(ztools.shift(uampl, self.ngrid, self.ngrid)),
                self.ngrid, self.ngrid))**2.

        if (self.sflag > 0):  # exact seeing blur

            filter2 = np.exp(
                -2. * np.pi**2 *
                (self.seeing / 2.35 / self.asperpix / 2 / self.ngrid)**2 *
                self.r**2)  # unbinned image
            imh = np.abs(
                np.fft.fft2(
                    ztools.shift(np.fft.fft2(imh), self.ngrid, self.ngrid) *
                    filter2))
            impix = ztools.rebin(
                imh, (self.fovpix, self.fovpix))  # rebinning into CCD pixels

        else:
            rr = ztools.shift(ztools.dist(self.fovpix), self.fovpix / 2,
                              self.fovpix / 2)
            filter2 = np.exp(
                -2. * np.pi**2 *
                (self.seeing / 2.35 / self.ccdpix / self.fovpix)**2 *
                rr**2)  # binned image
            impix = ztools.rebin(
                imh, [self.fovpix, self.fovpix])  # rebinning into CCD pixels
            impix = np.abs(
                np.fft.fft2(
                    ztools.shift(np.fft.fft2(impix), self.fovpix / 2,
                                 self.fovpix / 2) * filter2))  # Seeing blur

        self.filter2 = filter2
        return impix / np.sum(impix)
Example #37
 i = 0
 with open('UI3_B_shell.txt', newline='') as csvfile:
     spamreader = csv.reader(csvfile, delimiter=' ')
     for row in spamreader:
         temp_res[i, :] = [row[2], row[3]]
         i += 1
 P = temp_res[0:3, 0]
 P_mean = np.mean(P)
 Q = temp_res[0:3, 1]
 Q_mean = np.mean(Q)
 U = np.zeros([3, 1], dtype=complex)
 I = np.zeros([3, 1], dtype=complex)
 Z = np.zeros([3, 1], dtype=complex)
 if j == 1e6:
     for i in range(0, 3):
         U[i] = np.complex(temp_res[i + 3, 0], temp_res[i + 3, 1])
     R[j] = np.abs(U).T**2 / P
     X[j] = np.abs(U).T**2 / Q
     R_mean = np.mean(np.abs(U))**2 / P_mean
     X_mean = np.mean(np.abs(U))**2 / Q_mean
     R[j] = np.concatenate((R[j][0].T, [R_mean]))
     X[j] = np.concatenate((X[j][0].T, [X_mean]))
 else:
     for i in range(0, 3):
         I[i] = np.complex(temp_res[i + 6, 0], temp_res[i + 6, 1])
     R[j] = P / (np.abs(I).T**2)
     X[j] = Q / (np.abs(I).T**2)
     R_mean = P_mean / (np.mean(np.abs(I))**2)
     X_mean = Q_mean / (np.mean(np.abs(I))**2)
     R[j] = np.concatenate((R[j][0].T, [R_mean]))
     X[j] = np.concatenate((X[j][0].T, [X_mean]))
Example #38
def mixer(state, M, beta):
    eibxxyy = expm(np.complex(0, -1) * beta * M)
    return np.matmul(eibxxyy, state)
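An illustrative call of the function above, assuming expm is imported from scipy.linalg, numpy as np, and np.complex is still available: with M set to the Pauli-X matrix and beta = pi/2, the mixer sends |0> to -i|1>.

import numpy as np

M = np.array([[0.0, 1.0], [1.0, 0.0]])        # Pauli-X as a toy mixing operator
state = np.array([1.0 + 0.0j, 0.0 + 0.0j])    # |0>
print(mixer(state, M, np.pi / 2))             # approximately [0.+0.j, 0.-1.j]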
Example #39
#%%
"""
Created on Thu Jan 03 2019
CGMY and implied volatilities obtained with the COS method
@author: Lech A. Grzelak
"""
import numpy as np
import matplotlib.pyplot as plt
import scipy.stats as st
import enum
import scipy.optimize as optimize
import scipy.special as stFunc

# Set i= imaginary number

i = np.complex(0.0, 1.0)

# This class defines puts and calls


class OptionType(enum.Enum):
    CALL = 1.0
    PUT = -1.0


def CallPutOptionPriceCOSMthd(cf, CP, S0, r, tau, K, N, L):

    # cf   - Characteristic function is a function, in the book denoted by \varphi
    # CP   - C for call and P for put
    # S0   - Initial stock price
    # r    - Interest rate (constant)
Example #40
def phase_separator(state, C, gamma):
    eiC = np.exp(np.complex(0, -1) * gamma * C)
    return np.multiply(eiC, state)
Example #41
def build_vdf(data_np,
              nodedf,
              days=[],
              tgs=[],
              nTG=None,
              nvel=None,
              node4vel_range=1.0,
              **kwargs):
    #
    # Let's have graphsnapper call this function from each processor
    # Pass build_vdf the appropriate numpy data slice to be processed each call
    #
    if len(days) == 0:
        # Grab all days
        days = np.arange(7)
    if len(tgs) == 0:
        if not nTG:
            print("If no tgs provided, specify nTG. Exiting")
            return
        tgs = np.arange(nTG)

    vdf = pd.DataFrame(columns=[
        "day", "tg", "x_km", "y_km", "vx", "vy", "v", "nodeID", "dist2node",
        "angle"
    ])

    vi = 0
    #for v in tqdm(vfile.readlines(), disable=kwargs["disable_tqdm"]):
    for row in tqdm(data_np, disable=kwargs["disable_tqdm"]):
        day = int(row[0])
        tg = int(row[1])
        if day not in days: continue
        if tg not in tgs: continue
        # Apparently appending a list of dict
        # preserves the datatype
        # See: https://stackoverflow.com/questions/21281463/appending-to-a-dataframe-converts-dtypes
        x_km, y_km = float(row[2]), float(row[3])
        nbrnodes, d2ns = nodes4vel([x_km, y_km], nodedf, within=node4vel_range)
        if len(nbrnodes) == 0:
            # point is too far from any nodes for
            # this to be accurate data
            continue

        # Get vel angle in [-pi,pi] format
        vx, vy = float(row[4]), float(row[5])
        z = np.complex(vx, vy)
        angle = np.angle(z)

        # Iterate over neighbours and add to vdf
        for inbr in range(len(nbrnodes)):
            vdf = vdf.append([{
                "day": day,
                "tg": tg,
                "x_km": x_km,
                "y_km": y_km,
                "vx": vx,
                "vy": vy,
                "v": float(row[6]),
                "nodeID": nbrnodes[inbr],
                "dist2node": d2ns[inbr],
                "angle": angle
            }],
                             ignore_index=True)

        vi += 1
        if nvel and vi >= nvel: break

    vdf.drop_duplicates(inplace=True)
    return vdf
Example #42
def get_edge_angle(nodedf, i, j):
    dr = np.array(nodedf.at[j, "coords_km"]) - np.array(nodedf.at[i,
                                                                  "coords_km"])
    z = np.complex(dr[0], dr[1])
    return np.angle(z)
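The complex-number trick above is arctan2 in disguise: np.angle(complex(dx, dy)) equals np.arctan2(dy, dx). A standalone one-line check:

import numpy as np

dx, dy = 1.0, 1.0
assert np.isclose(np.angle(complex(dx, dy)), np.arctan2(dy, dx))   # both give pi/4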
Example #43
def plot_decision_boundary_2d(dataset, clf=None,
                              targets=None, regions=None, maps=None,
                              maps_res=50, vals=[-1, 0, 1],
                              data_callback=None):
    """Plot a scatter of a classifier's decision boundary and data points

    Assumes data is 2d (no way to visualize otherwise!!)

    Parameters
    ----------
    dataset : `Dataset`
      Data points to visualize (might be the data `clf` was train on, or
      any novel data).
    clf : `Classifier`, optional
      Trained classifier
    targets : string, optional
      What samples attributes to use for targets.  If None and clf is
      provided, then `clf.params.targets_attr` is used.
    regions : string, optional
      Plot regions (polygons) around groups of samples with the same
      attribute (and target attribute) values. E.g. chunks.
    maps : string in {'targets', 'estimates'}, optional
      Either plot underlying colored maps, such as clf predictions
      within the spanned regions, or estimates from the classifier
      (might not work for some).
    maps_res : int, optional
      Number of points in each direction to evaluate.
      Points are between axis limits, which are set automatically by
      matplotlib.  Higher number will yield smoother decision lines but come
      at the cost of O^2 classifying time/memory.
    vals : array of floats, optional
      Where to draw the contour lines if maps='estimates'
    data_callback : callable, optional
      Callable object to preprocess the new data points.
      Classified points of the form samples = data_callback(xysamples).
      I.e. this can be a function to normalize them, or cache them
      before they are classified.
    """

    if False:
        ## from mvpa2.misc.data_generators import *
        ## from mvpa2.clfs.svm import *
        ## from mvpa2.clfs.knn import *
        ## ds = dumb_feature_binary_dataset()
        dataset = normal_feature_dataset(nfeatures=2, nchunks=5,
                                         snr=10, nlabels=4, means=[ [0,1], [1,0], [1,1], [0,0] ])
        dataset.samples += dataset.sa.chunks[:, None]*0.1 # slight shifts for chunks ;)
        #dataset = normal_feature_dataset(nfeatures=2, nlabels=3, means=[ [0,1], [1,0], [1,1] ])
        #dataset = normal_feature_dataset(nfeatures=2, nlabels=2, means=[ [0,1], [1,0] ])
        #clf = LinearCSVMC(C=-1)
        clf = kNN(4)#LinearCSVMC(C=-1)
        clf.train(dataset)
        #clf = None
        #plot_decision_boundary_2d(ds, clf)
        targets = 'targets'
        regions = 'chunks'
        #maps = 'estimates'
        maps = 'targets'
        #maps = None #'targets'
        res = 50
        vals = [-1, 0, 1]
        data_callback=None
        pl.clf()

    if dataset.nfeatures != 2:
        raise ValueError('Can only plot a decision boundary in 2D')

    Pioff()
    a = pl.gca() # f.add_subplot(1,1,1)

    attrmap = None
    if clf:
        estimates_were_enabled = clf.ca.is_enabled('estimates')
        clf.ca.enable('estimates')

        if targets is None:
            targets = clf.get_space()
        # Lets reuse classifiers attrmap if it is good enough
        attrmap = clf._attrmap
        predictions = clf.predict(dataset)

    targets_sa_name = targets           # bad Yarik -- will rebind targets to actual values
    targets_lit = dataset.sa[targets_sa_name].value
    utargets_lit = dataset.sa[targets_sa_name].unique

    if not (attrmap is not None
            and len(attrmap)
            and set(clf._attrmap.keys()).issuperset(utargets_lit)):
        # create our own
        attrmap = AttributeMap(mapnumeric=True)

    targets = attrmap.to_numeric(targets_lit)
    utargets = attrmap.to_numeric(utargets_lit)

    vmin = min(utargets)
    vmax = max(utargets)
    cmap = pl.cm.RdYlGn                  # argument

    # Scatter points
    if clf:
        all_hits = predictions == targets_lit
    else:
        all_hits = np.ones((len(targets),), dtype=bool)

    targets_colors = {}
    for l in utargets:
        targets_mask = targets==l
        s = dataset[targets_mask]
        targets_colors[l] = c \
            = cmap((l-vmin)/float(vmax-vmin))

        # We want to plot hits and misses with different symbols
        hits = all_hits[targets_mask]
        misses = np.logical_not(hits)
        scatter_kwargs = dict(
            c=[c], zorder=10+(l-vmin))

        if sum(hits):
            a.scatter(s.samples[hits, 0], s.samples[hits, 1], marker='o',
                      label='%s [%d]' % (attrmap.to_literal(l), sum(hits)),
                      **scatter_kwargs)
        if sum(misses):
            a.scatter(s.samples[misses, 0], s.samples[misses, 1], marker='x',
                      label='%s [%d] (miss)' % (attrmap.to_literal(l), sum(misses)),
                      edgecolor=[c], **scatter_kwargs)

    (xmin, xmax) = a.get_xlim()
    (ymin, ymax) = a.get_ylim()
    extent = (xmin, xmax, ymin, ymax)

    # Create grid to evaluate, predict it
    (x,y) = np.mgrid[xmin:xmax:np.complex(0, maps_res),
                    ymin:ymax:np.complex(0, maps_res)]
    news = np.vstack((x.ravel(), y.ravel())).T
    try:
        news = data_callback(news)
    except TypeError: # Not a callable object
        pass

    imshow_kwargs = dict(origin='lower',
            zorder=1,
            aspect='auto',
            interpolation='bilinear', alpha=0.9, cmap=cmap,
            vmin=vmin, vmax=vmax,
            extent=extent)

    if maps is not None:
        if clf is None:
            raise ValueError(
                  "Please provide classifier for plotting maps of %s" % maps)
        predictions_new = clf.predict(news)

    if maps == 'estimates':
        # Contour and show predictions
        trained_targets = attrmap.to_numeric(clf.ca.trained_targets)

        if len(trained_targets)==2:
            linestyles = []
            for v in vals:
                if v == 0:
                    linestyles.append('solid')
                else:
                    linestyles.append('dashed')
            vmin, vmax = -3, 3 # Gives a nice tonal range ;)
            map_ = 'estimates' # should actually depend on estimates
        else:
            vals = (trained_targets[:-1] + trained_targets[1:])/2.
            linestyles = ['solid'] * len(vals)
            map_ = 'targets'

        try:
            map_values = clf.ca.estimates.reshape(x.shape)
            a.imshow(map_values.T, **imshow_kwargs)
            CS = a.contour(x, y, map_values, vals, zorder=6,
                           linestyles=linestyles, extent=extent, colors='k')
        except ValueError as e:
            print("Sorry - plotting of estimates isn't fully supported for %s. "
                  "Got exception %s" % (clf, e))
Example #44
ncl = numpy.empty((2,mx1),int_type,'F')
# ihole = location/destination of each particle departing tile
ihole = numpy.empty((2,ntmax+1,mx1),int_type,'F')
# copy ordered particle data for OpenMP: updates ppart and kpic
ppmovin1l(part,ppart,kpic,nppmx0,idimp,np,mx,mx1,irc)
if (irc[0] != 0):
   print "ppmovin1l overflow error, irc=", irc[0]
   exit(0)
# sanity check
ppcheck1l(ppart,kpic,idimp,nppmx0,nx,mx,mx1,irc)
if (irc[0] != 0):
   print "ppcheck1l error, irc=", irc[0]
   exit(0)

# initialize transverse electromagnetic fields
eyz.fill(numpy.complex(0.0,0.0))
byz.fill(numpy.complex(0.0,0.0))

if (dt > 0.64*ci):
   print "Warning: Courant condition may be exceeded!"

# * * * start main iteration loop * * *

for ntime in range(0,nloop):
#  print "ntime = ", ntime

# deposit current with OpenMP:
   dtimer(dtime,itime,-1)
   cue.fill(0.0)
   if (relativity==1):
# updates ppart, cue
Example #45
 def env_aug_shaping_function(freqs):
     centers = np.ones_like(freqs) * np.complex(49, -50)
     radius = np.ones_like(freqs) * 200
     return centers, radius
Example #46
 def local_stiffness_matrix(self):
     stiffness_matrix = np.array(
         [[self.k_x * np.complex(1, self.n_x), 0, 0],
          [0, self.k_y * np.complex(1, self.n_y), 0],
          [0, 0, self.k_z * np.complex(1, self.n_z)]])
     return stiffness_matrix
Example #47
 def eval(self, *args, **kwds):
     r"""Evaluate the differential at the complex point :math:`(x,y)`.
     """
     val = self.numer_n(*args, **kwds) / self.denom_n(*args, **kwds)
     return numpy.complex(val)
Example #48
 def g(a, b):
     return np.abs(np.complex(a, b))
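For real a and b this is just the Euclidean magnitude, so np.hypot gives the same result without building a complex number. A standalone check:

import numpy as np

assert np.isclose(np.hypot(3.0, 4.0), abs(complex(3.0, 4.0)))   # both equal 5.0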
Example #49
 def init_m(self, m, n, order='F'):
     M = N.zeros((m, n), order=order, dtype=self.dtype)
     for i in range(M.shape[0]):
         for j in range(M.shape[1]):
             M[i, j] = N.complex(i * 10 + j, 0.1 * (i * 10 + j))
     return M
Example #50
def mp_to_complex(mpcarr):
    a = np.zeros(len(mpcarr),dtype=np.complex)
    for i in range(len(mpcarr)):
        a[i] = np.complex(mpcarr[i])
    return a
Example #51
    def load_flow(self, network):
        """
        This method will implement the Load Flow algorithm.
        :param: network: the network on which we want to do the load flow.
        :return: dic: A dictionary containing every matrix/array involved in the load flow resolution.
        """
        # main.m
        alpha = 1
        nb_brackets = network.get_nb_brackets()-1
        # Battery settings
        bat_node = 2
        bat_phase = 2
        bat = (bat_node-2)*3 + bat_phase
        Ebat = 0
        Ebat_max = 120000
        Pbat = 60000
        # End
        # Grid_definition.m
        grid = self.grid_definition(network)
        K = grid['K']
        Zbr = grid['Zbr']
        vec_phases_index = grid['vec_phases_index']
        # End of Grid_Definition
        brackets = network.get_brackets()[1:]
        network_nodes = [brackets[i].get_node() for i in range(nb_brackets)]
        # load_flow.m
        Ibus = np.zeros((3 * nb_brackets), dtype=np.complex128)
        Ibus = Ibus[:, np.newaxis]
        Vnl = network.get_slack_voltage()
        Vnl = Vnl[vec_phases_index]
        Vbus = Vnl
        Vbr_prev = Vnl
        # If we don't define Tmp as a N-Dim Array, the Tile function will broadcast it to a N-Dim Array of shape
        # (1, 1, 57) instead of letting it be (57, 1, 1). This will result by producing a new matrix of shape
        # (1, 570, 96). I guess that the tile function will perform some multiplication on the dimensions
        # and then will join'em. If Vnl(57,1) & Newmat(10,96):
        # Result = (1, 57*10, 96)... Which is not really what we want.
        Tmp = (Vnl * 0)
        Tmp = Tmp[:, np.newaxis]
        V = np.tile(Tmp, (1,1,1))
        I = np.tile(Tmp, (1,1,1))
        # We don't use the Tmp matrix here because Vnl won't be broadcasted to a 3D matrix but to a 1D. So the bug
        # that has been resolved earlier won't happen here
        # Imean = np.tile(Vnl*0, (96))
        # Vmean = np.tile(Vnl*0, (96))
        powers = []

        for node in network_nodes:
            n_pow = []
            for user in node.get_users():
                n_pow.append(user.get_P())
            powers.extend(n_pow)

        """
        Here, we are assigning the NumPy functions we are going to use into the load flow loop to gain
        a little bit more efficiency.
        """
        # NumPy Functions
        conj = np.conj
        divide = np.divide
        absolute = np.abs
        less = np.less
        zeros = np.zeros
        # Here is the wrapping of the load flow:
        # h = 0, nb iterations
        # q = 0, 96
        P = np.asarray(powers)
        P = divide(P, 2)
        Q = np.zeros_like(P)  # reactive power is zero for every load (np.dot(P, [0]) would fail for len(P) > 1)
        # Initializing arrays to optimize
        Ibr = zeros((nb_brackets, 1))
        Vbr = zeros((nb_brackets, 1))
        # Before we enter the loop, we make sure we are going to work with matrices instead of arrays.
        Ibr = np.matrix(Ibr)
        Vbr = np.matrix(Vbr)
        # LOAD FLOW LOOP
        k = 0
        t = process_time()
        while True:
            k += 1
            bal = 0
            for i in range(len(P)):
                if k == 1:
                    Ibus[i] = -(np.matrix(np.complex(P[i], Q[i])/Vbus[i]).conj())
                else:
                    Ibus[i] = -(np.matrix(np.complex(P[i], Q[i]) / Vbus[i]).conj())
                if i % 3 == bat:
                    bal = bal + P[i]
            if bat != 0:
                if bal < 0:
                    if Ebat < Ebat_max:
                        Ibus[bat] = min([conj(-Pbat/Vbus[bat]),
                                         conj(bal/Vbus[bat]),
                                         conj(-(Ebat_max - Ebat)/(Vbus[bat]*0.25))])
                        Ebat += absolute(np.dot(Ibus[bat], Vbus[bat])) * 0.25
                    elif Ebat > 0:
                        Ibus[bat] = min([conj(Pbat/Vbus[bat]),
                                         conj(bal/Vbus[bat]),
                                         conj(Ebat/(Vbus[bat]*0.25))])
                        Ebat -= absolute(np.dot(Ibus[bat], Vbus[bat])) * 0.
            Ibr = K * Ibus
            Vbr = Zbr * Ibr
            if (less(divide(absolute(Vbr - Vbr_prev), absolute(Vbr + 0.0000000000000001)), self.__tolerance)).all():
                break
            Vbr = Vbr_prev + (alpha * (Vbr - Vbr_prev))
            Vbr_prev = Vbr
            Vbus = Vnl + np.dot(K.conj().T, Vbr)
        Vbus = Vnl + np.dot(K.conj().T, Vbr)
        V[:] = Vbus[:, :, np.newaxis]
        I[:] = Ibr[:, :, np.newaxis]
        # Pbr and Qbr are two distinct float arrays (a chained Pbr = Qbr = ... assignment
        # would bind both names to the same integer buffer and truncate the results).
        Pbr = np.zeros((nb_brackets, len(vec_phases_index), 2))
        Qbr = np.zeros((nb_brackets, len(vec_phases_index), 2))
        for i in range(nb_brackets):
            for j in range(len(vec_phases_index)):
                i_to_j = self.powerflow(Vbus[i], Ibr[i])
                j_to_i = self.powerflow(Vbus[i+1], Ibr[i])
                Pbr[i][j][0] = i_to_j['active']
                Pbr[i][j][1] = j_to_i['active']
                Qbr[i][j][0] = i_to_j['reactive']
                Qbr[i][j][1] = j_to_i['reactive']
        print(np.shape(Pbr), Qbr.shape)
        # END OF LOAD FLOW
        # End of load_flow.m
        print("Process executed in", process_time() - t, "s")
        dic = {
            'Ibus_bat': Ibus[bat],
            'Ebat': Ebat,
            'V': V,
            'Vbr': Vbr,
            'Vbus': Vbus,
            'I': I,
            'Ibus': Ibus,
            'Ibr': Ibr,
            'Zbr': Zbr,
            'P': P,
            'K': K,
            'Vnl': Vnl,
            'Pbr': Pbr,
            'Qbr': Qbr
        }
        return dic
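# A minimal, self-contained sketch of the convergence test used in the load-flow loop
# above, factored into a helper for readability (illustrative only; the original keeps
# the check inline and adds the epsilon guard inside the modulus):
import numpy as np

def has_converged(new, old, tolerance, eps=1e-16):
    """True when the element-wise relative change of `new` w.r.t. `old` is below tolerance."""
    rel_change = np.abs(new - old) / (np.abs(new) + eps)
    return bool(np.less(rel_change, tolerance).all())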
Пример #52
0
    def linearize(self, params, unknowns, resids):
        """
        Uses complex step method to calculate a Jacobian dict.

        Args
        ----
        params : `VecWrapper`
            `VecWrapper` containing parameters. (p)

        unknowns : `VecWrapper`
            `VecWrapper` containing outputs and states. (u)

        resids : `VecWrapper`
            `VecWrapper` containing residuals. (r)

        Returns
        -------
        dict
            Dictionary whose keys are tuples of the form ('unknown', 'param')
            and whose values are ndarrays.
        """

        # our complex step
        step = self.complex_stepsize * 1j

        J = OrderedDict()
        non_pbo_unknowns = self._non_pbo_unknowns

        for param in params:

            pwrap = _TmpDict(params)

            pval = params[param]
            if isinstance(pval, ndarray):
                # replace the param array with a complex copy
                pwrap[param] = numpy.asarray(pval, complex)
                idx_iter = array_idx_iter(pwrap[param].shape)
                psize = pval.size
            else:
                pwrap[param] = complex(pval)
                idx_iter = (None, )
                psize = 1

            for i, idx in enumerate(idx_iter):
                # set a complex param value
                if idx is None:
                    pwrap[param] += step
                else:
                    pwrap[param][idx] += step

                uwrap = _TmpDict(unknowns, complex=True)

                # solve with complex param value
                self.solve_nonlinear(pwrap, uwrap, resids)

                for u in non_pbo_unknowns:
                    jval = numpy.atleast_1d(
                        imag(uwrap[u] / self.complex_stepsize))
                    if (u, param) not in J:  # create the dict entry
                        J[(u, param)] = numpy.zeros((jval.size, psize))

                    # set the column in the Jacobian entry
                    J[(u, param)][:, i] = jval.flat

                # restore old param value
                if idx is None:
                    pwrap[param] -= step
                else:
                    pwrap[param][idx] -= step

        return J
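# A minimal, self-contained illustration of the complex-step trick used above; the
# function and step size below are illustrative and not part of the component.
# Perturbing an input by i*h and taking imag(f)/h gives the derivative without the
# subtractive cancellation that plagues finite differences.
import numpy

def _demo(x):
    return x**2 + 3.0 * x

_h = 1e-30
_x0 = 2.0
_dfdx = numpy.imag(_demo(_x0 + _h * 1j)) / _h   # 2*_x0 + 3 = 7.0 to machine precision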
Пример #53
0
    def test_special_values(self):
        # C99: Section G 6.3.1

        check = check_complex_value
        f = np.exp

        # cexp(+-0 + 0i) is 1 + 0i
        yield check, f, np.PZERO, 0, 1, 0, False
        yield check, f, np.NZERO, 0, 1, 0, False

        # cexp(x + infi) is nan + nani for finite x and raises 'invalid' FPU
        # exception
        yield check, f,  1, np.inf, np.nan, np.nan
        yield check, f, -1, np.inf, np.nan, np.nan
        yield check, f,  0, np.inf, np.nan, np.nan

        # cexp(inf + 0i) is inf + 0i
        yield check, f,  np.inf, 0, np.inf, 0

        # cexp(-inf + yi) is +0 * (cos(y) + i sin(y)) for finite y
        ref = np.complex(np.cos(1.), np.sin(1.))
        yield check, f,  -np.inf, 1, np.PZERO, np.PZERO

        ref = np.complex(np.cos(np.pi * 0.75), np.sin(np.pi * 0.75))
        yield check, f,  -np.inf, 0.75 * np.pi, np.NZERO, np.PZERO

        # cexp(inf + yi) is +inf * (cos(y) + i sin(y)) for finite y
        ref = np.complex(np.cos(1.), np.sin(1.))
        yield check, f,  np.inf, 1, np.inf, np.inf

        ref = np.complex(np.cos(np.pi * 0.75), np.sin(np.pi * 0.75))
        yield check, f,  np.inf, 0.75 * np.pi, -np.inf, np.inf

        # cexp(-inf + inf i) is +-0 +- 0i (signs unspecified)
        def _check_ninf_inf(dummy):
            msgform = "cexp(-inf, inf) is (%f, %f), expected (+-0, +-0)"
            err = np.seterr(invalid='ignore')
            try:
                z = f(np.array(np.complex(-np.inf, np.inf)))
                if z.real != 0 or z.imag != 0:
                    raise AssertionError(msgform %(z.real, z.imag))
            finally:
                np.seterr(**err)

        yield _check_ninf_inf, None

        # cexp(inf + inf i) is +-inf + NaN i and raises the 'invalid' FPU exception.
        def _check_inf_inf(dummy):
            msgform = "cexp(inf, inf) is (%f, %f), expected (+-inf, nan)"
            err = np.seterr(invalid='ignore')
            try:
                z = f(np.array(np.complex(np.inf, np.inf)))
                if not np.isinf(z.real) or not np.isnan(z.imag):
                    raise AssertionError(msgform % (z.real, z.imag))
            finally:
                np.seterr(**err)

        yield _check_inf_inf, None

        # cexp(-inf + nan i) is +-0 +- 0i
        def _check_ninf_nan(dummy):
            msgform = "cexp(-inf, nan) is (%f, %f), expected (+-0, +-0)"
            err = np.seterr(invalid='ignore')
            try:
                z = f(np.array(np.complex(-np.inf, np.nan)))
                if z.real != 0 or z.imag != 0:
                    raise AssertionError(msgform % (z.real, z.imag))
            finally:
                np.seterr(**err)

        yield _check_ninf_nan, None

        # cexp(inf + nan i) is +-inf + nan
        def _check_inf_nan(dummy):
            msgform = "cexp(-inf, nan) is (%f, %f), expected (+-inf, nan)"
            err = np.seterr(invalid='ignore')
            try:
                z = f(np.array(np.complex(np.inf, np.nan)))
                if not np.isinf(z.real) or not np.isnan(z.imag):
                    raise AssertionError(msgform % (z.real, z.imag))
            finally:
                np.seterr(**err)

        yield _check_inf_nan, None

        # cexp(nan + yi) is nan + nani for y != 0 (optional: raises invalid FPU
        # ex)
        yield check, f, np.nan, 1, np.nan, np.nan
        yield check, f, np.nan, -1, np.nan, np.nan

        yield check, f, np.nan,  np.inf, np.nan, np.nan
        yield check, f, np.nan, -np.inf, np.nan, np.nan

        # cexp(nan + nani) is nan + nani
        yield check, f, np.nan, np.nan, np.nan, np.nan
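# The checks above use the legacy yield/np.seterr test style; a hedged sketch of the
# same kind of C99 special-value check written with the errstate context manager,
# covering only cexp(-inf + 1i) = +0 * (cos 1 + i sin 1) (illustrative):
import numpy as np

with np.errstate(invalid='ignore'):
    z = np.exp(np.complex128(complex(-np.inf, 1.0)))
assert z.real == 0.0 and z.imag == 0.0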
Пример #54
0
    def process(self, processor, event: pygame.event.Event):
        focus = processor.focus
        raw_data = focus.raw_data
        pixel_array = raw_data[:, :, :3]
        pixel_array = np.swapaxes(pixel_array, 0, 1)
        pixel_bgr = cv2.cvtColor(pixel_array, cv2.COLOR_RGB2BGR)
        dets = self.detector(pixel_bgr, 1)
        shape = None

        for index, d in enumerate(dets):
            shape = self.predictor(pixel_bgr, d)

        xys = []
        for i in range(0, 68):
            xys.append((shape.part(i).x, shape.part(i).y))

        outline = xys[:17]
        l_eyebrow = xys[17:22]
        r_eyebrow = xys[22:27]
        nose = xys[27:28] + xys[31:36]
        l_eye = xys[36:42]
        r_eye = xys[42:48]
        mouth = xys[48:68]
        nose_mask = self.get_mask(nose, pixel_bgr)

        # get front head ellipse
        center = np.mean((outline[0], outline[-1]), axis=0)
        center = (int(center[0]), int(center[1]))
        comp = np.complex(*np.subtract(outline[0], outline[-1]))
        diameter = np.abs(comp)
        angle = np.angle(comp) / np.pi * 180
        minor = int(diameter / 2)
        major = int(np.linalg.norm(np.subtract(center, outline[8])))
        fronthead = self.get_zeros(pixel_bgr)
        cv2.ellipse(img=fronthead, center=center, axes=(minor, major),
                    angle=angle, startAngle=0, endAngle=180, color=(0, 0, 255), thickness=-1)
        # get front head mask
        low, up = self.get_bound(pixel_bgr[nose_mask], 1.5)
        color_mask = (pixel_bgr[:, :, :] >= low) * (pixel_bgr[:, :, :] <= up)
        color_mask = np.mean(color_mask, 2) > 0
        fronthead_mask = (fronthead[:, :, 2] == 255) * color_mask
        fronthead = self.get_zeros(pixel_bgr)
        fronthead[fronthead_mask] = (0, 0, 255)
        Y, Cr, Cb = cv2.split(cv2.cvtColor(fronthead, cv2.COLOR_BGR2YCR_CB))
        ksize = self.get_ksize(pixel_bgr.shape[:2], 20)
        Y = cv2.blur(Y, ksize)
        ret, thresh = cv2.threshold(Y, 45, 255, 0)
        points = np.array(np.where(thresh > 0), dtype=np.int32).transpose()
        landmark = cv2.convexHull(points).squeeze()
        contour = self.get_zeros(pixel_bgr)
        contour = cv2.fillConvexPoly(contour, np.flip(landmark, 1), (0, 0, 255))
        # get organ mask
        outline_mask = self.get_mask(outline, pixel_bgr)
        l_eyebrow_mask = self.get_mask(l_eyebrow, pixel_bgr)
        r_eyebrow_mask = self.get_mask(r_eyebrow, pixel_bgr)
        l_eye_mask = self.get_mask(l_eye, pixel_bgr)
        r_eye_mask = self.get_mask(r_eye, pixel_bgr)
        mouth_mask = self.get_mask(mouth, pixel_bgr)
        face_mask = (contour[:, :, 2] == 255) | (outline_mask)
        face_mask = face_mask & (~ l_eyebrow_mask) & (~ r_eyebrow_mask) & (~ l_eye_mask) & \
                    (~ r_eye_mask) & (~ mouth_mask)
        # whiten
        enhanced = pixel_bgr.copy()
        blurred_mask = cv2.blur(np.float32(face_mask), ksize)
        enhanced = (enhanced + blurred_mask[:, :, None] * np.ones(enhanced.shape) * 10).astype(np.int32)
        enhanced = np.clip(enhanced, 0, 255)
        # eye
        blurred_mask = cv2.blur(np.array(l_eye_mask | r_eye_mask, dtype=np.float32), ksize)
        H, S, V = cv2.split(cv2.cvtColor(np.float32(enhanced), cv2.COLOR_BGR2HSV))
        V = (V + blurred_mask * np.ones(enhanced.shape[:2]) * 30)
        V = np.clip(V, 0, 255).astype(np.float32)
        enhanced = cv2.cvtColor(np.float32(cv2.merge((H, S, V))), cv2.COLOR_HSV2BGR)
        # mouth
        mouth_ksize = self.get_ksize(pixel_bgr.shape[:2], 50)
        blurred_mask = cv2.blur(np.array(mouth_mask, dtype=np.float32), mouth_ksize)
        H, S, V = cv2.split(cv2.cvtColor(np.float32(enhanced), cv2.COLOR_BGR2HSV))
        S = (S + blurred_mask * np.ones(enhanced.shape[:2]) * 0.3)
        S = np.clip(S, 0, 255).astype(np.float32)
        enhanced = cv2.cvtColor(np.float32(cv2.merge((H, S, V))), cv2.COLOR_HSV2BGR)
        # smooth
        blurred_mask = cv2.blur(np.array(face_mask, dtype=np.float32), ksize)[:, :, None]
        if ksize[0] > 5:
            blurred = cv2.medianBlur(np.uint8(enhanced), ksize[0])
        else:
            blurred = cv2.medianBlur(np.float32(enhanced), ksize[0])
        enhanced = enhanced * (1 - blurred_mask * 0.5) + blurred * blurred_mask * 0.5
        enhanced = np.clip(enhanced, 0, 255).astype(np.int32)
        enhanced = cv2.cvtColor(np.float32(enhanced), cv2.COLOR_BGR2RGB)

        raw_data[:, :, :3] = np.swapaxes(enhanced, 0, 1)
        focus.raw_data = raw_data
        focus.construct_surface()
        processor.REFRESH = True
        processor.PROCESS = False
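# The ellipse geometry above packs a 2-D offset into a complex number so that np.abs
# gives the segment length and np.angle its orientation; a minimal, self-contained
# sketch with illustrative values (np.complex in the listing is an alias of Python's
# built-in complex):
import numpy as np

dx, dy = 30.0, 40.0                       # offset between two landmark points
comp = complex(dx, dy)
diameter = np.abs(comp)                   # 50.0
angle_deg = np.angle(comp) / np.pi * 180  # about 53.13 degrees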
Пример #55
0
    def test_special_values(self):
        xl = []
        yl = []

        # From C99 std (Sec 6.3.2)
        # XXX: check exceptions raised
        # --- raise for invalid fails.

        # clog(-0 + i0) returns -inf + i pi and raises the 'divide-by-zero'
        # floating-point exception.
        err = np.seterr(divide='raise')
        try:
            x = np.array([np.NZERO], dtype=np.complex)
            y = np.complex(-np.inf, np.pi)
            self.assertRaises(FloatingPointError, np.log, x)
            np.seterr(divide='ignore')
            assert_almost_equal(np.log(x), y)
        finally:
            np.seterr(**err)

        xl.append(x)
        yl.append(y)

        # clog(+0 + i0) returns -inf + i0 and raises the 'divide-by-zero'
        # floating-point exception.
        err = np.seterr(divide='raise')
        try:
            x = np.array([0], dtype=np.complex)
            y = np.complex(-np.inf, 0)
            self.assertRaises(FloatingPointError, np.log, x)
            np.seterr(divide='ignore')
            assert_almost_equal(np.log(x), y)
        finally:
            np.seterr(**err)

        xl.append(x)
        yl.append(y)

        # clog(x + i inf) returns +inf + i pi/2, for finite x.
        x = np.array([complex(1, np.inf)], dtype=np.complex)
        y = np.complex(np.inf, 0.5 * np.pi)
        assert_almost_equal(np.log(x), y)
        xl.append(x)
        yl.append(y)

        x = np.array([complex(-1, np.inf)], dtype=np.complex)
        assert_almost_equal(np.log(x), y)
        xl.append(x)
        yl.append(y)

        # clog(x + iNaN) returns NaN + iNaN and optionally raises the
        # 'invalid' floating-point exception, for finite x.
        err = np.seterr(invalid='raise')
        try:
            x = np.array([complex(1., np.nan)], dtype=np.complex)
            y = np.complex(np.nan, np.nan)
            #self.assertRaises(FloatingPointError, np.log, x)
            np.seterr(invalid='ignore')
            assert_almost_equal(np.log(x), y)
        finally:
            np.seterr(**err)

        xl.append(x)
        yl.append(y)

        err = np.seterr(invalid='raise')
        try:
            x = np.array([np.inf + 1j * np.nan], dtype=np.complex)
            #self.assertRaises(FloatingPointError, np.log, x)
            np.seterr(invalid='ignore')
            assert_almost_equal(np.log(x), y)
        finally:
            np.seterr(**err)

        xl.append(x)
        yl.append(y)

        # clog(-inf + iy) returns +inf + i pi, for finite positive-signed y.
        x = np.array([-np.inf + 1j], dtype=np.complex)
        y = np.complex(np.inf, np.pi)
        assert_almost_equal(np.log(x), y)
        xl.append(x)
        yl.append(y)

        # clog(+ inf + iy) returns +inf + i0, for finite positive-signed y.
        x = np.array([np.inf + 1j], dtype=np.complex)
        y = np.complex(np.inf, 0)
        assert_almost_equal(np.log(x), y)
        xl.append(x)
        yl.append(y)

        # clog(- inf + i inf) returns +inf + i3pi /4.
        x = np.array([complex(-np.inf, np.inf)], dtype=np.complex)
        y = np.complex(np.inf, 0.75 * np.pi)
        assert_almost_equal(np.log(x), y)
        xl.append(x)
        yl.append(y)

        # clog(+ inf + i inf) returns +inf + ipi /4.
        x = np.array([complex(np.inf, np.inf)], dtype=np.complex)
        y = np.complex(np.inf, 0.25 * np.pi)
        assert_almost_equal(np.log(x), y)
        xl.append(x)
        yl.append(y)

        # clog(+/- inf + iNaN) returns +inf + iNaN.
        x = np.array([complex(np.inf, np.nan)], dtype=np.complex)
        y = np.complex(np.inf, np.nan)
        assert_almost_equal(np.log(x), y)
        xl.append(x)
        yl.append(y)

        x = np.array([complex(-np.inf, np.nan)], dtype=np.complex)
        assert_almost_equal(np.log(x), y)
        xl.append(x)
        yl.append(y)

        # clog(NaN + iy) returns NaN + iNaN and optionally raises the
        # 'invalid' floating-point exception, for finite y.
        x = np.array([complex(np.nan, 1)], dtype=np.complex)
        y = np.complex(np.nan, np.nan)
        assert_almost_equal(np.log(x), y)
        xl.append(x)
        yl.append(y)

        # clog(NaN + i inf) returns +inf + iNaN.
        x = np.array([complex(np.nan, np.inf)], dtype=np.complex)
        y = np.complex(np.inf, np.nan)
        assert_almost_equal(np.log(x), y)
        xl.append(x)
        yl.append(y)

        # clog(NaN + iNaN) returns NaN + iNaN.
        x = np.array([complex(np.nan, np.nan)], dtype=np.complex)
        y = np.complex(np.nan, np.nan)
        assert_almost_equal(np.log(x), y)
        xl.append(x)
        yl.append(y)

        # clog(conj(z)) = conj(clog(z)).
        xa = np.array(xl, dtype=np.complex)
        ya = np.array(yl, dtype=np.complex)
        err = np.seterr(divide='ignore')
        try:
            for i in range(len(xa)):
                assert_almost_equal(np.log(np.conj(xa[i])), np.conj(np.log(xa[i])))
        finally:
            np.seterr(**err)
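# A compact, self-contained spot-check of the final identity above,
# clog(conj(z)) == conj(clog(z)), on a few ordinary values away from the branch cut
# (illustrative only):
import numpy as np
from numpy.testing import assert_almost_equal

for z in (1 + 2j, -3 + 0.5j, 0.1 - 4j):
    assert_almost_equal(np.log(np.conj(z)), np.conj(np.log(z)))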
Пример #56
0
def xy_grid(cpo,
            nxy=30,
            ncon=32,
            xymax='Default',
            cmin=10.,
            cmax=500.,
            threads=0,
            err_scale=1.,
            extra_error=0.,
            fix_crat=False,
            cmap='ds9cool',
            plot_as_mags=False,
            projected=False):
    '''An attempt to copy Sylvestre's chi2 grid plots, using x and y instead
    of separation and position angle.

    Written by A Cheetham, with some parts stolen from other pysco/pymask routines.'''

    #------------------------
    # first, load your data!
    #------------------------

    ndata = cpo.ndata

    u, v = cpo.u, cpo.v

    cpo.t3err = np.sqrt(cpo.t3err**2 + extra_error**2)
    cpo.t3err *= err_scale

    wavel = cpo.wavel

    w = np.array(np.sqrt(u**2 + v**2)) / np.median(wavel)

    if xymax == 'Default':
        #        xymax = cpt.rad2mas(1./np.min(w/np.max(wavel)))
        xymax = rad2mas(1. / np.min(w))

    #------------------------
    # initialise grid params
    #------------------------

    xys = np.linspace(-xymax, xymax, nxy)
    #    cons = cmin  + (cmax-cmin)  * np.linspace(0,1,ncon)
    cons = np.linspace(cmin, cmax, ncon)

    if fix_crat != False:
        cons = np.array([fix_crat])
        ncon = 1

    #------------------------
    # Calculate chi2 at each point
    #------------------------

    tic = time.time()  # start the clock
    chi2 = np.zeros((nxy, nxy, ncon))
    if threads == 0:
        toc = time.time()
        for ix, x in enumerate(xys):
            everything = {
                'x': x,
                'cons': cons,
                'ys': xys,
                'cpo': cpo,
                'ix': ix,
                'projected': projected
            }
            chi2[ix, :, :] = chi2_grid(everything)
            if (ix % 50) == 0:
                tc = time.time()
                print('Done ' + str(ix) + '. Time taken: ' + str(tc - toc) +
                      ' seconds')
                toc = tc
    else:
        all_vars = []
        for ix in range(nxy):
            everything = {
                'x': xys[ix],
                'cons': cons,
                'ys': xys,
                'cpo': cpo,
                'ix': ix,
                'projected': projected
            }
            all_vars.append(everything)
        pool = Pool(processes=threads)
        chi2 = pool.map(chi2_grid, all_vars)
    tf = time.time()
    if tf - tic > 60:
        print('Total time elapsed: ' + str((tf - tic) / 60.) + ' mins')
    elif tf - tic <= 60:
        print('Total time elapsed: ' + str(tf - tic) + ' seconds')
    chi2 = np.array(chi2)
    best_ix = np.where(chi2 == np.amin(chi2))

    #hack: if the best chi2 is at more than one location, take the first.
    bestx = xys[best_ix[0][0]]
    besty = xys[best_ix[1][0]]
    sep = np.sqrt(bestx**2 + besty**2)
    pa = np.angle(np.complex(bestx, besty), True) % 360
    best_params = [sep, pa, cons[best_ix[2][0]]]
    best_params = np.array(np.array(best_params).ravel())
    print('Separation ' + str(best_params[0]) + ' mas')
    print('Position angle ' + str(best_params[1]) + ' deg')
    print('Contrast Ratio ' + str(best_params[2]))
    # ---------------------------------------------------------------
    #                        sum over each variable so we can visualise it all
    # ---------------------------------------------------------------
    temp_chi2 = ndata * chi2 / np.amin(chi2)
    like = np.exp(-(temp_chi2 - ndata) / 2)
    x_y = np.sum(like, axis=2)

    # ---------------------------------------------------------------
    #                        contour plot!
    # ---------------------------------------------------------------
    names = ['Chi2', 'Likelihood', 'Best Contrast Ratio']
    plots = [np.min(chi2, axis=2), x_y, cons[np.argmin(chi2, axis=2)]]
    for ix, n in enumerate(names):

        plt.figure(n)
        plt.clf()
        # Plot it with RA on the X axis
        plt.imshow(
            plots[ix],
            extent=[np.amin(xys),
                    np.amax(xys),
                    np.amin(xys),
                    np.amax(xys)],
            aspect='auto',
            cmap=cmap)
        plt.colorbar()
        plt.ylabel('Dec (mas)')
        plt.xlabel('RA (mas)')

        plt.plot([0], [0], 'wo')
        plt.xlim(xys[-1], xys[0])
        plt.ylim(xys[0], xys[-1])

    # ---------------------------------------------------------------
    #               And the detection limits that come for free!
    # ---------------------------------------------------------------
    chi2_null = np.sum((cpo.t3data / cpo.t3err)**2)
    # Define the detec limits to be the contrast at which chi2_binary - chi2_null < 25
    detecs = (chi2 - chi2_null) < 25
    detec_lim = np.zeros((nxy, nxy))
    for x_ix in range(nxy):
        for y_ix in range(nxy):
            detectable_cons = cons[detecs[x_ix, y_ix, :]]
            if len(detectable_cons) == 0:
                detec_lim[x_ix, y_ix] = cons[-1]
            else:
                detec_lim[x_ix, y_ix] = np.min(detectable_cons)

    if plot_as_mags:
        detec_lim_plot = -2.5 * np.log10(detec_lim)
    else:
        detec_lim_plot = detec_lim

    plt.figure(1)
    plt.clf()
    #    plt.imshow(detec_lim,extent=(xys[0],xys[-1],xys[0],xys[-1]),cmap=cmap)
    # Plot it with RA on the X axis
    plt.imshow(detec_lim_plot,
               extent=(xys[0], xys[-1], xys[0], xys[-1]),
               cmap=cmap)
    plt.colorbar()
    plt.title('Detection limits')
    plt.xlabel('RA (mas)')
    plt.ylabel('Dec (mas)')
    plt.xlim(xys[-1], xys[0])
    plt.ylim(xys[0], xys[-1])

    # And we should also print whether the likelihood peak is a detection
    #  according to the limits we just calculated
    limit_at_pos = detec_lim[best_ix[0][0], best_ix[1][0]]
    print('Contrast limit at best fit position: ' + str(limit_at_pos))
    if limit_at_pos > best_params[2]:
        print('Detection!')
    else:
        print('No significant detection found')

    data = {
        'chi2': chi2,
        'like': like,
        'xys': xys,
        'cons': cons,
        'best_params': best_params,
        'detec_lim': detec_lim
    }
    return data
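# The per-pixel detection-limit loop above can also be written in vectorised form; a
# hedged sketch that assumes the same (nxy, nxy, ncon) chi2 cube and that cons is
# sorted in increasing contrast, as the np.linspace call above produces:
import numpy as np

def detection_limits(chi2, chi2_null, cons, threshold=25.0):
    detecs = (chi2 - chi2_null) < threshold      # boolean cube (nxy, nxy, ncon)
    first = np.argmax(detecs, axis=2)            # index of the first detectable contrast
    lims = cons[first]                           # fancy indexing returns a copy, safe to edit
    lims[~detecs.any(axis=2)] = cons[-1]         # pixels with no detection get the last contrast
    return lims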
Пример #57
0
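# The listing below uses two names assumed to be defined earlier in the original
# source: the grid resolution n_grids and the centre index c of the shifted spectrum.
# A plausible minimal setup (the values are assumptions, not from the original):
import numpy as np
import matplotlib.pyplot as plt

n_grids = 128          # assumed grid resolution
c = n_grids // 2       # centre index of the fftshift-ed spectrum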
nf = 2  # number of low-frequency components

# Generate the grid points
x = np.linspace(0, 1, n_grids)
y = np.linspace(0, 1, n_grids)

# x and y are arrays of length n_grids.
# meshgrid combines them into n_grids*n_grids arrays; matching positions in X and Y
# give the coordinates of every grid point.
X, Y = np.meshgrid(x, y)

# Create an all-zero Fourier spectrum
spectrum = np.zeros((n_grids, n_grids), dtype=np.complex)

# Generate a stretch of noise of length (2*nf+1)**2 // 2
noise = [
    np.complex(x, y)
    for x, y in np.random.uniform(-1, 1, ((2 * nf + 1)**2 // 2, 2))
]

# Each Fourier coefficient and its conjugate are placed symmetrically about the centre
noisy_block = np.concatenate((noise, [0j], np.conjugate(noise[::-1])))

# Use the generated block as the low-frequency part of the spectrum
spectrum[c - nf:c + nf + 1, c - nf:c + nf + 1] = noisy_block.reshape(
    (2 * nf + 1, 2 * nf + 1))

# Apply the inverse Fourier transform
Z = np.real(np.fft.ifft2(np.fft.ifftshift(spectrum)))

# Create the figure
fig = plt.figure('3D surface & wire')
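# A quick sanity check of the symmetry trick above (illustrative): because the noisy
# block is conjugate-symmetric about the centre, the inverse transform is real up to
# rounding error, so np.real above discards only numerical noise.
assert np.allclose(np.imag(np.fft.ifft2(np.fft.ifftshift(spectrum))), 0)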
Пример #58
0
img = cv2.imread('image.png', 0)
plt.imshow(img, cmap='gray')
f = np.fft.fft2(img)
phase = np.angle(f)
fshift = np.fft.fftshift(f)
mag = 20 * (np.log(np.abs((fshift))))
plt.imshow(mag, cmap='gray')
plt.imshow(phase, cmap='gray')

ifshift = np.fft.ifftshift(fshift)
ift = np.fft.ifft2(f)
plt.imshow(np.abs(ift), cmap='gray')

### Program to interchange Phase of two images and plot it
i = np.complex(0, 1)
img1 = cv2.imread('lena.png', 0)
img2 = cv2.imread('mandrill.png', 0)
plt.imshow(img1, cmap='gray')
plt.imshow(img2, cmap='gray')

f1 = np.fft.fft2(img1)
phase1 = np.angle(f1)
fshift1 = np.fft.fftshift(f1)
mag1 = 20 * (np.log(np.abs((fshift1))))
plt.imshow(mag1, cmap='gray')
plt.imshow(phase1, cmap='gray')

f2 = np.fft.fft2(img2)
phase2 = np.angle(f2)
fshift2 = np.fft.fftshift(f2)
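# A hedged sketch of how the phase interchange could be completed from here (the
# listing stops above); it assumes the two images have the same size, and combines
# each image's magnitude spectrum with the other image's phase before inverting:
recon1 = np.real(np.fft.ifft2(np.abs(f1) * np.exp(i * phase2)))   # |F1| with phase of img2
recon2 = np.real(np.fft.ifft2(np.abs(f2) * np.exp(i * phase1)))   # |F2| with phase of img1
plt.imshow(recon1, cmap='gray')
plt.imshow(recon2, cmap='gray')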
Пример #59
0
    def circlefit(self,
                  f_data,
                  z_data,
                  fr=None,
                  Ql=None,
                  refine_results=False,
                  calc_errors=True):
        '''
        performs a circle fit on a frequency vs. complex resonator scattering data set
        Data has to be normalized!!
        INPUT:
        f_data,z_data: input data (frequency, complex S21 data)
        OUTPUT:
        outputs a dictionary {key:value} consisting of the fit values, errors and status information about the fit
        values: {"phi0":phi0, "Ql":Ql, "absolute(Qc)":absQc, "Qi": Qi, "electronic_delay":delay, "complexQc":complQc, "resonance_freq":fr, "prefactor_a":a, "prefactor_alpha":alpha}
        errors: {"phi0_err":phi0_err, "Ql_err":Ql_err, "absolute(Qc)_err":absQc_err, "Qi_err": Qi_err, "electronic_delay_err":delay_err, "resonance_freq_err":fr_err, "prefactor_a_err":a_err, "prefactor_alpha_err":alpha_err}
        for details, see:
            [1] (not diameter corrected) Jiansong Gao, "The Physics of Superconducting Microwave Resonators" (PhD Thesis), Appendix E, California Institute of Technology, (2008)
            [2] (diameter corrected) M. S. Khalil, et. al., J. Appl. Phys. 111, 054510 (2012)
            [3] (fitting techniques) N. CHERNOV AND C. LESORT, "Least Squares Fitting of Circles", Journal of Mathematical Imaging and Vision 23, 239, (2005)
            [4] (further fitting techniques) P. J. Petersan, S. M. Anlage, J. Appl. Phys, 84, 3392 (1998)
        the program fits the circle with the algebraic technique described in [3], the rest of the fitting is done with the scipy.optimize least square fitting toolbox
        also, check out [5] S. Probst et al. "Efficient and reliable analysis of noisy complex scattering resonator data for superconducting quantum circuits" (in preparation)
        '''

        if fr is None: fr = f_data[np.argmin(np.absolute(z_data))]
        if Ql is None: Ql = 1e6
        xc, yc, r0 = self._fit_circle(z_data, refine_results=refine_results)
        phi0 = -np.arcsin(yc / r0)
        theta0 = self._periodic_boundary(phi0 + np.pi, np.pi)
        z_data_corr = self._center(z_data, np.complex(xc, yc))
        theta0, Ql, fr = self._phase_fit(f_data, z_data_corr, theta0, Ql, fr)
        #print("Ql from phasefit is: " + str(Ql))
        absQc = Ql / (2. * r0)
        complQc = absQc * np.exp(1j * ((-1.) * phi0))
        Qc = 1. / (
            1. / complQc
        ).real  # here, taking the real part of (1/complQc) from diameter correction method
        Qi_dia_corr = 1. / (1. / Ql - 1. / Qc)
        Qi_no_corr = 1. / (1. / Ql - 1. / absQc)

        results = {
            "Qi_dia_corr": Qi_dia_corr,
            "Qi_no_corr": Qi_no_corr,
            "absQc": absQc,
            "Qc_dia_corr": Qc,
            "Ql": Ql,
            "fr": fr,
            "theta0": theta0,
            "phi0": phi0,
            #"prefactor_a": a,
            #"prefactor_alpha": alpha
        }

        #calculation of the error
        p = [fr, absQc, Ql, phi0]
        #chi_square, errors = rt.get_errors(rt.residuals_notch_ideal,f_data,z_data,p)
        if calc_errors == True:
            chi_square, cov = self._get_cov_fast_notch(f_data, z_data, p)
            #chi_square, cov = rt.get_cov(rt.residuals_notch_ideal,f_data,z_data,p)

            if cov is not None:
                errors = np.sqrt(np.diagonal(cov))
                fr_err, absQc_err, Ql_err, phi0_err = errors
                #calc Qi with error prop (sum the squares of the variances and covariances)
                dQl = 1. / ((1. / Ql - 1. / absQc)**2 * Ql**2)
                dabsQc = -1. / ((1. / Ql - 1. / absQc)**2 * absQc**2)
                Qi_no_corr_err = np.sqrt(
                    (dQl**2 * cov[2][2]) + (dabsQc**2 * cov[1][1]) +
                    (2 * dQl * dabsQc * cov[2][1]))  #with correlations
                #calc Qi dia corr with error prop
                dQl = 1 / ((1 / Ql - np.cos(phi0) / absQc)**2 * Ql**2)
                dabsQc = -np.cos(phi0) / (
                    (1 / Ql - np.cos(phi0) / absQc)**2 * absQc**2)
                dphi0 = -np.sin(phi0) / (
                    (1 / Ql - np.cos(phi0) / absQc)**2 * absQc)
                ##err1 = ( (dQl*cov[2][2])**2 + (dabsQc*cov[1][1])**2 + (dphi0*cov[3][3])**2 )
                err1 = ((dQl**2 * cov[2][2]) + (dabsQc**2 * cov[1][1]) +
                        (dphi0**2 * cov[3][3]))
                err2 = (dQl * dabsQc * cov[2][1] + dQl * dphi0 * cov[2][3] +
                        dabsQc * dphi0 * cov[1][3])
                Qi_dia_corr_err = np.sqrt(err1 +
                                          2 * err2)  # including correlations
                errors = {
                    "phi0_err": phi0_err,
                    "Ql_err": Ql_err,
                    "absQc_err": absQc_err,
                    "fr_err": fr_err,
                    "chi_square": chi_square,
                    "Qi_no_corr_err": Qi_no_corr_err,
                    "Qi_dia_corr_err": Qi_dia_corr_err,
                    "prefactor_a_err":
                    0,  # Temporary until error calculation can be introduced
                    "prefactor_alpha_err":
                    0  # Temporary until error calculation can be introduced
                }
                results.update(errors)
            else:
                print("WARNING: Error calculation failed!")
        else:
            #just calc chisquared:
            fun2 = lambda x: self._residuals_notch_ideal(x, f_data, z_data)**2
            chi_square = 1. / float(len(f_data) - len(p)) * (fun2(p)).sum()
            errors = {"chi_square": chi_square}
            results.update(errors)
        return results
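# A compact restatement of the diameter-correction step above (Khalil et al. 2012) as
# a standalone sketch with illustrative numbers: the internal quality factor follows
# from Ql and the real part of the inverse complex coupling quality factor.
import numpy as np

Ql, absQc, phi0 = 5e4, 8e4, 0.1                  # illustrative fit results
complQc = absQc * np.exp(-1j * phi0)
Qc_dia_corr = 1.0 / np.real(1.0 / complQc)       # equals absQc / cos(phi0)
Qi_dia_corr = 1.0 / (1.0 / Ql - 1.0 / Qc_dia_corr)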
Пример #60
0
 def DDB_file_open(self,filefullpath):
   if not (os.path.isfile(filefullpath)):
     raise Exception('The file "%s" does not exist!' %filefullpath)
   with open(filefullpath,'r') as DDB:
     Flag = 0
     Flag2 = False
     Flag3 = False
     ikpt = 0
     typatdone = 0
     for line in DDB:
       if line.find('natom') > -1:
         self.natom = N.int(line.split()[1])
       if line.find('nkpt') > -1:
         self.nkpt = N.int(line.split()[1])
         self.kpt  = zeros((self.nkpt,3))
       if line.find('ntypat') > -1:
         self.ntypat = N.int(line.split()[1])
       if line.find('nband') > -1:
         self.nband = N.int(line.split()[1])
       if line.find('acell') > -1:
         line = line.replace('D','E')
         tmp = line.split()
         self.acell = [N.float(tmp[1]),N.float(tmp[2]),N.float(tmp[3])]
       if Flag2:
         line = line.replace('D','E')
         for ii in N.arange(3,self.ntypat):
           self.amu[ii] = N.float(line.split()[ii-3])
           Flag2 = False
       if line.find('amu') > -1:
         line = line.replace('D','E')
         self.amu = zeros((self.ntypat))
         if self.ntypat > 3:
           for ii in N.arange(3):
             self.amu[ii] = N.float(line.split()[ii+1])
             Flag2 = True 
         else:
           for ii in N.arange(self.ntypat):
             self.amu[ii] = N.float(line.split()[ii+1])
       if line.find(' kpt ') > -1:
         line = line.replace('D','E')
         tmp = line.split()
         self.kpt[0,0:3] = [float(tmp[1]),float(tmp[2]),float(tmp[3])]
         ikpt = 1
         continue
       if ikpt < self.nkpt and ikpt > 0:
         line = line.replace('D','E')
         tmp = line.split()
         self.kpt[ikpt,0:3] = [float(tmp[0]),float(tmp[1]),float(tmp[2])]  
         ikpt += 1
         continue
       if Flag == 2:
         line = line.replace('D','E')
         tmp = line.split()
         self.rprim[2,0:3] = [float(tmp[0]),float(tmp[1]),float(tmp[2])]
         Flag = 0
       if Flag == 1:
         line = line.replace('D','E')
         tmp = line.split()
         self.rprim[1,0:3] = [float(tmp[0]),float(tmp[1]),float(tmp[2])]
         Flag = 2
       if line.find('rprim') > -1:
         line = line.replace('D','E')
         tmp = line.split()
         self.rprim[0,0:3] = [float(tmp[1]),float(tmp[2]),float(tmp[3])]
         Flag = 1
       if Flag3:
         line = line.replace('D','E')
         if (self.natom-typatdone)*1.0/12 < 1.001:
           for ii in N.arange(self.natom-typatdone):
             self.typat[typatdone+ii] = N.float(line.split()[ii])
             Flag3 = False
         else:
           for ii in N.arange(12):
             self.typat[typatdone+ii] = N.float(line.split()[ii])
           typatdone += 12
       if line.find(' typat') > -1:
         self.typat = zeros((self.natom))
         if self.natom > 12:
           for ii in N.arange(12):
             self.typat[ii] = N.float(line.split()[ii+1])
             Flag3 = True
             typatdone = 12
         else:
           for ii in N.arange(self.natom):
             self.typat[ii] = N.float(line.split()[ii+1])
       # Read the actual d2E/dRdR matrix
       if Flag == 3:
         line = line.replace('D','E')
         tmp = line.split()
         self.IFC[int(tmp[0])-1,int(tmp[1])-1,int(tmp[2])-1,int(tmp[3])-1] = \
           complex(float(tmp[4]),float(tmp[5]))
       # Read the current Q-point
       if line.find('qpt') > -1:
         line = line.replace('D','E')
         tmp = line.split()
         self.iqpt = [N.float(tmp[1]),N.float(tmp[2]),N.float(tmp[3])]
         Flag = 3
         self.IFC = zeros((3,self.natom,3,self.natom),dtype=complex)