Example #1
    def compute_kernel_matrix(self):
        """Compute the whole kernel matrix (see 2.1 from the SVM doc)"""
        print "Computing kernel matrix..."
        n = self.n
        X = self.X
        tau = self.tau

        # 1. compute d
        xxt = X * X.transpose()
        d = s.diag(xxt)
        d = s.matrix(d).transpose()

        # 2. compute A
        ones = s.matrix(s.ones(n))
        A = 0.5 * d * ones
        A += 0.5 * ones.transpose() * d.transpose()
        A -= xxt

        # 3. compute K with Gaussian kernel
        A = -tau*A
        K = s.exp(A)
        assert K.shape == (n,n), "Invalid shape of kernel matrix"
        self.K = K
        print "Finished computing kernel matrix."
        return
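
The intermediate matrix A above is the matrix of pairwise half squared distances: A[i,j] = 0.5*||x_i||^2 + 0.5*||x_j||^2 - x_i.x_j = 0.5*||x_i - x_j||^2, so K = exp(-tau*A) is a Gaussian kernel. A minimal sketch checking the vectorized construction against a direct double loop, assuming plain NumPy:

import numpy as np

X = np.random.rand(5, 3)                          # 5 samples, 3 features
tau = 0.7
d = np.sum(X**2, axis=1)
A = 0.5*d[:, None] + 0.5*d[None, :] - X @ X.T     # A[i,j] = 0.5*||x_i - x_j||^2
K = np.exp(-tau * A)
K_loop = np.array([[np.exp(-0.5*tau*np.sum((xi - xj)**2)) for xj in X] for xi in X])
assert np.allclose(K, K_loop)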
Example #2
    def __init__(self, respond = None, regressors = None, intercept = False, D = None, d = None, G = None, a = None, b = None, **args):
        """Input: paras where they are expected to be tuple or dictionary"""
        ECRegression.__init__(self,respond, regressors, intercept, D, d, **args)

        if self.intercept and G is not None:
            self.G = scipy.zeros((self.n, self.n))
            self.G[1:, 1:] = G
        elif self.intercept and G is None:
            self.G = scipy.identity(self.n)
            self.G[0, 0] = 0.0
        elif not self.intercept and G is not None:
            self.G = G
        else:
            self.G = scipy.identity(self.n)
            
        if self.intercept:
            self.a = scipy.zeros((self.n, 1))
            self.a[1:] = a            
            self.b = scipy.zeros((self.n, 1))
            self.b[1:] = b
        else:
            if a is None:
                self.a = scipy.matrix( scipy.zeros((self.n,1)))
            else: self.a = a
            if b is None:
                self.b = scipy.matrix( scipy.ones((self.n,1)))
            else: self.b = b
Example #3
def make_S(A0, Ak, G0, Gk, phi):

    # R and P must be distinct matrices; chaining `R = P = ...` would make
    # them aliases of the same object
    R = scipy.matrix(scipy.zeros((6, 6)))
    P = scipy.matrix(scipy.zeros((6, 6)))

    for i in range(0, 3, 2):
        for j in range(3):
            R[i, j] = Ak[i, j]
    R[1, 1] = R[3, 3] = R[4, 4] = 1.0
    R[5, 5] = Ak[5, 5]

    P[0, 0] = (A0[0, 0] * cos(phi)**2.0 + A0[1, 0] * sin(phi)**2.0)
    P[0, 1] = (A0[0, 1] * cos(phi)**2.0 + A0[1, 1] * sin(phi)**2.0)
    P[0, 2] = (A0[0, 2] * cos(phi)**2.0 + A0[1, 2] * sin(phi)**2.0)
    P[0, 3] = (A0[3, 3] * sin(2.0 * phi))
    P[1, 0] = sin(phi)**2.0
    P[1, 1] = cos(phi)**2.0
    P[1, 3] = -sin(2.0*phi)
    P[2, 0] = A0[2, 0]
    P[2, 1] = A0[2, 1]
    P[2, 2] = A0[2, 2]
    P[3, 0] = -0.5*sin(2.0*phi)
    P[3, 1] = 0.5*sin(2.0*phi)
    P[3, 3] = cos(2.0*phi)
    P[4, 4] = cos(phi)
    P[4, 5] = -sin(phi)
    P[5, 4] = A0[4, 4] * sin(phi)
    P[5, 5] = A0[5, 5] * cos(phi)

    scipy.savetxt("R", R)
    scipy.savetxt("P", P)
    return scipy.matrix(R.I) * scipy.matrix(P)
Example #4
def stream2xyz (u, v, w, mu, r, theta, phi, wedge, nu=0.0):
    """ Converts to galactic x,y,z from custom stream coordinates u,v,w;
    ACCEPTS ONLY 1 POINT AT A TIME - don't know what will happen if arrays are passed in
    stream is aligned along w-axis;  rotation is theta about y-axis, then phi about z-axis
    (See Nathan Cole's thesis, page 17)"""
    theta, phi = (theta*rad), (phi*rad)  # theta, phi input is expected in degrees
    # Get uvw origin in xyz
    ra, dec = GCToEq(mu, nu, wedge)
    l, b = EqTolb(ra, dec)
    xyz0 = lbr2xyz(l,b,r)
    # Rotate uvw into xyz
    R_M = sc.matrix([
        [(sc.cos(phi)*sc.cos(theta)), (-1.0*sc.sin(phi)), (sc.cos(phi)*sc.sin(theta))],
        [(sc.sin(phi)*sc.cos(theta)),  (sc.cos(phi)),     (sc.sin(phi)*sc.sin(theta))],
        [(-1.0*sc.sin(theta)),         (0.0),             (sc.cos(theta))]
        ])
    """R_inv = sc.matrix([
        [(sc.sin(theta)*sc.cos(phi)), (-1.0*sc.sin(theta)*sc.sin(phi)), (-1.0*sc.cos(theta))],
        [(sc.sin(phi)),               (sc.cos(phi)),                    (0.0)],
        [(sc.cos(theta)*sc.cos(phi)), (-1.0*sc.cos(theta)*sc.sin(phi)), (sc.sin(theta))]
        ])  OLD CRAP"""
    uvw_M = sc.matrix([u,v,w])
    xyz_M = R_M*uvw_M.T
    xyzR = sc.array(xyz_M)
    # Translate rotated values
    x = xyzR[0] + xyz0[0]
    y = xyzR[1] + xyz0[1]
    z = xyzR[2] + xyz0[2]
    return x[0],y[0],z[0]
Example #5
def GetPrincipalAxes(Angle1,Angle2,Angle3):
    "Input: the three euler angles from Tipsy. Output: the three axes..."

    pi = 3.14159265359
    phi =  Angle1 * pi/180.0
    theta = Angle2  * pi/180.0
    psi =  Angle3 * pi/180.0

    a11 = cos(psi) * cos(phi)  - cos(theta) * sin(phi) * sin(psi)   
    a12 = cos(psi) * sin(phi)  + cos(theta) * cos(phi) * sin(psi)   
    a13 = sin(psi) * sin(theta)     
    a21 = -sin(psi) * cos(phi)  - cos(theta) * sin(phi) *  cos(psi)     
    a22 = -sin(psi) * sin(phi)  + cos(theta) * cos(phi) * cos(psi)  
    a23 = cos(psi) * sin(theta)     
    a31 = sin(theta) * sin(phi)     
    a32 = -sin(theta) * cos(phi)    
    a33 = cos(theta)  

    a=scipy.matrix( [[a11,a12,a13],[a21,a22,a23],[ a31,a32,a33]])
    x=scipy.matrix( [[1.0],[0.0],[0.0]])
    y=scipy.matrix( [[0.0],[1.0],[0.0]])
    z=scipy.matrix( [[0.0],[0.0],[1.0]])

#    print a*x
#    print ''
#    print a*y
#    print ''
#    print a*z
    return a*x,a*y,a*z
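
The returned products a*x, a*y, a*z are simply the columns of a, which form an orthonormal triad for what appears to be the z-x-z Euler convention. A quick sanity check, as a hedged sketch (assuming the cos/sin imports GetPrincipalAxes relies on are in scope):

import scipy

a1, a2, a3 = GetPrincipalAxes(30.0, 45.0, 60.0)
A = scipy.hstack((a1, a2, a3))
# a rotation matrix is orthonormal, so A.T * A should be the identity
print(scipy.allclose(A.T * A, scipy.identity(3)))  # True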
Example #6
    def __init__(self, x, y, z, a, g, h):
        """
		Construct a Scatterer object, encapsulating the shape and material
		properties of a deformed-cylindrical object with sound speed and
		density similar to the surrounding fluid medium.

		Parameters
		----------
		x, y, z : array-like
			Positions delimiting the central axis of the scatterer.
		a : array-like
			Array of radii along the centerline of the scatterer.
		g : array-like
			Array of sound speed contrasts (sound speed inside the scatterer
			divided by sound speed in the surrounding medium)
		h : array-like
			Array of density contrasts (density inside the scatterer
			divided by density in the surrounding medium)

		"""
        super(Scatterer, self).__init__()
        self.r = sp.matrix([x, y, z])
        self.a = sp.array(a)
        self.g = sp.array(g)
        self.h = sp.array(h)
        self.cum_rotation = sp.matrix(sp.eye(3))
Example #7
	def computeVarianceContributions( self, firstDerivatives ) :

		qlist = []
		i = 0
		while i < self.dim:
			#print "Compute var %d" % i
			# derivatives of the eigenvalue for this state
			s = matrix( firstDerivatives[ i,0: ], float64 ).T
			##print s.shape

			# cross probability matrix for this state
			Pi = matrix( self.dirmat[ i,0: ], float64 ).T
			##print Pi.shape
			part1 = diag( arr2lst( Pi.T ) )
			part1 = matrix(part1, float64)
			##print part1.shape
			##print common_type(part1)
			Cp = matrix( part1 - Pi * Pi.T )
			##print common_type(Cp)
			##print Cp.shape
			del part1

			# degree of sensitivity for this state
			q = float( abs( s.T * Cp * s ) )
			del s
			del Pi
			del Cp

			qlist.append( q )

			i += 1

		return matrix( qlist )	
Example #8
    def test_poisson3d_7pt(self):
        stencil = array([[[0, 0, 0],
                          [0, -1, 0],
                          [0, 0, 0]],
                         [[0, -1, 0],
                          [-1, 6, -1],
                          [0, -1, 0]],
                         [[0, 0, 0],
                          [0, -1, 0],
                          [0, 0, 0]]])

        cases = []
        cases.append(((1, 1, 1), matrix([[6]])))
        cases.append(((2, 1, 1), matrix([[6, -1],
                                        [-1, 6]])))
        cases.append(((2, 2, 1), matrix([[6, -1, -1, 0],
                                        [-1, 6, 0, -1],
                                        [-1, 0, 6, -1],
                                        [0, -1, -1, 6]])))
        cases.append(((2, 2, 2), matrix([[6, -1, -1, 0, -1, 0, 0, 0],
                                        [-1, 6, 0, -1, 0, -1, 0, 0],
                                        [-1, 0, 6, -1, 0, 0, -1, 0],
                                        [0, -1, -1, 6, 0, 0, 0, -1],
                                        [-1, 0, 0, 0, 6, -1, -1, 0],
                                        [0, -1, 0, 0, -1, 6, 0, -1],
                                        [0, 0, -1, 0, -1, 0, 6, -1],
                                        [0, 0, 0, -1, 0, -1, -1, 6]])))

        for grid, expected in cases:
            result = stencil_grid(stencil, grid).todense()
            assert_equal(result, expected)
Example #9
    def process_collision_geometry_for_table(self, firsttable, additional_tables = []):

        table_object = CollisionObject()
        table_object.operation.operation = CollisionObjectOperation.ADD
        table_object.header.frame_id = firsttable.pose.header.frame_id
        table_object.header.stamp = rospy.Time.now()

        #create a box for each table
        for table in [firsttable,]+additional_tables:
            object = Shape()
            object.type = Shape.BOX
            object.dimensions.append(math.fabs(table.x_max-table.x_min))
            object.dimensions.append(math.fabs(table.y_max-table.y_min))
            object.dimensions.append(0.01)
            table_object.shapes.append(object)
  
        #set the origin of the table object in the middle of the firsttable
        table_mat = self.pose_to_mat(firsttable.pose.pose)
        table_offset = scipy.matrix([(firsttable.x_min + firsttable.x_max)/2.0, (firsttable.y_min + firsttable.y_max)/2.0, 0.0]).T
        table_offset_mat = scipy.matrix(scipy.identity(4))
        table_offset_mat[0:3,3] = table_offset
        table_center = table_mat * table_offset_mat
        origin_pose = self.mat_to_pose(table_center)
        table_object.poses.append(origin_pose)

        table_object.id = "table"
        self.object_in_map_pub.publish(table_object)
Example #10
def initialize_batch(X_bar0, P_bar0, x_bar0):
    """
    Generate t=0 values for a new iteration from an initial state, covariance
    and a-priori estimate.
    """
    # Get initial state and STM and initialize integrator
    X_bar0_list = X_bar0.T.tolist()[0]
    stm0 = sp.matrix(sp.eye(18))
    stm0_list = sp.eye(18).reshape(1,324).tolist()[0]

    eom = ode(Udot).set_integrator('dop853', atol=1.0E-10, rtol=1.0E-9)
    eom.set_initial_value(X_bar0_list + stm0_list, 0)

    # Accumulate measurement at t=0
    obs0 = OBS[0]
    stn0 = obs0[0]
    comp0, Htilda0 = Htilda_matrix(X_bar0_list, 0, stn0)
    resid0 = [ obs0[1] - float(comp0[0]),
               obs0[2] - float(comp0[1]) ]
    y0 = sp.matrix([resid0]).T
    H0 = Htilda0 * stm0

    L0 = P_bar0.I + H0.T * W * H0
    N0 = P_bar0.I * x_bar0 + H0.T * W * y0

    return [stm0, comp0, resid0, Htilda0, H0, L0, N0, eom]
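
Here L0 and N0 are the information matrix and information vector of the batch least-squares normal equations, L = P_bar0^-1 + H^T W H and N = P_bar0^-1 x_bar0 + H^T W y; subsequent observations are accumulated onto them before solving L x_hat = N for the state correction.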
Example #11
	def plotm(self):
		N=24*60
		Lambda = sp.matrix([self.T(i * 60) for i in range(N)]).T
		Load = sp.matrix(sp.zeros([N,1]))
		Fs = sp.matrix(sp.zeros([N,1]))
		for i in range(self.ts//60,(self.ts+self.ld)//60):
			Load[i,0]=self.lv
		for i in range(self.ts//60,self.tf//60):
			Fs[i,0]=1

		plt.figure(figsize=(18,12))
		ax1 = plt.subplot2grid((3,5),(0,0),rowspan=1,colspan=5)
		ax1.set_ylabel("Load (W)")
		ax1.plot(sp.array(Load))
		ax1.axis([0,N,0,1])

		ax2 = plt.subplot2grid((3,5),(1,0),rowspan=1,colspan=5)
		ax2.set_ylabel("Feasible")
		ax2.plot(sp.array(Fs))
		ax2.axis([0,N,0,2])
		plt.draw()

		ax3 = plt.subplot2grid((3,5),(2,0),rowspan=1,colspan=5)
		ax3.set_ylabel("Tariff")
		ax3.plot(sp.array(Lambda))
		ax3.axis([0,N,0,40])
		plt.draw()
		return
Example #12
        def problem_params(lr, gam, memories, inpst, neurons):
            """
            Return the lowest eigenvector of the classical Hamiltonian
            constructed by the learning rule, gamma, memories, and input.
            """
            # Bias Hamiltonian
            alpha = gam * np.array(inpst)

            # Memory Hamiltonian
            beta = np.zeros((qubits, qubits))
            if lr == "hebb":
                # Hebb rule
                memMat = sp.matrix(memories).T
                beta = sp.triu(memMat * memMat.T) / float(neurons)
            elif lr == "stork":
                # Storkey rule
                Wm = sp.zeros((neurons, neurons))
                for m, mem in enumerate(memories):
                    Am = sp.outer(mem, mem) - sp.eye(neurons)
                    Wm += (Am - Am * Wm - Wm * Am) / float(neurons)
                beta = sp.triu(Wm)
            elif lr == "proj":
                # Moore-Penrose pseudoinverse rule
                memMat = sp.matrix(memories).T
                beta = sp.triu(memMat * sp.linalg.pinv(memMat))

            # Find the eigenvectors
            evals, evecs = sp.linalg.eig(np.diag(alpha) + beta)
            idx = evals.argsort()

            return evals[idx], evecs[:, idx], np.diag(alpha), beta
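
For the Hebb rule, the matrix form above is equivalent to an explicit double loop over neuron pairs; a hedged sketch of that equivalence using plain NumPy:

import numpy as np

mems = np.array(memories)                 # shape (p, n): p patterns, n neurons
n = mems.shape[1]
beta_loop = np.zeros((n, n))
for i in range(n):
    for j in range(n):
        beta_loop[i, j] = mems[:, i] @ mems[:, j]   # sum over patterns
beta_loop = np.triu(beta_loop) / float(n)
# matches sp.triu(memMat * memMat.T) / float(neurons) above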
Example #13
def poly_fit(x, y, sig, order, verbose=1):
    n_params = order + 1
    #initialize matrices as arrays
    beta = sc.zeros(n_params, float)
    solution = sc.zeros(n_params, float)
    alpha = sc.zeros( (n_params,n_params), float)
    #Fill Matrices
    for k in range(n_params):
        # Fill beta
        for i in range(len(x)):
            holder = ( y[i]*poly(x[i], k) ) / (sig[i]*sig[i])
            beta[k] = beta[k] + holder
        # Fill alpha
        for l in range(n_params):
            for i in range(len(x)):
                holder = (poly(x[i],l)*poly(x[i],k)) / (sig[i]*sig[i])
                alpha[l,k] = alpha[l,k] + holder
    # Re-type matrices
    beta_m = sc.matrix(beta)
    alpha_m = sc.matrix(alpha)
    #Invert alpha, then right-multiply beta by the inverse
    #epsilon_m = alpha_m.I
    a_m = beta_m * alpha_m.I
    if verbose==1:
        print "beta:\n", beta_m
        print "alpha:\n", alpha_m
        print "best-fit parameter matrix:\n", a_m
    return sc.array(a_m)[0,:]
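
A usage sketch for poly_fit on noiseless quadratic data; poly is not shown in the snippet, so it is assumed here to be the monomial basis poly(x, k) = x**k:

import scipy as sc

def poly(x, k):
    # assumed basis function: the k-th monomial
    return x ** k

x = sc.linspace(0.0, 5.0, 50)
y = 2.0 + 0.5 * x - 0.3 * x**2
sig = sc.ones(len(x)) * 0.1        # uniform measurement errors
params = poly_fit(x, y, sig, order=2, verbose=0)
print(params)                      # approximately [2.0, 0.5, -0.3]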
Example #14
def static():
    tmp = scipy.zeros((3, 3), float)

    for i in range(0, len(l)):
        L1 = L0[0, 0] / Lk[1, 1]
        L2 = L0[1, 1] / Lk[1, 1]
        D = scipy.matrix([[math.cos(fi[i]), - math.sin(fi[i]), 0.0],
                          [math.sin(fi[i]),   math.cos(fi[i]), 0.0],
                          [0.0,               0.0,             1.0]])
        B = scipy.matrix([[math.cos(fi[i]),        math.sin(fi[i]),      0.0],
                          [- L1 * math.sin(fi[i]), L2 * math.cos(fi[i]), 0.0],
                          [0.0,                    0.0,                  1.0]])
        tmp += l[i] * D * B

    E = S0 / S * I + tmp * d / S
    E = scipy.matrix(E).I

    tmp = scipy.zeros((3, 3), float)

    for i in range(0, len(l)):
        L1 = L0[0, 0] / Lk[1, 1]
        L2 = L0[1, 1] / Lk[1, 1]
        B = scipy.matrix([[math.cos(fi[i]),        math.sin(fi[i]),      0.0],
                          [- L1 * math.sin(fi[i]), L2 * math.cos(fi[i]), 0.0],
                          [0.0,                    0.0,                  1.0]])
        tmp += l[i] * B.T * Lk * B

    L = E.T * (S0 / S * L0 + d / S * tmp) * E

    print(L[0, 0], L[1, 1], L[2, 2])
Example #15
def linsys(xdot, x, u, y, x0=None, u0=None):

    # y is required for linsys, but not linearize
    # apparently 'ss' does not support multiple outputs; linearize does
    
    
    As,Bs,Cs,Ds,F0,G0 = linearize(xdot, x, u, y, x0, u0)
    
    # check that xdot evaluates to ~0 at the linearization point
    sumF0 = sum(abs(i) for i in F0)
    if sumF0 > 0.001:
        print('Warning: The system was not linearized about an equilibrium point!')
        print()
        print('xdot at x0 = ', F0)
        
        
    if Cs.shape[0] > 1:
        raise ValueError("C matrix cannot have more than one row; system must be SISO")
        

    A = scipy.matrix(As).astype(float)
    B = scipy.matrix(Bs).astype(float)
    C = scipy.matrix(Cs).astype(float)
    D = scipy.matrix(Ds).astype(float)
    
    
    
    sys = 0 #ss(A,B,C,D)
    
    return sys
Example #16
def initialize_sequential(X_bar0, P_bar0, x_bar0):
    """
    Generate t=0 values for a new iteration from an initial state, covariance
    and a-priori estimate.
    """
    # Get initial state and STM and initialize integrator
    X_bar0_list = X_bar0.T.tolist()[0]
    stm0 = sp.matrix(sp.eye(18))
    stm0_list = sp.eye(18).reshape(1,324).tolist()[0]

    eom = ode(Udot).set_integrator('dop853', atol=1.0E-10, rtol=1.0E-9)
    eom.set_initial_value(X_bar0_list + stm0_list, 0)

    # Perform measurement update for t=0 observation
    obs0 = OBS[0]
    stn0 = obs0[0]
    comp0, Htilda0 = Htilda_matrix(X_bar0_list, 0, stn0)
    resid0 = [ obs0[1] - float(comp0[0]),
               obs0[2] - float(comp0[1]) ]
    y0 = sp.matrix([resid0]).T
    K0 = P_bar0 * Htilda0.T * (Htilda0 * P_bar0 * Htilda0.T + W.I).I
    x_hat0 = x_bar0 + K0 * (y0 - Htilda0 * x_bar0)
    P0 = (I - K0 * Htilda0) * P_bar0
    #P0 = (I - K0 * Htilda0) * P_bar0 * (I - K0 * Htilda0).T + K0 * W.I * K0.T

    return [stm0, comp0, resid0, Htilda0, x_hat0, P0, eom]
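
The commented-out alternative for P0 is the Joseph form of the covariance update; it is algebraically equivalent to (I - K0*Htilda0)*P_bar0 but preserves the symmetry and positive definiteness of the covariance better under round-off.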
Example #17
def marginalize(dist_vars,marg_vars):
    #Initialize marginal dict, same for all dists
    margdist_vars={}
    margdist_vars['dist']=dist_vars['dist']
    #Gaussian
    if dist_vars['dist']=='gaussian':
        N_k=len(dist_vars['w'])#Number of Gaussians
        N_D=len(dist_vars['mu'][0])#Dim of original parameter space
        
        #Initialize remaining components of marg dict, before any marginalization        
        margdist_vars['mu']=dist_vars['mu'][:]
        margdist_vars['cov']=dist_vars['cov'][:]
        margdist_vars['w']=dist_vars['w'][:]
        margdist_vars['vars']=dist_vars['vars'][:]
        
        for marg_var in marg_vars:
            #Get indices of marginalized var in current gaussian
            i_m=margdist_vars['vars'].index(marg_var)
            #Create list of current indices
            i_old=list(range(N_D))
            #remove index of marg_var
            i_old.remove(i_m)
            
            
            #remove marg_var from list of vars
            margdist_vars['vars'].remove(marg_var)
        
            margdist_vars['mu']=[sp.delete(margdist_vars['mu'][i],i_m,0) for i in range(len(margdist_vars['w']))]
            
            #For testing
#            for i in range(N_k):
#                margdist_vars['w'][i]=dist_vars['w'][i]
#                margdist_vars['cov'][i]=sp.delete(sp.delete(margdist_vars['cov'][i],i_m,0),i_m,1)
            
            #Loop over components in mixture
            #marg cov:T_M=L_m-T_m
            #marg weight:w_m=sp.sqrt(2*pi/L_mm)
            for i in range(N_k):
                #invert original covariance matrix
                Lambda=inv(sp.matrix(margdist_vars['cov'][i]))
                #Store the marginalized component of Lambda
                L_mm=Lambda[i_m,i_m]
                #Remove marginal component from Lambda
                L_m=sp.delete(sp.delete(Lambda,i_m,0),i_m,1)
                #Construct skew matrix
                l_m=sp.matrix(Lambda[i_m,i_old]+Lambda[i_old,i_m])
                T_m=l_m.T*l_m/(4*L_mm)
                #Construct marginalized covariance matrix
                margdist_vars['cov'][i]=sp.asarray(inv(L_m-T_m))
                #Scale weight
                margdist_vars['w'][i]=sp.sqrt(2*sp.pi/L_mm)*dist_vars['w'][i]
            
            #Update dimensions of marginalized parameter space
            N_D=N_D-1
         
        return margdist_vars
            
                
                
Example #18
def connect(sys, Q, inputv, outputv):
    '''
    Index-based interconnection of systems

    The system sys is a system typically constructed with append, with
    multiple inputs and outputs. The inputs and outputs are connected
    according to the interconnection matrix Q, and then the final
    inputs and outputs are trimmed according to the inputs and outputs
    listed in inputv and outputv.

    Note: inputs and outputs are numbered starting at 1.

    Parameters
    ----------
    sys: StateSpace or TransferFunction
        System to be connected
    Q: 2d array
        Interconnection matrix. The first column gives the input to be
        connected; the second column gives the output to be fed into this
        input. Negative values for the second column mean the feedback is
        negative; 0 means no connection is made
    inputv: 1d array
        list of final external inputs
    outputv: 1d array
        list of final external outputs

    Returns
    -------
    sys: LTI system
        Connected and trimmed LTI system

    Examples
    --------
    >>> sys1 = ss("1. -2; 3. -4", "5.; 7", "6, 8", "9.")
    >>> sys2 = ss("-1.", "1.", "1.", "0.")
    >>> sys = append(sys1, sys2)
    >>> Q = sp.mat([ [ 1, 2], [2, -1] ]) # basically feedback, output 2 in 1
    >>> sysc = connect(sys, Q, [2], [1, 2])
    '''
    # first connect
    K = sp.zeros( (sys.inputs, sys.outputs) )
    for r in sp.array(Q).astype(int):
        inp = r[0]-1
        for outp in r[1:]:
            if outp > 0 and outp <= sys.outputs:
                K[inp,outp-1] = 1.
            elif outp < 0 and -outp <= sys.outputs:
                K[inp,-outp-1] = -1.
    sys = sys.feedback(sp.matrix(K), sign=1)
    
    # now trim
    Ytrim = sp.zeros( (len(outputv), sys.outputs) )
    Utrim = sp.zeros( (sys.inputs, len(inputv)) )
    for i,u in enumerate(inputv):
        Utrim[u-1,i] = 1.
    for i,y in enumerate(outputv):
        Ytrim[i,y-1] = 1.
    return sp.matrix(Ytrim)*sys*sp.matrix(Utrim)  
Example #19
def Pow(M, power = 1):
    """Calculates the principal power of a square Hermitian matrix"""
    assert isHermitian(M, tol = 1e-14)
    D, U = eig(M)
    U = matrix(U)
    E = [x ** power for x in D]
    E = matrix(diag(E))
    Mpow = U * E * inv(U)
    return Mpow
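
A quick usage check of Pow as a principal matrix square root. isHermitian is not shown in the snippet, so a minimal stand-in is sketched alongside the assumed imports:

import numpy as np
from numpy import matrix, diag
from numpy.linalg import eig, inv

def isHermitian(M, tol=1e-14):
    # hypothetical stand-in for the helper asserted above
    return np.allclose(M, M.H, atol=tol)

M = matrix([[2.0, 1.0], [1.0, 2.0]])
R = Pow(M, 0.5)                 # principal square root
print(np.allclose(R * R, M))    # True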
Example #20
    def reload(self):

        if self.set:
            return False
        else:
            m, n = self.A.shape
            self.W0 = sp.maximum(sp.matrix(sp.random.normal(size=(m, self.rank))), 0)
            self.H0 = sp.maximum(sp.matrix(sp.random.normal(size=(self.rank, n))), 0)
        return True
Example #21
def EigenVectors(A):
    data = eig(A)
    # eigenvectors are the *columns* of data[1]
    g0 = data[1][:, 0]
    g1 = data[1][:, 1]
    g0.shape = (2, 1)
    g1.shape = (2, 1)
    g0 = matrix(g0)
    g1 = matrix(g1)
    return g0, g1
Example #22
def gen_sqexp_k(theta):
	A=theta[0]
	d=sp.matrix(theta[1:]).T
	
	N=sp.matrix(d).shape[0]
	D=sp.eye(N)
	for i in range(N):
		D[i,i]=1./(d[i,0]**2)
	return lambda x,y:A*sp.exp(-0.5*(x-y).T*D*(x-y))
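
A hedged usage sketch: theta packs the amplitude A followed by one length-scale per input dimension, and the returned kernel expects column-vector inputs:

import scipy as sp

k = gen_sqexp_k([1.0, 0.5, 2.0])   # A = 1, length-scales d = (0.5, 2.0)
x = sp.matrix([[0.0], [0.0]])
y = sp.matrix([[0.5], [1.0]])
print(float(k(x, x)))   # 1.0 at zero distance
print(float(k(x, y)))   # exp(-0.5*((0.5/0.5)**2 + (1.0/2.0)**2))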
Example #23
def covm(b, x):
    n = len(x)
    p = response(b, x)
    V = sp.zeros((n, n))
    for i in range(0, n):
        V[i, i] = p[i] * (1 - p[i])
    V = sp.matrix(V)
    x = sp.matrix(x)
    return inv(x.T * V * x)
Example #24
    def test_d_one_triangle(self):
        #sorted test
        v,e = matrix([[0,0],[1,0],[0,1]]),matrix([[0,1,2]])
        sc  = simplicial_complex((v,e))
        assert_equal(sc[0].d.shape,(3,3))

        #d0
        row = sc[1].simplex_to_index[simplex((0,1))]
        assert_equal(sc[0].d[row,0],-1)
        assert_equal(sc[0].d[row,1], 1)

        row = sc[1].simplex_to_index[simplex((1,2))]
        assert_equal(sc[0].d[row,1],-1)
        assert_equal(sc[0].d[row,2], 1)

        row = sc[1].simplex_to_index[simplex((0,2))]
        assert_equal(sc[0].d[row,0],-1)
        assert_equal(sc[0].d[row,2], 1)

        #d1
        col = sc[1].simplex_to_index[simplex((0,1))]
        assert_equal(sc[1].d[0,col], 1)

        col = sc[1].simplex_to_index[simplex((0,2))]
        assert_equal(sc[1].d[0,col],-1)

        col = sc[1].simplex_to_index[simplex((1,2))]
        assert_equal(sc[1].d[0,col], 1)

        #reversed test
        v,e = matrix([[0,0],[1,0],[0,1]]),matrix([[0,2,1]])
        sc  = simplicial_complex((v,e))
        assert_equal(sc[0].d.shape,(3,3))

        #d0
        row = sc[1].simplex_to_index[simplex((0,1))]
        assert_equal(sc[0].d[row,0],-1)
        assert_equal(sc[0].d[row,1], 1)

        row = sc[1].simplex_to_index[simplex((1,2))]
        assert_equal(sc[0].d[row,1],-1)
        assert_equal(sc[0].d[row,2], 1)

        row = sc[1].simplex_to_index[simplex((0,2))]
        assert_equal(sc[0].d[row,0],-1)
        assert_equal(sc[0].d[row,2], 1)

        #d1
        col = sc[1].simplex_to_index[simplex((0,1))]
        assert_equal(sc[1].d[0,col],-1)

        col = sc[1].simplex_to_index[simplex((0,2))]
        assert_equal(sc[1].d[0,col], 1)

        col = sc[1].simplex_to_index[simplex((1,2))]
        assert_equal(sc[1].d[0,col],-1)
Example #25
def get_transform_matrix(p1,p2,p3,s1,s2,s3):
    pri = matrix([p1,p2,p3]).T
    sec = matrix([s1,s2,s3]).T
   
    pri = np.vstack((pri, [1,1,1]))
    sec = np.vstack((sec, [1,1,1]))
    x1 = np.linalg.solve(pri.T, (sec.T)[:, 0])
    x2 = np.linalg.solve(pri.T, (sec.T)[:, 1])
    transform = np.vstack(((x1.T)[0,0:2], (x2.T)[0,0:2]))
    return transform
Example #26
def iteration(b, x, y):
    n = len(y)
    p = response(b, x)
    V = sp.zeros((n, n))
    for i in range(0, n):
        V[i, i] = p[i] * (1 - p[i])
    vdif = sp.matrix(y - p).T
    V = sp.matrix(V)
    x = sp.matrix(x)
    return inv(x.T * V * x) * x.T * vdif
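
iteration returns the Newton-Raphson step for logistic regression (the inverse Fisher information times the score), so fitting repeatedly adds it to the current coefficients. A hedged driver sketch, assuming response(b, x) returns the fitted probabilities, x is an n x k design matrix, and y holds the 0/1 labels:

x = sp.matrix(x)
b = sp.matrix(sp.zeros((x.shape[1], 1)))
for _ in range(25):
    step = iteration(b, x, y)
    b = b + step
    if abs(step).max() < 1e-8:   # converged
        break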
Example #27
def get_transform(tf_listener, frame1, frame2):
    temp_header = Header()
    temp_header.frame_id = frame1
    temp_header.stamp = rospy.Time(0)
    try:
        frame1_to_frame2 = tf_listener.asMatrix(frame2, temp_header)
    except Exception:
        rospy.logerr("tf transform was not there between %s and %s"%(frame1, frame2))
        return scipy.matrix(scipy.identity(4))
    return scipy.matrix(frame1_to_frame2)
Example #28
def kalman_upd(beta,
               V,
               y,
               X,
               s,
               S,
               switch = 0,
               D = None,
               d = None,
               G = None,
               a = None,
               b = None):
    r"""
    This is the update step of the Kalman filter.

    .. math::
       :nowrap:

       \begin{eqnarray*}
       e_t &=& y_t - X_t \beta_{t|t-1} \\
       K_t &=& V_{t|t-1} X_t^T (\sigma + X_t V_{t|t-1} X_t^T)^{-1}\\
       \beta_{t|t} &=& \beta_{t|t-1} + K_t e_t\\
       V_{t|t} &=& (I - K_t X_t) V_{t|t-1}\\
       \end{eqnarray*}


    
    """
    e = y - X * beta
    K = V * X.T * ( s + X * V * X.T).I
    beta = beta + K * e
    if switch == 1:
        D = scipy.matrix(D)
        d = scipy.matrix(d)
        if DEBUG: print "beta: ", beta
        beta = beta - S * D.T * ( D * S * D.T).I * ( D * beta - d)
        if DEBUG: print "beta: ", beta
    elif switch == 2:
        G = scipy.matrix(G)
        a = scipy.matrix(a)
        b = scipy.matrix(b)
        n = len(beta)
        P = 2* V.I
        q = -2 * V.I.T * beta
        bigG = scipy.empty((2*n, n))
        h = scipy.empty((2*n, 1))
        bigG[:n, :] = -G
        bigG[n:, :] = G
        h[:n, :] = -a
        h[n:, :] = b
        paraset = map(cvxopt.matrix, (P, q, bigG, h, D, d))
        beta = qp(*paraset)['x']
    temp = K*X
    V = (scipy.identity(temp.shape[0]) - temp) * V
    return (beta, V, e, K)
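
After the standard update, switch == 1 projects beta back onto the equality constraints D*beta = d, while switch == 2 enforces the box constraints a <= G*beta <= b by re-solving for beta as a quadratic program with cvxopt.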
Example #29
def gen_dataset(nt, d, lb, ub, kindex, hyp, s=1e-9):
    X = draw_support(d, lb, ub, nt, SUPPORT_UNIFORM)
    D = [[sp.NaN]] * (nt)
    kf = GPdc.kernel(kindex, d, hyp)
    Kxx = GPdc.buildKsym_d(kf, X, D)
    Y = (
        spl.cholesky(Kxx, lower=True) * sp.matrix(sps.norm.rvs(0, 1.0, nt)).T
        + sp.matrix(sps.norm.rvs(0, sp.sqrt(s), nt)).T
    )
    S = sp.matrix([s] * nt).T
    return [X, Y, S, D]
Example #30
    def __init__(self, **kwargs):
        CartPoleEnvironment.__init__(self, **kwargs)
        nu = 13.2  # sec^-2
        tau = self.tau

        # linearized movement equations
        self.A = matrix(eye(4))
        self.A[0,1] = tau
        self.A[2,3] = tau
        self.A[1,0] = nu*tau
        self.b = matrix([0.0, nu*tau/9.80665, 0.0, tau])
Example #31
    def _SPARSESVD(self, X, nBases):
        k = int(nBases[0])
        (U, S, V) = sparsesvd.sparsesvd(X.tocsc(), k)
        self.W = scipy.matrix(U.T*S)
        self.H = scipy.matrix(V)
        return (self.W, self.H)
Example #32
# -*- coding: utf-8 -*-
"""
Created on Wed Aug  5 15:33:25 2020

@author: ignab
"""

import scipy as sp
import numpy as np
from time import perf_counter

N = 50

A = sp.matrix(sp.rand(N, N))  # main matrix
B = sp.matrix(sp.rand(N, N))  # secondary matrix
B1 = sp.matrix(sp.rand(N, 1))  # right-hand-side result vector

print(f"A = \n{A}")
print(f"B = \n{B}")

# matrix multiplication

t1 = perf_counter()
C = A * B  # plain * on arrays is elementwise, which is why the arrays were wrapped as matrix objects
t2 = perf_counter()
dt = t2 - t1
print(
    f"Elapsed time for a 50 x 50 matrix multiplication = {dt} s"
)
print(f"C = {C}")
Example #33
import scipy as sp

a = [[1, 2, 3], [1, 2, 3], [1, 2, 3]]

x = sp.matrix("1 2 3 ; 3 4 5 ; 7 8 10")
print(x)
y = sp.matrix("1 2 3 ; 3 6 9 ; 3 9 3")
print(y)

z = sp.dot(x, y)

print("Matrix multiplication of xy = \n", z)

Example #34
def removeQuadOff_ENVI(input_image_path):

    import numpy
    import pylab
    import re
    import scipy
    import scipy.optimize
    import subprocess

    ramp_removed_image_path = "ramp_removed_" + input_image_path[
        input_image_path.rfind("/") + 1:input_image_path.rfind(".")] + ".img"

    assert not os.path.exists(
        ramp_removed_image_path
    ), "\n***** " + ramp_removed_image_path + " already exists, exiting...\n"

    cmd = "\ngdalinfo " + input_image_path + "\n"
    pipe = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE,
                            universal_newlines=True).stdout
    info = pipe.read()
    pipe.close()

    size = info[re.search(r"Size is \d+, \d+", info).start(0) +
                8:re.search(r"Size is \d+, \d+", info).end(0)]

    width, length = size.split(",")

    width = width.strip()
    length = length.strip()

    if info.find("ENVI") < 0:
        out_path = input_image_path[input_image_path.rfind("/") +
                                    1:input_image_path.rfind(".")] + ".img"
        cmd = "\ngdalwarp -of ENVI -srcnodata \"nan\" -dstnodata \"nan\" " + input_image_path + " " + out_path + "\n"
        subprocess.call(cmd, shell=True)
        input_image_path = out_path

    infile = open(input_image_path, "rb")

    indat = pylab.fromfile(infile, pylab.float32,
                           -1).reshape(int(length), int(width))
    #indat = pylab.fromfile(infile,pylab.float32,-1).reshape(int(width) * int(length), -1);

    infile.close()

    x = scipy.arange(0, int(length))
    y = scipy.arange(0, int(width))

    x_grid, y_grid = numpy.meshgrid(x, y)

    indices = numpy.arange(0,
                           int(width) * int(length))

    mx = scipy.asarray(x_grid).reshape(-1)
    my = scipy.asarray(y_grid).reshape(-1)
    d = scipy.asarray(indat).reshape(-1)

    nonan_ids = indices[scipy.logical_not(numpy.isnan(d))]

    mx = mx[nonan_ids]
    my = my[nonan_ids]
    d = d[nonan_ids]

    init_mx = scipy.asarray(x_grid).reshape(-1)[nonan_ids]
    init_my = scipy.asarray(y_grid).reshape(-1)[nonan_ids]
    #ramp_removed = scipy.asarray(indat).reshape(-1)[nonan_ids];
    ramp_removed = scipy.zeros(int(length) * int(width))[nonan_ids]
    init_m_ones = scipy.ones(int(length) * int(width))[nonan_ids]

    #	init_xs = [init_m_ones, init_mx, init_my, scipy.multiply(init_mx,init_my), scipy.power(init_mx,2), scipy.power(init_my,2)];
    init_xs = [init_m_ones, init_mx, init_my]

    p0 = scipy.zeros(len(init_xs))
    p = scipy.zeros(len(init_xs))

    for i in scipy.arange(0, 1):

        m_ones = scipy.ones(scipy.size(mx))

        #		xs     = [m_ones, mx, my, scipy.multiply(mx,my), scipy.power(mx,2), scipy.power(my,2)];
        xs = [m_ones, mx, my]

        G = scipy.vstack(xs).T
        #		print(mx);

        #		print(scipy.size(d), scipy.size(xs));

        plsq = scipy.optimize.leastsq(residuals, p0, args=(d, xs))

        res = d - peval(xs, plsq[0])
        mod = plsq[0]

        p = p + mod
        #		print(plsq[0]);

        synth = G * scipy.matrix(mod).T
        cutoff = res.std(axis=0, ddof=1)
        #print(cutoff);

        indices = numpy.arange(0, numpy.size(mx))
        good_ids = indices[abs(res) <= cutoff]

        #		plt.figure(i + 2);
        #		plt.plot(mx,d,'b.',label='alloff');
        #		plt.plot(mx[good_ids],synth[good_ids],'.',label='fit',color='lightgreen');
        #		plt.plot(mx[bad_ids],d[bad_ids],'r.',label='cull #' + str(i + 1));
        #		plt.legend();

        mx = mx[good_ids]
        my = my[good_ids]
        d = res[good_ids]

        #		ramp_removed = scipy.asarray(ramp_removed - peval(init_xs, plsq[0]));
        ramp_removed = scipy.asarray(ramp_removed + peval(init_xs, plsq[0]))

    d = scipy.asarray(indat).reshape(-1)

    for i in range(0, scipy.size(nonan_ids)):

        d[nonan_ids[i]] = ramp_removed[i]

    ramp_removed = d.reshape(int(length), int(width))

    #	import matplotlib;
    #	matplotlib.pyplot.imshow(scipy.array(indat),interpolation='nearest',origin='lower');
    #	matplotlib.pyplot.show();

    outfile = open(ramp_removed_image_path, "wb")

    outoff = scipy.matrix(ramp_removed, scipy.float32)

    outoff.tofile(outfile)

    outfile.close()
Example #35
                  dest="filename",
                  default="big_file",
                  help="input file with two matrices",
                  metavar="FILE")
(options, args) = parser.parse_args()


def read(filename):
    lines = open(filename, 'r').read().splitlines()
    A = []
    B = []
    matrix = A
    for line in lines:
        if line != "":
            matrix.append(list(map(int, line.split("\t"))))
        else:
            matrix = B
    return A, B


def printMatrix(matrix):
    matrix = numpy.array(matrix)
    for line in matrix:
        print "\t".join(map(str, line))


A, B = read(options.filename)
A = scipy.matrix(A)
B = scipy.matrix(B)
C = A * B  # easy and intuitive, isn't it?
Example #36
def q12d_local(vertices, lame, mu):
    """local stiffness matrix for two dimensional elasticity on a square element

    Parameters
    ----------
    vertices : array
        Coordinates of the four element vertices
    lame : Float
        Lame's first parameter
    mu : Float
        shear modulus

    See Also
    --------
    linear_elasticity

    Notes
    -----
    Vertices should be listed in counter-clockwise order::

        [3]----[2]
         |      |
         |      |
        [0]----[1]
    
    Degrees of freedom are enumerated as follows::

        [x=6,y=7]----[x=4,y=5]
            |            |
            |            |
        [x=0,y=1]----[x=2,y=3]

    """

    M = lame + 2 * mu  # P-wave modulus

    R_11 = matrix([[2, -2, -1, 1], [-2, 2, 1, -1], [-1, 1, 2, -2],
                   [1, -1, -2, 2]]) / 6.0

    R_12 = matrix([[1, 1, -1, -1], [-1, -1, 1, 1], [-1, -1, 1, 1],
                   [1, 1, -1, -1]]) / 4.0

    R_22 = matrix([[2, 1, -1, -2], [1, 2, -2, -1], [-1, -2, 2, 1],
                   [-2, -1, 1, 2]]) / 6.0

    F = inv(vstack((vertices[1] - vertices[0], vertices[3] - vertices[0])))

    K = zeros((8, 8))  # stiffness matrix

    E = F.T * matrix([[M, 0], [0, mu]]) * F
    K[0::2, 0::2] = E[0, 0] * R_11 + E[0, 1] * R_12 + E[1, 0] * R_12.T + E[1, 1] * R_22

    E = F.T * matrix([[mu, 0], [0, M]]) * F
    K[1::2, 1::2] = E[0, 0] * R_11 + E[0, 1] * R_12 + E[1, 0] * R_12.T + E[1, 1] * R_22

    E = F.T * matrix([[0, mu], [lame, 0]]) * F
    K[1::2, 0::2] = E[0, 0] * R_11 + E[0, 1] * R_12 + E[1, 0] * R_12.T + E[1, 1] * R_22

    K[0::2, 1::2] = K[1::2, 0::2].T

    K /= det(F)

    return K
Example #37
def gap(data,
        run=1,
        nrefs=20,
        mink=1,
        kstep=1,
        maxlim=False,
        maxtime=60,
        file_save_data='Gap_Statistics_report.txt'):
    """
    data is a m x n matrix, where the rows are the observations and the columns the features.
    run is an index that gives marks the "Gapclustering_run_.txt" file
    nrefs is the number of null references. The clusters will be build as follows:
    from mink we increase the number of clusters to be computed in steps of kstep, up to maxlim if maxlim!=0
    or maxlim!=False, or up to a maxtime in minutes. sphere tells whether the data is on an hyper-ball.
    """
    time1 = time.time()
    shape = data.shape
    tops = data.max(axis=0)
    bots = data.min(axis=0)
    dists = sp.matrix(sp.diag(tops - bots))
    rands = sp.random.random_sample(size=(*shape, nrefs))
    for i in range(nrefs):
        rands[:, :, i] = rands[:, :, i] * dists + bots
    gaps = []
    sk = []
    criteria = []
    i = 0
    k = mink
    oldclusters = 0
    old_clusters = []
    oldlabels = 0
    old_labels = []
    numclusters = 0
    num_clusters = []
    while True:
        kmeans = KMeans(n_clusters=k, n_jobs=-1)
        kmeans.fit(data)
        kmc = kmeans.cluster_centers_
        kml = kmeans.labels_
        disp = sum([
            dist(data[m, :], kmc[kml[m], :])**2 / (2 * list(kml).count(kml[m]))
            for m in range(shape[0])
        ])
        del kmeans
        refdisps = sp.zeros((rands.shape[2], ))
        for j in range(rands.shape[2]):
            kmeans = KMeans(n_clusters=k, n_jobs=-1)
            kmeans.fit(rands[:, :, j])
            kmc = kmeans.cluster_centers_
            kml = kmeans.labels_
            refdisps[j] = sum([
                dist(rands[m, :, j], kmc[kml[m], :])**2 /
                (2 * list(kml).count(kml[m])) for m in range(shape[0])
            ])
            del kmeans
        wbar = sp.mean(sp.log(refdisps))
        gaps.append(wbar - sp.log(disp))
        sk.append(sp.std(sp.log(refdisps)) * np.sqrt(1 + 1. / nrefs))
        timen = time.time()
        elapsed_time = (timen - time1) / 60  # mins
        with open(file_save_data.format(run), 'a') as report:
            report.write("{0} clusters done. Time in mins: {1}\n".format(
                k, elapsed_time))
        is_crit_gt_0 = False  # is the gap criteria greater than 0?
        if i >= 1:
            new_crit = gaps[i - 1] - (gaps[i] - sk[i])
            criteria.append(new_crit)
            if (k >= maxlim or new_crit > 0) and maxlim:
                is_crit_gt_0 = True
                numclusters = k - kstep
        if is_crit_gt_0:
            break
        if elapsed_time >= maxtime:
            break
        oldclusters = kmc
        old_clusters.append(kmc)
        oldlabels = kml
        old_labels.append(kml)
        num_clusters.append(k)
        i += 1
        k += kstep
        print(num_clusters[-1], " ", gaps[-1], " ", sk[-1])
    return num_clusters, old_clusters, old_labels, gaps, sk
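
The stopping test implements Tibshirani's gap-statistic criterion: choose the smallest k with Gap(k) >= Gap(k+1) - s_{k+1}, where Gap(k) is the mean log reference dispersion minus the log observed dispersion and s_k accounts for simulation error; new_crit > 0 is exactly that inequality evaluated at the previous k.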
Example #38
def debug_initialize_weights(L_in, L_out):
    return sp.matrix(sp.sin(sp.array(range(1, L_out*(L_in+1)+1))). \
                        reshape((L_out, L_in+1)) / 10)
Example #39
result("today|HAM", c.ham.p_word("today"), 0.1111)
result("SPAM|today is secret)", c.p_spam_given_phrase("today is secret"),
       0.4858)

from MachineLearning.linear_regression import linear_regression, gaussian
from scipy import matrix
print "\n=== Linear Regression ==="
x = [3, 4, 5, 6]
y = [0, -1, -2, -3]
(w0, w1), err = linear_regression(x, y)
print "(w0=%.1f, w1=%.1f) err=%.2f" % (w0, w1, err)

x = [2, 4, 6, 8]
y = [2, 5, 5, 8]
(w0, w1), err = linear_regression(x, y)
print "(w0=%.1f, w1=%.1f) err=%.2f" % (w0, w1, err)

x = matrix([[3], [4], [5], [6], [7]])
m, s = gaussian(x)
print "m  = %s" % str(m)
print "s^2= %s" % str(s)

x = matrix([[3], [9], [9], [3]])
m, s = gaussian(x)
print "m  = %s" % str(m)
print "s^2= %s" % str(s)

x = matrix([[3, 8], [4, 7], [5, 5], [6, 3], [7, 2]])
m, s = gaussian(x)
print "m  = %s" % str(m)
print "s^2= %s" % str(s)
Example #40
USAhousing = pd.read_csv('./Data/USA_Housing.csv')

# independent variables
data = USAhousing[[
    'Avg. Area Income', 'Avg. Area House Age', 'Avg. Area Number of Rooms',
    'Avg. Area Number of Bedrooms', 'Area Population'
]]
# dependent variable
price = USAhousing['Price']

#########################################################
# with Regularisation (Weights)
# create train and test data sets
x = np.arange(1, 1001)

A_train = sc.matrix(data[1000:])
A_test = sc.matrix(data[:1000])

b_train = sc.matrix(price[1000:])
b_test = sc.matrix(price[:1000])

b_train = sc.transpose(b_train)
b_test = sc.transpose(b_test)

### linear algebra formulae from Columbia edx course ###
# compute Weighted least square (will equal x_hat)
I = np.identity(5)  # Identity matrix
lamb_da = 1  # regularisation = 1 is equivalent to Least Squares
W_ls = la.inv(lamb_da * I + np.transpose(A_train) *
              A_train) * np.transpose(A_train) * b_train
# compute W ridge regression
Example #41
    2, 5, 10, 12, 15, 20, 30, 40, 45, 50, 55, 60, 75, 100, 125, 160, 200, 250,
    350, 500, 600, 800, 1000, 2000, 5000, 10000
]

pyplot.figure()
pyplot.subplot(2, 1, 1)
while cont < 10:
    x = []
    y = []
    mem = []
    pyplot.grid()
    i = 0
    while i < 10:
        N = Ns[i]
        print(N)
        A = matrix(rand(N, N))
        B = matrix(rand(N, N))

        t1 = perf_counter()
        C = mimatmul(A, B)
        t2 = perf_counter()

        dt = t2 - t1

        memparcial = 3 * (N**2) * 8
        mem.append(memparcial)
        print(f"Tiempo transcurrido = {dt} s")
        x.append(N)
        y.append(dt)

        #pyplot.subplot(2,2,1)
Example #42
def parameters(cmdargs):
    """
    """

    # The Hopfield parameters
    properties = json.load(
        open('data/hopfield_gamma_tune/gamma_problem_inputs.json'))
    hparams = {
        'numNeurons': properties['nQubits'],
        'inputState': properties['inputState'],
        'learningRule': properties['learningRule'],
        'numMemories': len(properties['memories'])
    }

    # Construct memories
    memories = properties['memories']

    # Basic simulation params
    nQubits = hparams['numNeurons']
    T = properties['annealTime']
    dt = 0.01 * T

    # Output parameters
    binary = 1  # Save output files as binary Numpy format
    progressout = 0  # Output simulation progress over anneal timesteps

    eigspecdat = 0  # Output data for eigspec
    eigspecplot = 0  # Plot eigspec
    eigspecnum = 2**nQubits  # Number of eigenvalues to output
    fidelplot = 0  # Plot fidelity
    fideldat = 0  # Output fidelity data
    fidelnumstates = 2**nQubits  # Check fidelity with this number of eigenstates
    overlapdat = 0  # Output overlap data
    overlapplot = 0  # Plot overlap

    # Output directory stuff
    # probdir = 'data/hopfield_gamma_tune/n'+str(nQubits)+'p'+\
    #     str(hparams['numMemories'])+hparams['learningRule']
    # if isinstance(T, collections.Iterable):
    #     probdir += 'MultiT'
    # if os.path.isdir(probdir):
    #     outlist = sorted([ int(name) for name in os.listdir(probdir)
    #                        if name.isdigit() ])
    # else:
    #     outlist = []
    # outnum = outlist[-1] + 1 if outlist else 0
    # outputdir = probdir + '/' + str(outnum) + '/'
    outputdir = 'data/hopfield_gamma_tune'

    probshow = 0  # Print final state probabilities to screen
    probout = 1  # Output probabilities to file
    mingap = 0  # Record the minimum spectral gap

    errchk = 0  # Error-checking on/off (for simulation accuracy)
    eps = 0.01  # Numerical error in normalization condition (1 - norm < eps)

    # Specify a QUBO (convert to Ising = True), or alpha, beta directly
    # (convert = False), and also specify the signs on the Ising Hamiltonian
    # terms (you can specify coefficients too for some problems if needed)
    isingConvert = 0
    isingSigns = {'hx': -1, 'hz': -1, 'hzz': -1}

    # Construct network Ising parameters
    neurons = nQubits

    # This is gamma, the appropriate weighting on the input vector
    isingSigns['hz'] *= float(cmdargs['farg'])

    alpha = sp.array(hparams['inputState'])
    beta = sp.zeros((neurons, neurons))
    delta = sp.array([])

    # Construct the memory matrix according to a learning rule
    if hparams['learningRule'] == 'hebb':
        # Construct pattern matrix according to Hebb's rule
        for i in range(neurons):
            for j in range(neurons):
                for p in range(len(memories)):
                    beta[i, j] += (memories[p][i] * memories[p][j] -
                                   len(memories) * (i == j))
        beta = sp.triu(beta) / float(neurons)
    elif hparams['learningRule'] == 'stork':
        # Construct the memory matrix according to the Storkey learning rule
        memMat = sp.zeros((neurons, neurons))
        for m, mem in enumerate(memories):
            for i in range(neurons):
                for j in range(neurons):
                    hij = sp.sum(
                        [memMat[i, k] * mem[k] for k in range(neurons)])
                    hji = sp.sum(
                        [memMat[j, k] * mem[k] for k in range(neurons)])
                    # Don't forget to make the normalization a float!
                    memMat[i,
                           j] += 1. / neurons * (mem[i] * mem[j] -
                                                 mem[i] * hji - hij * mem[j])
        beta = sp.triu(memMat)
    elif hparams['learningRule'] == 'proj':
        # Construct memory matrix according to the Moore-Penrose pseudoinverse rule
        memMat = sp.matrix(memories).T
        beta = sp.triu(memMat * sp.linalg.pinv(memMat))

    # Some outputs
    outputs = {
        'nQubits': nQubits,
        'learningRule': hparams['learningRule'],
        'inputState': hparams['inputState'],
        'memories': memories,
        'answer': memories[0],
        'annealTime': list(T) if isinstance(T, collections.abc.Iterable) else T
    }

    ############################################################################
    ######## All variables must be specified here, do NOT change the keys ######
    ############################################################################

    return {
        'nQubits': nQubits,
        'Q': None,
        'T': T,
        'dt': dt,
        'outputdir': outputdir,
        'errchk': errchk,
        'eps': eps,
        'isingConvert': isingConvert,
        'isingSigns': isingSigns,
        'outputs': outputs,
        'alpha': alpha,
        'beta': beta,
        'delta': delta,
        'eigdat': eigspecdat,
        'eigplot': eigspecplot,
        'eignum': eigspecnum,
        'fiddat': fideldat,
        'fidplot': fidelplot,
        'fidnumstates': fidelnumstates,
        'overlapdat': overlapdat,
        'overlapplot': overlapplot,
        'outdir': outputdir,
        'binary': binary,
        'progressout': progressout,
        'probshow': probshow,
        'probout': probout,
        'mingap': mingap,
        'stateoverlap': None,
        'hzscale': None,
        'hzzscale': None,
        'hxscale': None
    }
Example #43
    def find_object_frame_and_bounding_box(self, point_cloud):
        
        #get the name of the frame to use with z-axis being "up" or "normal to surface" 
        #(the cluster will be transformed to this frame, and the resulting box z will be this frame's z)
        #if param is not set, assumes the point cloud's frame is such
        self.base_frame = rospy.get_param("~z_up_frame", point_cloud.header.frame_id)

        #convert from PointCloud to 4xn scipy matrix in the base_frame
        cluster_frame = point_cloud.header.frame_id

        (points, cluster_to_base_frame) = transform_point_cloud(self.tf_listener, point_cloud, self.base_frame)
        if points is None:
            return (None, None, None)
        #print "cluster_to_base_frame:\n", ppmat(cluster_to_base_frame)

        #find the lowest point in the cluster to use as the 'table height'
        table_height = points[2,:].min()

        #run PCA on the x-y dimensions to find the tabletop orientation of the cluster
        (shifted_points, xy_mean) = self.mean_shift_xy(points)
        directions = self.pca(shifted_points[0:2, :])

        #convert the points to object frame:
        #rotate all the points about z so that the shortest direction is parallel to the y-axis (long side of object is parallel to x-axis) 
        #and translate them so that the table height is z=0 (x and y are already centered around the object mean)
        y_axis = scipy.mat([directions[1][0], directions[1][1], 0.])
        z_axis = scipy.mat([0.,0.,1.])
        x_axis = scipy.cross(y_axis, z_axis)
        rotmat = scipy.matrix(scipy.identity(4))
        rotmat[0:3, 0] = x_axis.T
        rotmat[0:3, 1] = y_axis.T
        rotmat[0:3, 2] = z_axis.T
        rotmat[2, 3] = table_height
        object_points = rotmat**-1 * shifted_points

        #remove outliers from the cluster
        object_points = self.remove_outliers(object_points)

        #find the object bounding box in the new object frame as [[xmin, ymin, zmin], [xmax, ymax, zmax]] (coordinates of opposite corners)
        object_bounding_box = [[0]*3 for i in range(2)]
        object_bounding_box_dims = [0]*3
        for dim in range(3):
            object_bounding_box[0][dim] = object_points[dim,:].min()
            object_bounding_box[1][dim] = object_points[dim,:].max()
            object_bounding_box_dims[dim] = object_bounding_box[1][dim] - object_bounding_box[0][dim]

        #now shift the object frame and bounding box so that the z-axis is centered at the middle of the bounding box
        x_offset = object_bounding_box[1][0] - object_bounding_box_dims[0]/2.
        y_offset = object_bounding_box[1][1] - object_bounding_box_dims[1]/2.
        for i in range(2):
            object_bounding_box[i][0] -= x_offset
            object_bounding_box[i][1] -= y_offset
        object_points[0, :] -= x_offset
        object_points[1, :] -= y_offset
        offset_mat = scipy.mat(scipy.identity(4))
        offset_mat[0,3] = x_offset
        offset_mat[1,3] = y_offset
        rotmat = rotmat * offset_mat
        #pdb.set_trace()

        #record the transforms from object frame to base frame and to the original cluster frame,
        #broadcast the object frame to tf, and draw the object frame in rviz
        unshift_mean = scipy.identity(4)
        unshift_mean[0,3] = xy_mean[0]
        unshift_mean[1,3] = xy_mean[1]
        object_to_base_frame = unshift_mean*rotmat
        object_to_cluster_frame = cluster_to_base_frame**-1 * object_to_base_frame

        #broadcast the object frame to tf
        (object_frame_pos, object_frame_quat) = mat_to_pos_and_quat(object_to_cluster_frame)
        self.tf_broadcaster.sendTransform(object_frame_pos, object_frame_quat, rospy.Time.now(), "object_frame", cluster_frame) 

        return (object_points, object_bounding_box_dims, object_bounding_box, object_to_base_frame, object_to_cluster_frame)
Example #44
def pose_to_mat(pose):
    quat = [pose.orientation.x, pose.orientation.y, pose.orientation.z, pose.orientation.w]
    pos = scipy.matrix([pose.position.x, pose.position.y, pose.position.z]).T
    mat = scipy.matrix(tf.transformations.quaternion_matrix(quat))
    mat[0:3, 3] = pos
    return mat
Example #45
def k_means_init_centroids(X, K):
  randidx = sp.random.randint(0, X.shape[0], size=K)
  return sp.matrix(X[randidx])
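
Note that randint samples with replacement, so the same point can be picked as more than one centroid; a hedged variant that samples without replacement:

def k_means_init_centroids_unique(X, K):
  randidx = sp.random.permutation(X.shape[0])[:K]
  return sp.matrix(X[randidx])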
Example #46
def recover_data(Z, U, K):
  return sp.matrix(Z) * sp.matrix(U[0:K, :])
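
recover_data maps K-dimensional scores back to the original feature space. Its counterpart projection is not shown in the snippet; with this slicing convention (rows of U as principal components) it would be the transpose product, sketched here:

def project_data(X, U, K):
  return sp.matrix(X) * sp.matrix(U[0:K, :]).T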
Example #47
import scipy as sp
from scipy.io import loadmat
import matplotlib.pyplot as plt
from funcs import train, learning_curve, validation_curve, poly_features, feature_normalize

# Load data
data = loadmat('ex5data1.mat')
X = sp.matrix(data['X'])
Y = sp.matrix(data['y'])
X_val = sp.matrix(data['Xval'])
Y_val = sp.matrix(data['yval'])
X_test = sp.matrix(data['Xtest'])
Y_test = sp.matrix(data['ytest'])

# Initialize
m = X.shape[0]
m_val = X_val.shape[0]
m_test = X_test.shape[0]
X_a = sp.hstack((sp.ones((m, 1)), X))
X_val_a = sp.hstack((sp.ones((m_val, 1)), X_val))

# Map features
p = 8
X_n, mu, sigma = feature_normalize(poly_features(X, p))
X_poly = sp.hstack((sp.ones((m, 1)), X_n))
X_poly_val = sp.hstack((sp.ones((m_val, 1)), feature_normalize(poly_features(X_val, p), mu, sigma)[0]))
X_poly_test = sp.hstack((sp.ones((m_test, 1)), feature_normalize(poly_features(X_test, p), mu, sigma)[0]))

# Train
_lambda = 0.3
theta = train(X_poly, Y, _lambda)
Example #48
            elif cat_ordering[im1_lab] < cat_ordering[im2_lab]:
                O_row.append(O_cnt)
                O_column.append(i)
                O_value.append(-1)
                O_row.append(O_cnt)
                O_column.append(i + j + 1)
                O_value.append(1)
                O_cnt += 1
            elif cat_ordering[im1_lab] > cat_ordering[im2_lab]:
                O_row.append(O_cnt)
                O_column.append(i)
                O_value.append(1)
                O_row.append(O_cnt)
                O_column.append(i + j + 1)
                O_value.append(-1)
                O_cnt += 1

    S = csr_matrix((S_value, (S_row, S_column)),
                   (S_cnt, datadict['feat'].shape[0]))
    O = csr_matrix((O_value, (O_row, O_column)),
                   (O_cnt, datadict['feat'].shape[0]))
    print(S.shape)
    print(O.shape)
    C_O = scipy.matrix(0.1 * np.ones([O_cnt, 1]))
    C_S = scipy.matrix(0.1 * np.ones([S_cnt, 1]))
    X = scipy.matrix(X)
    w = rank_svm(X, S, O, C_S, C_O)
    np.save(
        "%s/weights_%d_%s" % (zero_shot_weights_directory, idx + 1,
                              datadict['attribute_names'][idx]), w)
Example #49
def lda_train2(data=None, label=None):
    """

    Perform Linear Discriminant Analysis on input data.

    Input:
        data (array): input data array of shape (number samples x number features).

        label (array): input data label array of shape (number of samples x 1). Must start at 0 (e.g., label = [0,0,0,1,2] for 5 samples).

    Output:
        success (boolean): indicates whether training was successful (True) or not (False).

    Configurable fields:{"name": "dimreduction.lda.train", "config": {"": ""}, "inputs": ["data", "label"], "outputs": ["success"]}

    See Also:


    Notes:


    Example:


    References:
        .. [1]    ...
        .. [2]    ...
        .. [3]    ...
    """
    # Check inputs
    if data is None:
        raise TypeError("Please provide input data.")
    if label is None:
        raise TypeError("Please provide the input data label.")
    if 0 not in label:
        raise TypeError("Label must start at 0 (e.g., label = [0,0,0,1,2] for 5 samples).")
    # success = False
    try:
        # Compute mean of each set (mi)
        m = []
        for c in set(label):
            m.append(scipy.mean(data[label == c], axis=0))
        m = scipy.array(m)
        # Compute Scatter Matrix of each set (Si)
        S = []
        for c in set(label):
            S.append(scipy.cov(scipy.array(data[label == c]).T))
        # Compute Within Scatter Matrix (SW)
        SW = 0
        for s in S:
            SW += s
        # Compute Total Mean (mt)
        mt = scipy.mean(data, axis=0)
        # Compute Total Scatter Matrix (ST)
        ST = 0
        for xi in data:
            aux = scipy.matrix(xi - mt)
            ST += aux.T * aux
        # Compute Between Scatter Matrix (SB)
        SB = 0
        for c in set(label):
            aux = scipy.matrix(m[c, :] - mt)
            SB += len(pylab.find(label == c)) * aux.T * aux
        # Solve (Sb - li*Sw)Wi = 0 for the eigenvectors wi
        eigenvalues, v = linalg.eig(SB, SW)
        # Get real part and sort eigenvalues
        real_sorted_eigenvalues = []
        for i in range(len(eigenvalues)):
            real_sorted_eigenvalues.append([scipy.real(eigenvalues[i]), i])
        real_sorted_eigenvalues.sort()
        # Get the (nclasses-1) main eigenvectors
        # Assures eigenvalue is not NaN
        nclasses = len(set(label)) - 1
        # nclasses = 5
        eigenvectors = []
        for i in range(-1, -len(real_sorted_eigenvalues) - 1, -1):
            if not scipy.isnan(real_sorted_eigenvalues[i][0]):
                # eigenvectors are the columns of v
                eigenvectors.append(v[:, real_sorted_eigenvalues[i][1]])
            if len(eigenvectors) == nclasses: break

        # Updates variables
        # self.eigen_values = real_sorted_eigenvalues
        # self.eigen_vectors = eigenvectors
        # self.transform_matrix = scipy.matrix(eigenvectors)
        transform_matrix = scipy.matrix(eigenvectors)

        # success = True
        # self.is_trained = True
    except Exception as e:
        print(e)
        print(traceback.format_exc())
    # return success
    return transform_matrix
Example #50
def parameters(cmdargs):
    """
    """

    # import problems.hopfield.params as params

    # learningRule = params.learningRules[params.rule]
    learningRule = cmdargs['simtype']
    # nQubits = params.numQubits
    nQubits = 5
    # T = params.annealTime
    T = sp.arange(0.1, 15, 0.5)
    T = 10.0
    dt = 0.01*T
    inputstate = [1,1,-1,-1,1]

    # Output parameters
    output = 1 # Turn on/off all output except final probabilities
    binary = 1 # Save as binary Numpy
    progressout = 0 # Output simulation progress over anneal timesteps

    eigspecdat = 1 # Output data for eigspec
    eigspecplot = 0 # Plot eigspec
    eigspecnum = 2**nQubits # Number of eigenvalues
    fidelplot = 0 # Plot fidelity
    fideldat = 0 # Output fidelity data
    fidelnumstates = 2**nQubits # Check fidelity with this number of eigenstates
    overlapdat = 0 # Output overlap data
    overlapplot = 0 # Plot overlap

    # Output directory stuff
    pnum = 0
    if 32 > cmdargs['instance']:
        pnum = 1
    elif (496-1) < cmdargs['instance']:
        pnum = 3
    else:
        pnum = 2
    probdir = 'data/hopfield_all_n'+str(nQubits)+'p'+str(pnum)+learningRule
#     probdir = 'problems/all_n'+str(nQubits)+'p'+str(pnum)+learningRule
    if isinstance(T, collections.Iterable):
        probdir += 'MultiT'
    if os.path.isdir(probdir):
        outlist = sorted([ int(name) for name in os.listdir(probdir) 
                           if name.isdigit() ])
    else:
        outlist = []
    outnum = outlist[-1] + 1 if outlist else 0
    outputdir = probdir + '/' + str(outnum) + '/'

    probshow = 0 # Print final state probabilities to screen
    probout = 1 # Output probabilities to file
    mingap = 1 # Record the minimum spectral gap

    errchk = 0 # Error-checking on/off (for simulation accuracy)
    eps = 0.01 # Numerical error in normalization condition (1 - norm < eps)

    # Specify a QUBO (convert to Ising = True), or alpha, beta directly 
    # (convert = False), and also specify the signs on the Ising Hamiltonian 
    # terms (you can specify coefficients too for some problems if needed)
    isingConvert = 0
    isingSigns = {'hx': -1, 'hz': -1, 'hzz': -1}

    neurons = nQubits
    memories = []

    # Load the list of memories
    patternSet = pickle.load(open('problems/n5p'+str(pnum)+'mems.dat', 'rb'))
    # Define the right index for each pnum case
    psetIdx = cmdargs['instance']
    if 31 < cmdargs['instance'] < 496:
        psetIdx -= 32
    elif cmdargs['instance'] >= 496:
        psetIdx -= 496
    # Iterate through the set that corresponds to the [corrected] instance number
    for bitstring in patternSet[psetIdx]:
        # Convert binary to Ising spins
        spins = [ 1 if k == '1' else -1 for k in bitstring ]
        memories.append(spins)

    # Ensure the input state is among the memories (it replaces the first one)
    # inputstate = params.inputState
    # if params.includeInput and inputstate not in memories:
    #     memories[0] = inputstate
    if inputstate not in memories:
        memories[0] = inputstate

    # This is gamma, the appropriate weighting on the input vector
    # (float division: the integer form would truncate to zero under Python 2)
    isingSigns['hz'] *= 1.0 - (len(inputstate) - inputstate.count(0))/(2.0*neurons)

    # Initialize Hamiltonian parameters
    alpha = sp.array(inputstate)
    beta = sp.zeros((neurons,neurons))
    delta = sp.array([])

    # Construct the memory matrix according to a learning rule
    if learningRule == 'hebb':
        # Construct pattern matrix according to Hebb's rule
        for i in range(neurons):
            for j in range(neurons):
                for p in range(len(memories)):
                    beta[i,j] += ( memories[p][i]*memories[p][j] -
                                   len(memories)*(i == j) )
        beta = sp.triu(beta)/float(neurons)
    elif learningRule == 'stork':
        # Construct the memory matrix according to the Storkey learning rule
        memMat = sp.zeros((neurons,neurons))
        for m, mem in enumerate(memories):
            for i in range(neurons):
                for j in range(neurons):
                    hij = sp.sum([ memMat[i,k]*mem[k] for k in range(neurons) ])
                    hji = sp.sum([ memMat[j,k]*mem[k] for k in range(neurons) ])
                    # Don't forget to make the normalization a float!
                    memMat[i,j] += 1./neurons*(mem[i]*mem[j] - mem[i]*hji - 
                                               hij*mem[j])
        beta = sp.triu(memMat)
    elif learningRule == 'proj':
        # Construct memory matrix according to the Moore-Penrose pseudoinverse rule
        memMat = sp.matrix(memories).T
        beta = sp.triu(memMat * sp.linalg.pinv(memMat))

    # Calculate Hamming distance between input state and each memory
    hammingDistance = []
    for mem in memories:
        dist = sp.sum(abs(sp.array(inputstate)-sp.array(mem))/2)
        hammingDistance.append(dist)

    hamMean = sp.average(hammingDistance)
    hamMed = sp.median(hammingDistance)

    # Some outputs
    outputs = {
        'nQubits': nQubits,
        'learningRule': learningRule,
        'outdir': probdir,
        'inputState': inputstate,
        'memories': memories,
        'hammingDistance': {'dist': hammingDistance,
                            'mean': hamMean,
                            'median': hamMed },
        'annealTime': list(T) if isinstance(T, collections.Iterable) else T
               }

    ############################################################################
    ######## All variables must be specified here, do NOT change the keys ######
    ############################################################################

    return {
        'nQubits': nQubits,
        'Q': None,
        'T': T,
        'dt': dt,
        'outputdir': outputdir,
        'errchk': errchk,
        'eps': eps,
        'isingConvert': isingConvert,
        'isingSigns': isingSigns,
        'outputs': outputs,
        'alpha': alpha,
        'beta': beta,
        'delta': delta,
        'eigdat': eigspecdat,
        'eigplot': eigspecplot,
        'eignum': eigspecnum,
        'fiddat': fideldat,
        'fidplot': fidelplot,
        'fidnumstates': fidelnumstates,
        'overlapdat': overlapdat,
        'overlapplot': overlapplot,
        'outdir': outputdir,
        'binary': binary,
        'progressout': progressout,
        'probshow': probshow,
        'probout': probout,
        'mingap': mingap,
        'stateoverlap': None
        }
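
The Hebb branch above is easy to sanity-check in isolation; a standalone replay on two toy 3-spin patterns (sketch, arbitrary values):

import scipy as sp
memories = [[1, 1, -1], [1, -1, 1]]
neurons = 3
beta = sp.zeros((neurons, neurons))
for i in range(neurons):
    for j in range(neurons):
        for p in range(len(memories)):
            beta[i, j] += (memories[p][i]*memories[p][j]
                           - len(memories)*(i == j))
beta = sp.triu(beta)/float(neurons)   # upper-triangular Hebbian couplings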
Example #51
0
def compute_centroids(X, idx, K):
  def compute_mean(x):
    return sp.squeeze(sp.asarray(sp.sum(x, 0))) / x.shape[0]

  return sp.matrix([compute_mean(X[sp.where(idx==k)[0]]) for k in range(0, K)])
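
A quick usage sketch (toy data; idx holds each row's cluster assignment):

import scipy as sp
X = sp.matrix([[0.0, 0.0], [0.0, 1.0], [10.0, 10.0], [10.0, 11.0]])
idx = sp.array([0, 0, 1, 1])
print(compute_centroids(X, idx, 2))   # -> [[0., 0.5], [10., 10.5]]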
Example #52
0

from scipy import stats as sps
from scipy import linalg as spl
import scipy as sp
from matplotlib import pyplot as plt

import GPdc

ni = 100
kf = GPdc.kernel(GPdc.SQUEXP, 2, sp.array([1.3, 0.3, 0.2]))
X = sp.random.uniform(-1, 1, size=[ni, 2])
D = [[sp.NaN]] * ni
Kxx = GPdc.buildKsym_d(kf, X, D)
s = 1e-2
Y = spl.cholesky(Kxx, lower=True) * sp.matrix(sps.norm.rvs(
    0, 1., ni)).T + sp.matrix(sps.norm.rvs(0, s, ni)).T
S = sp.ones(ni) * s
print Y
MLEHYP = GPdc.searchMLEhyp(X, Y, S, D, sp.array([2., 2., 2.]),
                           sp.array([-2., -2., -2.]), GPdc.SQUEXP)
print MLEHYP

MAPHYP = GPdc.searchMAPhyp(X, Y, S, D, sp.array([0., 0., 0.]),
                           sp.array([1., 1., 1.]), GPdc.SQUEXP)
print MAPHYP
Example #53
0
def project_data(X, U, K):
  return sp.matrix(X) * sp.matrix(U[0:K, :].T)
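
Usage sketch (the slicing U[0:K, :] implies the principal directions are stored as rows of U):

import scipy as sp
X = sp.array([[1.0, 1.0], [2.0, 2.0], [3.0, 3.0]])
U = sp.array([[0.7071, 0.7071], [-0.7071, 0.7071]])  # rows: principal axes
Z = project_data(X, U, 1)   # project onto the first principal direction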
Example #54
0
class TestCursor(unittest.TestCase):

    blocks = matrix([[1, 2, 0, 3], [2, 1, 2, 0], [0, 2, 1, 2]])
    patterns = [
        matrix([[0, 0], [0, 0]]),
        matrix([[1, 1], [1, 1]]),
        matrix([[1, 0], [0, 1]]),
        matrix([[0, 1], [1, 1]])
    ]

    def test_blocksparse_move_look(self):

        c = BlockSparseCursor("K", rax, self.blocks, self.patterns)
        self.assertEqual(
            c.lookup,
            [[0, 2, 6, -1, -1, -1, -1, 21], [1, 3, -1, 7, -1, -1, 20, 22],
             [4, -1, 8, 10, 14, -1, -1, -1], [-1, 5, 9, 11, -1, 15, -1, -1],
             [-1, -1, 12, -1, 16, 18, 23, -1],
             [-1, -1, -1, 13, 17, 19, -1, 24]])

        self.assertEqual(8, c.abs_offset(Coords(2, 2)))
        self.assertEqual(8, c.rel_offset(Coords(2, 2)))

        s = c.move(Coords(down=2, right=2))  # Now at K[8]
        self.assertEqual("addq $64, %%rax", s.gen())

        s = c.move(Coords(down=1, units="blocks"))  # Now at K[12]
        addr, comment = c.look(Coords(right=1, down=1))  # Looking at K[13]
        self.assertEqual(addr.offset.value,
                         8)  # Takes 1 double to go from K[12] to K[13]

        return c

    def test_blocksparse_tab(self):

        c = BlockSparseCursor("K", rax, self.blocks, self.patterns)

        # Tab down
        stmt, disp = c.tab(1, 0)
        self.assertEqual(8 * 4, stmt.inputs[0].value)
        self.assertEqual(4, c.abs_offset(c._cursor))

        # Tab right
        stmt, disp = c.tab(0, 1)
        self.assertEqual(8, c.abs_offset(c._cursor))

        # Tab up
        stmt, disp = c.tab(-1, 0)
        self.assertEqual(6, c.abs_offset(c._cursor))
        self.assertEqual(Coords(), disp)

        # Tab right to zero
        with self.assertRaises(Exception):
            c.tab(0, 1)

        # Tab right to weird
        stmt, blockstart = c.tab(0, 2)
        self.assertEqual(20, c.abs_offset(c._cursor))
        self.assertEqual(Coords(down=-1), blockstart)

        # Tab down from weird to normal
        c.tab(2, 0)
        self.assertEqual(23, c.abs_offset(c._cursor))

        return c
Example #55
0
def NR(f, Df, x0, epsx, epsf, kmax):
    # Completion sketch: the original snippet breaks off after the Jacobian.
    # Solve J*dx = -f(x), update, and stop once step and residual are small.
    conv = 0
    x = scipy.matrix(x0)
    for k in range(0, kmax):
        r = -f(x)
        J = Df(x)
        dx = scipy.linalg.solve(J, r)   # assumes scipy.linalg is imported
        x = x + dx
        if scipy.linalg.norm(dx) < epsx and scipy.linalg.norm(r) < epsf:
            conv = 1
            break
    return x, conv
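
A hypothetical call, finding the root of x**2 - 2 from x0 = 1 (assumes scipy and scipy.linalg are imported):

f  = lambda x: scipy.matrix([[x[0, 0]**2 - 2.0]])
Df = lambda x: scipy.matrix([[2.0*x[0, 0]]])
root, converged = NR(f, Df, [[1.0]], 1e-10, 1e-10, 50)   # root -> ~1.41421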
Example #56
0
def calc_sysmat_perp(layer_objs):
    sysmat = matrix([[1.0, 0], [0, 1.0]])
    for layer in layer_objs:
        sysmat = sysmat * layer.tmatrix_perp
    return sysmat
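
Usage sketch with stub layers (hypothetical: real layer objects only need a 2x2 tmatrix_perp attribute, and matrix is assumed to come from scipy):

class Layer(object):
    def __init__(self, tm):
        self.tmatrix_perp = tm

layers = [Layer(matrix([[1.0, 0.1], [0.0, 1.0]])),
          Layer(matrix([[1.0, 0.0], [0.2, 1.0]]))]
print(calc_sysmat_perp(layers))   # product of the layer transfer matrices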
Example #57
0
# -*- coding: utf-8 -*-
"""
Spyder Editor

This is a temporary script file.
"""

print("Matriz de 4x4:")
import scipy as sp
from time import perf_counter

n = 4
A = sp.matrix(sp.rand(n, n))
B = sp.matrix(sp.rand(n, n))

t1 = perf_counter()

print(f"A = \n{A}")
print(f"B = \n{B}")

C = A @ B
t2 = perf_counter()
print(f"C = \n{C}")
print(f"C00 = \n{C[0,0]}")
print(f"A00 * B00 = \n{A[0,0]*B[0,0]}")

dt = t2 - t1

print(f"Tiempo transcurrido matriz 4x4: = {dt} s")

print("Matriz de 100x100:")
Example #58
0
def Rot_x(t):
    return sc.matrix([[1.0, 0.0, 0.0], [0.0, sc.cos(t), -1.0 * sc.sin(t)],
                      [0.0, sc.sin(t), sc.cos(t)]])
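
A quick property check (sketch; assumes scipy is imported as sc, matching the snippet):

R = Rot_x(0.3)
assert sc.allclose(R * R.T, sc.identity(3))   # rotations are orthogonal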
Example #59
0
import scipy as sp

# Pauli matrices 2 x 2
p0 = sp.matrix([[1, 0],[0, 1]])
p1 = sp.matrix([[0, 1], [1, 0]])
p2 = sp.matrix([[0, -1j], [1j, 0]])
p3 = sp.matrix([[1, 0], [0, -1]])
pauli_matrices = [p0, p1, p2, p3]

# export control
__all__ = ["pauli_matrices"]  # __all__ entries must be strings
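
A small sanity check of the algebra (sketch): the commutator [p1, p2] should equal 2i*p3.

comm = p1 * p2 - p2 * p1
assert (comm == 2j * p3).all()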
Example #60
0
					ax.plot(*pairs[j], c='black', alpha=a )
			ax.scatter(2,1,4,color='red',marker='o',s=40)

		if fname is None:
			plt.show()
		else:
			fig.savefig(fname)
		plt.close()

if __name__ == '__main__':

	import matplotlib.pyplot as plt
	
	alpha = scipy.array([10,200])
	center = scipy.array([2,1])
	x = scipy.matrix([3,3])
	# x = scipy.random.randint(-3,3,(4,2))
	sfunc = step_size(0.9,1)

	para = parabola.ParabolaDir(alpha,center)

	sgd = SGD(afunc=para,x0=x,sfunc=sfunc)

	print sgd.getSoln()

	# sgd.reset()

	for i in range(200):
		sgd.nsteps(1)
		fname = 'vid5/sgd_q1_{0:03d}'.format(i)
		# ax = plt.gca()